/**
 * Renders this bucket id as <code>BucketId(0x&lt;hex&gt;)</code>, with the raw
 * 64-bit id zero-padded on the left to exactly 16 hexadecimal digits.
 */
@Override
public String toString() {
    String hex = Long.toHexString(getId());
    StringBuilder buf = new StringBuilder("BucketId(0x");
    // Left-pad so the id always occupies 16 hex characters.
    for (int pad = 16 - hex.length(); pad > 0; --pad) {
        buf.append('0');
    }
    return buf.append(hex).append(')').toString();
}
/**
 * This method maps the given bucket id to its corresponding column.
 *
 * @param bucketId The bucket whose column to lookup.
 * @return The column to distribute the bucket to.
 */
public int getColumn(BucketId bucketId) {
    // Mask the id down to the table index; getNumBuckets yields a power of two.
    int index = (int) (bucketId.getId() & (getNumBuckets(numBucketBits) - 1));
    if (index < bucketToColumn.length) {
        return bucketToColumn[index];
    }
    // Table and bit count disagree — log loudly and fall back to column 0.
    log.log(Level.SEVERE, "The bucket distribution map is not in sync with the number of bucket bits. " +
            "This should never happen! Distribution is broken!!");
    return 0;
}
}
/**
 * This method maps the given bucket id to its corresponding column.
 *
 * @param bucketId The bucket whose column to lookup.
 * @return The column to distribute the bucket to.
 */
public int getColumn(BucketId bucketId) {
    // Mask the id down to the table index; getNumBuckets yields a power of two.
    int index = (int) (bucketId.getId() & (getNumBuckets(numBucketBits) - 1));
    if (index < bucketToColumn.length) {
        return bucketToColumn[index];
    }
    // Table and bit count disagree — log loudly and fall back to column 0.
    log.log(Level.SEVERE, "The bucket distribution map is not in sync with the number of bucket bits. " +
            "This should never happen! Distribution is broken!!");
    return 0;
}
}
/**
 * Wraps the given bucket's id in a BucketKeyWrapper suitable for use as a
 * map key, keyed on the value produced by {@code bucketToKey}.
 * NOTE(review): presumably bucketToKey yields the reversed (key-order) form
 * of the raw id — confirm against its definition.
 *
 * @param bucket The bucket to convert.
 * @return A key-wrapper for the bucket.
 */
private static BucketKeyWrapper bucketToKeyWrapper(BucketId bucket) { return new BucketKeyWrapper(bucketToKey(bucket.getId())); } /*
protected boolean isLosslessResetPossible() { // #pending must be equal to cursor, i.e. all buckets ever fetched // must be located in the set of pending if (progressToken.getPendingBucketCount() != progressToken.getBucketCursor()) { return false; } // Check if all pending buckets have a progress of 0 for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry : progressToken.getBuckets().entrySet()) { if (entry.getValue().getState() != ProgressToken.BucketState.BUCKET_PENDING) { return false; } if (entry.getValue().getProgress().getId() != 0) { return false; } } return true; }
// NOTE(review): fragment of a larger merge routine — the enclosing method and
// the closing brace of this if-block lie outside this chunk.
// Only act when the highest used bit is clear, i.e. `bucket` is the "left"
// sibling; the matching right sibling has that bit set.
if ((bucket.getId() & (1L << (usedBits - 1))) == 0) {
    BucketId rightCheck = new BucketId(usedBits, bucket.getId() | (1L << (usedBits - 1)));
    // rightSibling is not referenced within this chunk; presumably examined
    // further down — confirm against the full method.
    BucketEntry rightSibling = buckets.get(bucketToKeyWrapper(rightCheck));
    // Clearing the high bit of a left sibling yields the bucket itself, so a
    // hit here would mean the bucket is unexpectedly still in the map.
    BucketId leftSanityCheck = new BucketId(usedBits, bucket.getId() & ~(1L << (usedBits - 1)));
    BucketEntry leftSibling = buckets.get(bucketToKeyWrapper(leftSanityCheck));
    assert(leftSibling == null) : "bucket merge sanity checking failed";
    // Replace the split pair with their parent bucket (one fewer used bit),
    // carrying over the entry's progress and keeping it pending.
    BucketId newMerged = new BucketId(usedBits - 1, bucket.getId());
    addBucket(newMerged, entry.getProgress(), BucketState.BUCKET_PENDING);
/** * @param superbucket The superbucket of which <code>progress</code> is * a sub-bucket * @param progress The sub-bucket for which a fractional progress should * be calculated * @return a value in [0, 1] specifying how far the (sub-bucket) has * reached in its superbucket. This is calculated by looking at the * bucket's split factor. */ public synchronized double progressFraction(BucketId superbucket, BucketId progress) { long revBits = bucketToKey(progress.getId()); int superUsed = superbucket.getUsedBits(); int progressUsed = progress.getUsedBits(); if (progressUsed == 0 || progressUsed < superUsed) { return 0; } int splitCount = progressUsed - superUsed; if (splitCount == 0) return 1; // Superbucket or inconsistent used-bits // Extract reversed split-bits revBits <<= superUsed; revBits >>>= 64 - splitCount; return (double)(revBits + 1) / (double)(1L << splitCount); }
// NOTE(review): fragment of a progress-estimation loop — the enclosing loop
// and the closing brace of this if-block lie outside this chunk.
BucketId progress = entry.getValue().getProgress();
// An id of 0 appears to be the "no progress yet" sentinel (cf. the reset
// check elsewhere in this file) — TODO confirm. Only sub-buckets contained
// in this superbucket contribute to its weighted progress.
if (progress.getId() != 0 && superbucket.contains(progress)) {
    cumulativeSubProgress += superDelta * progressFraction(superbucket, progress);
/**
 * Feeds {@code numDocs} randomly generated documents (sized between
 * {@code minSize} and {@code maxSize}) into the given bucket at timestamps
 * 1000, 1001, ..., asserting that every put and the final flush succeed.
 *
 * @return the fed documents paired with their timestamps.
 */
List<DocEntry> feedDocs(PersistenceProvider spi, Bucket bucket, int numDocs, int minSize, int maxSize) {
    List<DocEntry> entries = new ArrayList<DocEntry>(numDocs);
    for (int n = 0; n < numDocs; ++n) {
        Document doc = testDocMan.createRandomDocumentAtLocation(
                bucket.getBucketId().getId(), n, minSize, maxSize);
        Result putResult = spi.put(bucket, 1000 + n, doc);
        assertTrue(!putResult.hasError());
        entries.add(new DocEntry(1000 + n, doc));
    }
    // Flush must complete without error before the docs count as fed.
    assertEquals(new Result(), spi.flush(bucket));
    return entries;
}
// NOTE(review): fragment — the first line below continues a BucketId(...)
// construction begun above this chunk (presumably assigning `rightCheck`);
// the matching closing braces also lie outside this chunk.
lastMergedBucket.getId() | (1L << (lastMergedBucket.getUsedBits() - 1)));
// True when `pending` is the right sibling (high used bit set) of the most
// recently merged bucket — confirm against the surrounding merge logic.
if (pending.equals(rightCheck)) {
    if (log.isLoggable(LogLevel.SPAM)) {