/**
 * Builds the index summary with no exclusive-boundary override.
 *
 * @param partitioner the partitioner used to order and decorate summary keys
 * @return the constructed {@link IndexSummary}
 */
public IndexSummary build(IPartitioner partitioner)
{
    return build(partitioner, null);
}
/**
 * @return the last boundary (key plus index/data offsets) up to which flushed
 *         content is known to be safely readable, as tracked by the summary builder.
 */
IndexSummaryBuilder.ReadableBoundary getMaxReadable()
{
    return summary.getLastReadableBoundary();
}
// NOTE(review): fragment — this is the body of an anonymous Runnable registered as a
// post-flush listener; the opening `setPostFlushListener(new Runnable() {` and the scope
// closed by the trailing `} });` are outside this view. On each data-file flush it tells
// the summary how far the data file is durable on disk.
public void run() { summary.markDataSynced(dataFile.getLastFlushOffset()); } });
private IndexSummary buildSummaryAtLevel(int newSamplingLevel) throws IOException { // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary. RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX))); try { long indexSize = primaryIndex.length(); try (IndexSummaryBuilder summaryBuilder = new IndexSummaryBuilder(estimatedKeys(), metadata.params.minIndexInterval, newSamplingLevel)) { long indexPosition; while ((indexPosition = primaryIndex.getFilePointer()) != indexSize) { summaryBuilder.maybeAddEntry(decorateKey(ByteBufferUtil.readWithShortLength(primaryIndex)), indexPosition); RowIndexEntry.Serializer.skip(primaryIndex, descriptor.version); } return summaryBuilder.build(getPartitioner()); } } finally { FileUtils.closeQuietly(primaryIndex); } }
IndexWriter(long keyCount) { indexFile = new SequentialWriter(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), writerOption); builder = new FileHandle.Builder(descriptor.filenameFor(Component.PRIMARY_INDEX)).mmapped(DatabaseDescriptor.getIndexAccessMode() == Config.DiskAccessMode.mmap); chunkCache.ifPresent(builder::withChunkCache); summary = new IndexSummaryBuilder(keyCount, metadata.params.minIndexInterval, Downsampling.BASE_SAMPLING_LEVEL); bf = FilterFactory.getFilter(keyCount, metadata.params.bloomFilterFpChance, true, descriptor.version.hasOldBfHashOrder()); // register listeners to be alerted when the data files are flushed indexFile.setPostFlushListener(() -> summary.markIndexSynced(indexFile.getLastFlushOffset())); dataFile.setPostFlushListener(() -> summary.markDataSynced(dataFile.getLastFlushOffset())); }
protected void doPrepare() { flushBf(); // truncate index file //long position = indexFile.position(); indexFile.prepareToCommit(); //FileUtils.truncate(indexFile.getPath(), position); // save summary summary.prepareToCommit(); try (IndexSummary indexSummary = summary.build(getPartitioner())) { SSTableReader.saveSummary(descriptor, first, last, indexSummary); } }
// NOTE(review): garbled fragment — the bare "sstable, effectiveIndexInterval, maxIndexInterval);"
// mid-line is the argument tail of a call (likely a logging statement) whose opening was lost
// when this snippet was extracted; the line does not compile as-is and is kept byte-identical.
// Intent visible from the code: compute a candidate sampling level for the sstable, then, when
// forcing an upsample, clamp the level to respect maxIndexInterval, recompute the entry count,
// estimate the summary's space from the average entry size, and queue a ResampleEntry.
int newSamplingLevel = IndexSummaryBuilder.calculateSamplingLevel(currentSamplingLevel, currentNumEntries, targetNumEntries, minIndexInterval, maxIndexInterval); int numEntriesAtNewSamplingLevel = IndexSummaryBuilder.entriesAtSamplingLevel(newSamplingLevel, maxSummarySize); double effectiveIndexInterval = sstable.getEffectiveIndexInterval(); sstable, effectiveIndexInterval, maxIndexInterval); newSamplingLevel = Math.max(1, (BASE_SAMPLING_LEVEL * minIndexInterval) / maxIndexInterval); numEntriesAtNewSamplingLevel = IndexSummaryBuilder.entriesAtSamplingLevel(newSamplingLevel, sstable.getMaxIndexSummarySize()); long spaceUsed = (long) Math.ceil(avgEntrySize * numEntriesAtNewSamplingLevel); forceUpsample.add(new ResampleEntry(sstable, spaceUsed, newSamplingLevel));
/**
 * Records that the index file is durable on disk up to {@code upToPosition},
 * then recomputes the last safely readable boundary.
 */
public void markIndexSynced(long upToPosition)
{
    indexSyncPosition = upToPosition;
    refreshReadableBoundary();
}
/**
 * Convenience overload that offers a key to the summary without readable-boundary
 * tracking: index/data end positions of 0 disable boundary bookkeeping.
 */
public IndexSummaryBuilder maybeAddEntry(DecoratedKey decoratedKey, long indexStart) throws IOException
{
    return maybeAddEntry(decoratedKey, indexStart, 0, 0);
}
// NOTE(review): fragment — the enclosing method is not visible here. The check guards the
// entries buffer against growing past Integer.MAX_VALUE (entry offsets are written as ints
// elsewhere in this builder), only scheduling the next sample position when this key still
// fits. Presumably entries.length() + getEntrySize(...) cannot itself overflow a long —
// confirm against the enclosing method.
if ((entries.length() + getEntrySize(decoratedKey)) <= Integer.MAX_VALUE) setNextSamplePosition(keysWritten);
/**
 * Releases the summary builder, bloom filter and file-handle builder in order,
 * folding any close failure into the accumulated Throwable chain rather than throwing.
 *
 * @param accumulate previously accumulated failures (may be null)
 * @return the accumulated failures including any raised while closing
 */
@Override
protected Throwable doPostCleanup(Throwable accumulate)
{
    Throwable merged = accumulate;
    merged = summary.close(merged);
    merged = bf.close(merged);
    merged = builder.close(merged);
    return merged;
}
} // NOTE(review): closes an enclosing declaration whose opening is outside this view
/** * * @param decoratedKey the key for this record * @param indexStart the position in the index file this record begins * @param indexEnd the position in the index file we need to be able to read to (exclusive) to read this record * @param dataEnd the position in the data file we need to be able to read to (exclusive) to read this record * a value of 0 indicates we are not tracking readable boundaries */ public IndexSummaryBuilder maybeAddEntry(DecoratedKey decoratedKey, long indexStart, long indexEnd, long dataEnd) { if (keysWritten == nextSamplePosition) { assert entries.length() <= Integer.MAX_VALUE; offsets.writeInt((int) entries.length()); entries.write(decoratedKey.getKey()); entries.writeLong(indexStart); setNextSamplePosition(keysWritten); } else if (dataEnd != 0 && keysWritten + 1 == nextSamplePosition) { // this is the last key in this summary interval, so stash it ReadableBoundary boundary = new ReadableBoundary(decoratedKey, indexEnd, dataEnd, (int)(offsets.length() / 4), entries.length()); lastReadableByData.put(dataEnd, boundary); lastReadableByIndex.put(indexEnd, boundary); } keysWritten++; return this; }
// NOTE(review): fragment — body of an anonymous Runnable registered as a post-flush
// listener; the opening `setPostFlushListener(new Runnable() {` and the scope closed by
// the trailing `} });` are outside this view. On each index-file flush it tells the
// summary how far the index file is durable on disk.
public void run() { summary.markIndexSynced(indexFile.getLastFlushOffset()); } });
/**
 * Legacy index-writer setup: opens the primary-index sequential writer, builds the
 * segmented-file builder for the configured disk access mode, and prepares the summary
 * builder and bloom filter; flush listeners keep the summary's synced offsets current.
 *
 * @param keyCount expected number of partitions, used to size the summary and filter
 * @param dataFile the sstable data writer whose flushes must also be tracked
 */
IndexWriter(long keyCount, final SequentialWriter dataFile)
{
    indexFile = SequentialWriter.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));
    builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    summary = new IndexSummaryBuilder(keyCount, metadata.getMinIndexInterval(), Downsampling.BASE_SAMPLING_LEVEL);
    bf = FilterFactory.getFilter(keyCount, metadata.getBloomFilterFpChance(), true);
    // Advance the summary's synced offsets whenever either file completes a flush.
    indexFile.setPostFlushListener(() -> summary.markIndexSynced(indexFile.getLastFlushOffset()));
    dataFile.setPostFlushListener(() -> summary.markDataSynced(dataFile.getLastFlushOffset()));
}
private IndexSummary buildSummaryAtLevel(int newSamplingLevel) throws IOException { // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary. RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX))); try { long indexSize = primaryIndex.length(); try (IndexSummaryBuilder summaryBuilder = new IndexSummaryBuilder(estimatedKeys(), metadata.params.minIndexInterval, newSamplingLevel)) { long indexPosition; while ((indexPosition = primaryIndex.getFilePointer()) != indexSize) { summaryBuilder.maybeAddEntry(decorateKey(ByteBufferUtil.readWithShortLength(primaryIndex)), indexPosition); RowIndexEntry.Serializer.skip(primaryIndex, descriptor.version); } return summaryBuilder.build(getPartitioner()); } } finally { FileUtils.closeQuietly(primaryIndex); } }
/**
 * Initialises the primary-index writer, mmapped-or-not file-handle builder, summary
 * builder and bloom filter for an sstable of roughly {@code keyCount} partitions,
 * registering flush listeners that advance the summary's durable offsets.
 *
 * NOTE(review): relies on the enclosing writer's dataFile, metadata, descriptor and
 * chunkCache fields being set before this constructor runs — confirm in context.
 */
IndexWriter(long keyCount)
{
    indexFile = new SequentialWriter(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), writerOption);
    builder = new FileHandle.Builder(descriptor.filenameFor(Component.PRIMARY_INDEX))
              .mmapped(DatabaseDescriptor.getIndexAccessMode() == Config.DiskAccessMode.mmap);
    chunkCache.ifPresent(builder::withChunkCache);
    summary = new IndexSummaryBuilder(keyCount, metadata.params.minIndexInterval, Downsampling.BASE_SAMPLING_LEVEL);
    bf = FilterFactory.getFilter(keyCount, metadata.params.bloomFilterFpChance, true, descriptor.version.hasOldBfHashOrder());
    // Track durable (flushed) positions of both files as they are synced to disk.
    indexFile.setPostFlushListener(() -> summary.markIndexSynced(indexFile.getLastFlushOffset()));
    dataFile.setPostFlushListener(() -> summary.markDataSynced(dataFile.getLastFlushOffset()));
}
protected void doPrepare() { flushBf(); // truncate index file long position = indexFile.position(); indexFile.prepareToCommit(); FileUtils.truncate(indexFile.getPath(), position); // save summary summary.prepareToCommit(); try (IndexSummary indexSummary = summary.build(getPartitioner())) { SSTableReader.saveSummary(descriptor, first, last, indexSummary); } }
// NOTE(review): garbled fragment (duplicate of the snippet above in this extraction) —
// "sstable, effectiveIndexInterval, maxIndexInterval);" is the orphaned argument tail of a
// call whose opening line was lost; the line does not compile as-is and is kept byte-identical.
// Visible intent: derive a candidate sampling level, clamp it for the forced-upsample case so
// the effective interval respects maxIndexInterval, estimate the resulting summary size, and
// queue the sstable for resampling.
int newSamplingLevel = IndexSummaryBuilder.calculateSamplingLevel(currentSamplingLevel, currentNumEntries, targetNumEntries, minIndexInterval, maxIndexInterval); int numEntriesAtNewSamplingLevel = IndexSummaryBuilder.entriesAtSamplingLevel(newSamplingLevel, maxSummarySize); double effectiveIndexInterval = sstable.getEffectiveIndexInterval(); sstable, effectiveIndexInterval, maxIndexInterval); newSamplingLevel = Math.max(1, (BASE_SAMPLING_LEVEL * minIndexInterval) / maxIndexInterval); numEntriesAtNewSamplingLevel = IndexSummaryBuilder.entriesAtSamplingLevel(newSamplingLevel, sstable.getMaxIndexSummarySize()); long spaceUsed = (long) Math.ceil(avgEntrySize * numEntriesAtNewSamplingLevel); forceUpsample.add(new ResampleEntry(sstable, spaceUsed, newSamplingLevel));
/**
 * Notes the durable extent of the index file and refreshes the readable boundary
 * derived from the index and data sync positions.
 *
 * @param upToPosition offset (exclusive) up to which the index file has been synced
 */
public void markIndexSynced(long upToPosition)
{
    indexSyncPosition = upToPosition;
    refreshReadableBoundary();
}