/**
 * Creates an SSTableMultiWriter for the given descriptor, stamping the
 * requested compaction level on the new sstable's metadata.
 *
 * @param descriptor   descriptor of the sstable to create
 * @param keyCount     estimated number of partition keys to be written
 * @param repairedAt   repair timestamp to record in the sstable metadata
 * @param sstableLevel leveled-compaction level for the new sstable
 * @param header       serialization header describing the sstable layout
 * @param txn          lifecycle transaction tracking this write
 * @return a multi-writer obtained from the collector-based overload
 */
public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, long keyCount, long repairedAt, int sstableLevel, SerializationHeader header, LifecycleTransaction txn)
{
    // Seed a collector with the table comparator, then stamp the level on it.
    MetadataCollector levelledCollector = new MetadataCollector(metadata.comparator);
    levelledCollector.sstableLevel(sstableLevel);
    return createSSTableMultiWriter(descriptor, keyCount, repairedAt, levelledCollector, header, txn);
}
/**
 * Prepare phase of the two-phase commit for this compressed writer:
 * syncs buffered data to disk, writes the full-file checksum when a digest
 * component is present, records the achieved compression ratio, and
 * finalizes the compression-chunk metadata so it is ready for commit.
 * The statement order matters: data must be synced before the checksum
 * is written and before the metadata length is sealed.
 */
@Override
protected void doPrepare()
{
    // Flush buffered data so the on-disk content and length are final.
    syncInternal();
    // Only write the whole-file checksum if a digest file was configured.
    digestFile.ifPresent(crcMetadata::writeFullChecksum);
    // compressedSize / uncompressedSize were accumulated during writes.
    sstableMetadataCollector.addCompressionRatio(compressedSize, uncompressedSize);
    // Seal the chunk-offset metadata at the current length and stage it for commit.
    metadataWriter.finalizeLength(current(), chunkCount).prepareToCommit();
}
/**
 * Bookkeeping performed after a partition has been appended to the data file:
 * records the key with the metadata collector, tracks the first/last written
 * keys, and appends the corresponding index entry.
 *
 * @param decoratedKey key of the partition just written
 * @param dataEnd      data-file position at which the partition ends
 * @param index        row index entry for the partition
 * @param indexInfo    serialized index info to append to the index file
 * @throws IOException if writing the index entry fails
 */
private void afterAppend(DecoratedKey decoratedKey, long dataEnd, RowIndexEntry index, ByteBuffer indexInfo) throws IOException
{
    metadataCollector.addKey(decoratedKey.getKey());
    lastWrittenKey = decoratedKey;
    last = lastWrittenKey;
    // The very first appended key becomes the sstable's first key.
    if (first == null)
        first = lastWrittenKey;
    if (logger.isTraceEnabled())
        logger.trace("wrote {} at {}", decoratedKey, dataEnd);
    iwriter.append(decoratedKey, index, dataEnd, indexInfo);
}
/**
 * Builds the SSTableWriter used to flush this memtable to disk. The sstable
 * is marked unrepaired and its metadata records the commit log replay
 * position captured for this flush.
 *
 * @param filename path of the sstable data file to create
 * @return a writer sized for the number of rows being flushed
 */
public SSTableWriter createFlushWriter(String filename) throws ExecutionException, InterruptedException
{
    MetadataCollector flushCollector = new MetadataCollector(cfs.metadata.comparator).replayPosition(context);
    return new SSTableWriter(filename,
                             rows.size(),
                             ActiveRepairService.UNREPAIRED_SSTABLE,
                             cfs.metadata,
                             cfs.partitioner,
                             flushCollector);
}
}
/**
 * Creates a collector pre-populated from the sstables feeding a compaction:
 * inherits their replay position, records the requested level, and registers
 * as ancestors the input generations plus any of their own ancestors whose
 * data files still exist on disk.
 *
 * @param sstables             input sstables being compacted
 * @param columnNameComparator comparator for the table's column names
 * @param level                leveled-compaction level for the output sstable
 */
public MetadataCollector(Collection<SSTableReader> sstables, CellNameType columnNameComparator, int level)
{
    this(columnNameComparator);
    replayPosition(ReplayPosition.getReplayPosition(sstables));
    sstableLevel(level);
    for (SSTableReader reader : sstables)
    {
        // Each input sstable is a direct ancestor of the compaction output.
        addAncestor(reader.descriptor.generation);
        // Carry forward ancestors of the inputs that are still live on disk.
        for (Integer ancestorGeneration : reader.getAncestors())
        {
            File ancestorData = new File(reader.descriptor.withGeneration(ancestorGeneration).filenameFor(Component.DATA));
            if (ancestorData.exists())
                addAncestor(ancestorGeneration);
        }
    }
}
/**
 * Creates the writer for a compaction's output sstable, placed under a
 * temporary path in the given directory and seeded with metadata gathered
 * from the compaction's input sstables.
 *
 * @param sstableDirectory directory the new sstable will live in (non-null)
 * @param keysPerSSTable   estimated number of keys for the output sstable
 * @param repairedAt       repair timestamp to record in the metadata
 * @return a writer for the compaction output
 */
private SSTableWriter createCompactionWriter(File sstableDirectory, long keysPerSSTable, long repairedAt)
{
    assert sstableDirectory != null;
    MetadataCollector collector = new MetadataCollector(sstables, cfs.metadata.comparator, getLevel());
    String tempPath = cfs.getTempSSTablePath(sstableDirectory);
    return new SSTableWriter(tempPath, keysPerSSTable, repairedAt, cfs.metadata, cfs.partitioner, collector);
}
/**
 * Creates a collector for an sstable produced from the given inputs: merges
 * the commit log intervals covered by every input sstable and stamps the
 * requested compaction level.
 *
 * @param sstables   input sstables whose commit log coverage is inherited
 * @param comparator clustering comparator for the table
 * @param level      leveled-compaction level for the output sstable
 */
public MetadataCollector(Iterable<SSTableReader> sstables, ClusteringComparator comparator, int level)
{
    this(comparator);
    IntervalSet.Builder<CommitLogPosition> covered = new IntervalSet.Builder<>();
    for (SSTableReader input : sstables)
        covered.addAll(input.getSSTableMetadata().commitLogIntervals);
    commitLogIntervals(covered.build());
    sstableLevel(level);
}
/**
 * Folds a range tombstone marker's clustering values and deletion time(s)
 * into the stats collector, then passes the marker through unchanged.
 *
 * @param marker the marker being transformed (returned as-is)
 * @return the same marker instance
 */
@Override
public RangeTombstoneMarker applyToMarker(RangeTombstoneMarker marker)
{
    collector.updateClusteringValues(marker.clustering());
    if (!marker.isBoundary())
    {
        collector.update(((RangeTombstoneBoundMarker) marker).deletionTime());
    }
    else
    {
        // A boundary carries two deletion times: one closing a range, one opening the next.
        RangeTombstoneBoundaryMarker boundary = (RangeTombstoneBoundaryMarker) marker;
        collector.update(boundary.endDeletionTime());
        collector.update(boundary.startDeletionTime());
    }
    return marker;
}
/**
 * Builds the multi-writer used to flush this memtable. The sstable's
 * metadata records the commit log interval covered by the flushed data,
 * and the sstable is marked unrepaired.
 *
 * @param txn      lifecycle transaction tracking this flush
 * @param filename path of the sstable data file to create
 * @param columns  partition columns present in the flushed data
 * @param stats    encoding stats for the serialization header
 * @return a multi-writer for the flushed sstable
 */
public SSTableMultiWriter createFlushWriter(LifecycleTransaction txn, String filename, PartitionColumns columns, EncodingStats stats)
{
    // Commit log positions covered by this memtable's contents.
    IntervalSet<CommitLogPosition> flushedInterval = new IntervalSet<>(commitLogLowerBound.get(), commitLogUpperBound.get());
    MetadataCollector flushCollector = new MetadataCollector(cfs.metadata.comparator).commitLogIntervals(flushedInterval);
    SerializationHeader header = new SerializationHeader(true, cfs.metadata, columns, stats);
    return cfs.createSSTableMultiWriter(Descriptor.fromFilename(filename),
                                        toFlush.size(),
                                        ActiveRepairService.UNREPAIRED_SSTABLE,
                                        flushCollector,
                                        header,
                                        txn);
}
private SSTableWriter createCompactionWriter(long repairedAt) { MetadataCollector sstableMetadataCollector = new MetadataCollector(cfs.getComparator()); // Get the max timestamp of the precompacted sstables // and adds generation of live ancestors sstableMetadataCollector.addAncestor(sstable.descriptor.generation); for (Integer i : sstable.getAncestors()) { if (new File(sstable.descriptor.withGeneration(i).filenameFor(Component.DATA)).exists()) sstableMetadataCollector.addAncestor(i); } sstableMetadataCollector.sstableLevel(sstable.getSSTableLevel()); return new SSTableWriter(cfs.getTempSSTablePath(directory), estimatedRows, repairedAt, cfs.metadata, cfs.partitioner, sstableMetadataCollector); }
/**
 * Assembles the final metadata components for this sstable from the stats
 * accumulated by the collector during the write.
 *
 * @return map from metadata type to its finalized component
 */
protected Map<MetadataType, MetadataComponent> finalizeMetadata()
{
    String partitionerName = getPartitioner().getClass().getCanonicalName();
    return metadataCollector.finalizeMetadata(partitionerName,
                                              metadata.params.bloomFilterFpChance,
                                              repairedAt,
                                              header);
}
/**
 * Folds a deletion time into the stats collector and returns the deletion
 * unchanged (pass-through transformation).
 */
@Override
public DeletionTime applyToDeletion(DeletionTime deletionTime)
{
    collector.update(deletionTime);
    return deletionTime;
}
}
/**
 * Loads the requested metadata components for an sstable. If the STATS
 * component is missing on disk, returns a map containing only the default
 * stats metadata instead of failing.
 *
 * @param descriptor sstable to load metadata for
 * @param types      which metadata component types to deserialize
 * @return the deserialized (or defaulted) components, keyed by type
 * @throws IOException if reading the stats file fails
 */
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    logger.trace("Load metadata for {}", descriptor);
    File statsFile = new File(descriptor.filenameFor(Component.STATS));
    if (!statsFile.exists())
    {
        // Some sstables may lack a stats component; fall back to defaults.
        logger.trace("No sstable stats for {}", descriptor);
        Map<MetadataType, MetadataComponent> defaults = new EnumMap<>(MetadataType.class);
        defaults.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
        return defaults;
    }
    try (RandomAccessReader reader = RandomAccessReader.open(statsFile))
    {
        return deserialize(descriptor, reader, types);
    }
}
/**
 * Invoked when the current partition has been fully consumed; flushes the
 * accumulated per-partition cell count into the stats collector.
 */
@Override
public void onPartitionClose()
{
    collector.addCellPerPartitionCount(cellCount);
}
// Size of the partition just written, as the span of its data-file positions.
long rowSize = endPosition - startPosition;
// Presumably logs a warning when the partition exceeds a configured size threshold — confirm in helper.
maybeLogLargePartitionWarning(key, rowSize);
// Record the partition size in the sstable's stats metadata.
metadataCollector.addPartitionSizeInBytes(rowSize);
// Perform post-append bookkeeping (index entry, first/last key tracking).
afterAppend(key, endPosition, entry, columnIndexWriter.buffer());
return entry;
protected SSTableWriter getWriter() { return new SSTableWriter( makeFilename(directory, metadata.ksName, metadata.cfName), 0, // We don't care about the bloom filter ActiveRepairService.UNREPAIRED_SSTABLE, metadata, DatabaseDescriptor.getPartitioner(), new MetadataCollector(metadata.comparator)); }
/**
 * Constructs a collector seeded from a set of input sstables: the union of
 * their commit log intervals becomes the output's coverage, and the given
 * compaction level is recorded.
 *
 * @param sstables   sstables contributing data to the new sstable
 * @param comparator clustering comparator for the table
 * @param level      compaction level to stamp on the new sstable
 */
public MetadataCollector(Iterable<SSTableReader> sstables, ClusteringComparator comparator, int level)
{
    this(comparator);
    // Union the commit log coverage of every contributing sstable.
    IntervalSet.Builder<CommitLogPosition> intervalUnion = new IntervalSet.Builder<>();
    for (SSTableReader contributor : sstables)
        intervalUnion.addAll(contributor.getSSTableMetadata().commitLogIntervals);
    commitLogIntervals(intervalUnion.build());
    sstableLevel(level);
}
/**
 * Updates the stats collector with the clustering values and deletion
 * time(s) of the given range tombstone marker; the marker itself is
 * returned untouched.
 *
 * @param marker marker to record stats for
 * @return the same marker instance
 */
@Override
public RangeTombstoneMarker applyToMarker(RangeTombstoneMarker marker)
{
    collector.updateClusteringValues(marker.clustering());
    if (marker.isBoundary())
    {
        // Boundaries close one deletion range and open another: record both times.
        RangeTombstoneBoundaryMarker boundaryMarker = (RangeTombstoneBoundaryMarker) marker;
        collector.update(boundaryMarker.endDeletionTime());
        collector.update(boundaryMarker.startDeletionTime());
        return marker;
    }
    // Plain bound: a single deletion time to record.
    RangeTombstoneBoundMarker boundMarker = (RangeTombstoneBoundMarker) marker;
    collector.update(boundMarker.deletionTime());
    return marker;
}
/**
 * Creates the multi-writer used to flush this memtable to an unrepaired
 * sstable whose metadata records the commit log positions it covers.
 *
 * @param txn      lifecycle transaction tracking this flush
 * @param filename path of the sstable data file to create
 * @param columns  partition columns present in the flushed data
 * @param stats    encoding stats for the serialization header
 * @return a multi-writer for the flushed sstable
 */
public SSTableMultiWriter createFlushWriter(LifecycleTransaction txn, String filename, PartitionColumns columns, EncodingStats stats)
{
    MetadataCollector collector = new MetadataCollector(cfs.metadata.comparator)
                                  .commitLogIntervals(new IntervalSet<>(commitLogLowerBound.get(), commitLogUpperBound.get()));
    Descriptor target = Descriptor.fromFilename(filename);
    return cfs.createSSTableMultiWriter(target,
                                        toFlush.size(),
                                        ActiveRepairService.UNREPAIRED_SSTABLE,
                                        collector,
                                        new SerializationHeader(true, cfs.metadata, columns, stats),
                                        txn);
}
/**
 * Produces the finalized metadata components for the sstable being written,
 * combining the collector's accumulated stats with the partitioner name,
 * bloom filter FP chance, repair time, and serialization header.
 *
 * @return map from metadata type to its finalized component
 */
protected Map<MetadataType, MetadataComponent> finalizeMetadata()
{
    return metadataCollector.finalizeMetadata(getPartitioner().getClass().getCanonicalName(),
                                              metadata.params.bloomFilterFpChance,
                                              repairedAt,
                                              header);
}