    public IntervalSet<T> build()
    {
        return new IntervalSet<T>(ImmutableSortedMap.copyOfSorted(ranges));
    }
}
public Optional<T> lowerBound()
{
    return isEmpty() ? Optional.empty() : Optional.of(ranges.firstKey());
}
/**
 * Consult the known-persisted ranges for our sstables;
 * if the position is covered by one of them it does not need to be replayed.
 *
 * @return true iff replay is necessary
 */
private boolean shouldReplay(UUID cfId, CommitLogPosition position)
{
    return !cfPersisted.get(cfId).contains(position);
}
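/*
 * Illustrative sketch, not from the original source: shouldReplay() delegates to
 * IntervalSet.contains(position). Assuming the interval set is backed by a sorted
 * start -> end map (as the builder above suggests), the covering check needs only a
 * single floorEntry lookup. The class and method names here are hypothetical.
 */
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

final class CoverageSketch
{
    static <T extends Comparable<? super T>> boolean coveredBy(NavigableMap<T, T> ranges, T position)
    {
        // the interval with the greatest start <= position is the only one that can cover it
        Map.Entry<T, T> candidate = ranges.floorEntry(position);
        return candidate != null && candidate.getValue().compareTo(position) >= 0;
    }

    public static void main(String[] args)
    {
        NavigableMap<Integer, Integer> persisted = new TreeMap<>();
        persisted.put(0, 10);   // flushed range [0, 10]
        persisted.put(20, 30);  // flushed range [20, 30]
        System.out.println(coveredBy(persisted, 5));   // true  -> no replay needed
        System.out.println(coveredBy(persisted, 15));  // false -> must replay
    }
}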
public int serializedSize(Version version, StatsMetadata component) throws IOException
{
    int size = 0;
    size += EstimatedHistogram.serializer.serializedSize(component.estimatedPartitionSize);
    size += EstimatedHistogram.serializer.serializedSize(component.estimatedColumnCount);
    size += CommitLogPosition.serializer.serializedSize(component.commitLogIntervals.upperBound().orElse(CommitLogPosition.NONE));
    if (version.storeRows())
        size += 8 + 8 + 4 + 4 + 4 + 4 + 8 + 8; // min/max timestamp (long), min/max localDeletionTime (int), min/max TTL (int), compressionRatio (double), repairedAt (long)
    else
        size += 8 + 8 + 4 + 8 + 8; // min/max timestamp (long), maxLocalDeletionTime (int), compressionRatio (double), repairedAt (long)
    size += StreamingHistogram.serializer.serializedSize(component.estimatedTombstoneDropTime);
    size += TypeSizes.sizeof(component.sstableLevel);
    // min column names
    size += 4;
    for (ByteBuffer value : component.minClusteringValues)
        size += 2 + value.remaining(); // with short length
    // max column names
    size += 4;
    for (ByteBuffer value : component.maxClusteringValues)
        size += 2 + value.remaining(); // with short length
    size += TypeSizes.sizeof(component.hasLegacyCounterShards);
    if (version.storeRows())
        size += 8 + 8; // totalColumnsSet, totalRows
    if (version.hasCommitLogLowerBound())
        size += CommitLogPosition.serializer.serializedSize(component.commitLogIntervals.lowerBound().orElse(CommitLogPosition.NONE));
    if (version.hasCommitLogIntervals())
        size += commitLogPositionSetSerializer.serializedSize(component.commitLogIntervals);
    return size;
}
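/*
 * Worked example, illustrative only: each clustering-value section above costs 4 bytes
 * for the list count plus (2 + length) per value, i.e. a 2-byte "short length" prefix
 * followed by the raw bytes. The helper below is a hypothetical stand-in for that
 * arithmetic, not part of the original serializer.
 */
import java.nio.ByteBuffer;
import java.util.List;

final class ClusteringSizeSketch
{
    static int clusteringValuesSize(List<ByteBuffer> values)
    {
        int size = 4;                        // number of values (int)
        for (ByteBuffer value : values)
            size += 2 + value.remaining();   // short length prefix + payload
        return size;
    }

    public static void main(String[] args)
    {
        List<ByteBuffer> values = List.of(ByteBuffer.wrap(new byte[]{ 1, 2, 3 }),
                                          ByteBuffer.wrap(new byte[]{ 4 }));
        // 4 + (2 + 3) + (2 + 1) = 12
        System.out.println(clusteringValuesSize(values));
    }
}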
/**
 * Find the earliest commit log position that is not covered by the known flushed ranges for some table.
 *
 * For efficiency this assumes that the first contiguously flushed interval we know of contains the moment that the
 * given table was constructed* and hence we can start replay from the end of that interval.
 *
 * If such an interval is not known, we must replay from the beginning.
 *
 * * This is untrue only if the very first flush of a table stalled or failed, while a second or later flush
 *   succeeded. The chances of this happening are very low, and if the assumption does prove to be incorrect
 *   during replay there is little chance that the affected deployment is in production.
 */
public static CommitLogPosition firstNotCovered(Collection<IntervalSet<CommitLogPosition>> ranges)
{
    return ranges.stream()
                 .map(intervals -> Iterables.getFirst(intervals.ends(), CommitLogPosition.NONE))
                 .min(Ordering.natural())
                 .get(); // iteration is per known-CF, so there must be at least one element
}
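/*
 * Minimal sketch of the same logic, illustrative only: for each table take the end of its
 * first flushed interval (or NONE if it has none) and replay from the smallest of those.
 * Integer positions stand in for CommitLogPosition, and plain streams replace the guava
 * calls; all names below are hypothetical.
 */
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

final class FirstNotCoveredSketch
{
    static final int NONE = 0; // stand-in for CommitLogPosition.NONE (replay everything)

    static int firstNotCovered(List<NavigableMap<Integer, Integer>> perTableRanges)
    {
        return perTableRanges.stream()
                             .mapToInt(ranges -> ranges.isEmpty() ? NONE : ranges.firstEntry().getValue())
                             .min()
                             .getAsInt(); // one entry per known table, so never empty
    }

    public static void main(String[] args)
    {
        NavigableMap<Integer, Integer> tableA = new TreeMap<>();
        tableA.put(2, 10);   // first flushed interval ends at 10
        tableA.put(20, 30);
        NavigableMap<Integer, Integer> tableB = new TreeMap<>();
        tableB.put(1, 7);    // first flushed interval ends at 7
        System.out.println(firstNotCovered(List.of(tableA, tableB))); // 7
    }
}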
public static StatsMetadata defaultStatsMetadata()
{
    return new StatsMetadata(defaultPartitionSizeHistogram(),
                             defaultCellPerPartitionCountHistogram(),
                             IntervalSet.empty(),
                             Long.MIN_VALUE,
                             Long.MAX_VALUE,
                             Integer.MAX_VALUE,
                             Integer.MAX_VALUE,
                             0,
                             Integer.MAX_VALUE,
                             NO_COMPRESSION_RATIO,
                             defaultTombstoneDropTimeHistogramBuilder().build(),
                             0,
                             Collections.<ByteBuffer>emptyList(),
                             Collections.<ByteBuffer>emptyList(),
                             true,
                             ActiveRepairService.UNREPAIRED_SSTABLE,
                             -1,
                             -1);
}
public Optional<T> upperBound()
{
    return isEmpty() ? Optional.empty() : Optional.of(ranges.lastEntry().getValue());
}
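/*
 * Usage sketch, hypothetical, mirroring the two accessors above: with the start -> end
 * map kept sorted, the set's lower bound is the first start and its upper bound is the
 * last interval's end. The ImmutableSortedMap produced by build() preserves exactly
 * this ordering.
 */
import java.util.NavigableMap;
import java.util.Optional;
import java.util.TreeMap;

final class BoundsSketch
{
    public static void main(String[] args)
    {
        NavigableMap<Integer, Integer> ranges = new TreeMap<>();
        ranges.put(5, 12);
        ranges.put(40, 55);
        Optional<Integer> lower = Optional.of(ranges.firstKey());              // 5
        Optional<Integer> upper = Optional.of(ranges.lastEntry().getValue());  // 55
        System.out.println(lower.get() + " .. " + upper.get());                // "5 .. 55"
    }
}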
CommitLogPosition.serializer.serialize(stats.commitLogIntervals.upperBound().orElse(CommitLogPosition.NONE), out);
out.writeLong(stats.minTimestamp);
out.writeLong(stats.maxTimestamp);
// written once per min/max clustering value in the enclosing loop (elided here)
ByteBufferUtil.writeWithShortLength(value, out);
if (version.hasCommitLogLowerBound())
    CommitLogPosition.serializer.serialize(stats.commitLogIntervals.lowerBound().orElse(CommitLogPosition.NONE), out);
if (version.hasCommitLogIntervals())
    commitLogPositionSetSerializer.serialize(stats.commitLogIntervals, out);
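/*
 * Consistency sketch, illustrative only: whatever serialize() writes must match what
 * serializedSize() counted. For clustering values that convention is a 2-byte length
 * prefix followed by the payload; the sketch below reimplements it with standard
 * streams to show the byte count lining up with 2 + remaining(). All names here are
 * hypothetical stand-ins, not the ByteBufferUtil implementation.
 */
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

final class ShortLengthSketch
{
    static void writeWithShortLength(ByteBuffer value, DataOutputStream out) throws IOException
    {
        int length = value.remaining();
        out.writeShort(length);                                               // 2-byte length prefix
        out.write(value.array(), value.arrayOffset() + value.position(), length);
    }

    public static void main(String[] args) throws IOException
    {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        ByteBuffer value = ByteBuffer.wrap(new byte[]{ 1, 2, 3 });
        writeWithShortLength(value, out);
        System.out.println(bytes.size() == 2 + 3);                            // true: matches the size arithmetic
    }
}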