@Override protected Unfiltered computeNext() { Unfiltered ret = super.computeNext(); if (firstItemRetrieved) return ret; // Check that the lower bound is not bigger than the first item retrieved firstItemRetrieved = true; if (lowerBound != null && ret != null) assert comparator().compare(lowerBound, ret.clustering()) <= 0 : String.format("Lower bound [%s ]is bigger than first returned value [%s] for sstable %s", lowerBound.toString(sstable.metadata), ret.toString(sstable.metadata), sstable.getFilename()); return ret; }
/**
 * Tells whether the next atom to be returned is a row (as opposed to a range
 * tombstone marker).
 *
 * @throws IllegalStateException if there is no next atom.
 */
public boolean nextIsRow() throws IOException
{
    if (hasNext())
        return next.isRow();

    throw new IllegalStateException();
}
/**
 * Whether this atom is a range tombstone marker, i.e. whether its kind is
 * {@code Kind.RANGE_TOMBSTONE_MARKER}.
 */
default boolean isRangeTombstoneMarker() { return kind() == Kind.RANGE_TOMBSTONE_MARKER; } } // closes the enclosing type, whose declaration is outside this view
@Override public Unfiltered next() { Unfiltered next = nextToOffer != null ? nextToOffer : wrapped.next(); if (next.isRow()) { while (wrapped.hasNext()) { Unfiltered peek = wrapped.next(); if (!peek.isRow() || !next.clustering().equals(peek.clustering())) { nextToOffer = peek; // Offer peek in next call return next; } // Duplicate row, merge it. next = Rows.merge((Row) next, (Row) peek, FBUtilities.nowInSeconds()); } } nextToOffer = null; return next; } }
// NOTE(review): fragment cut from a larger merge loop — the enclosing method is not visible
// here. As written, the `else if` arm repeats the exact condition of the first arm
// (`dataNext.isRow()`), making it unreachable; presumably one of the two tests was meant to
// differ (e.g. check tombNext instead) — TODO confirm against the full method.
if (cmp < 0) if (dataNext.isRow()) next = ((Row) dataNext).filter(cf, activeDeletionTime, false, metadata); else if (dataNext.isRow()) if (tombNext.isRangeTombstoneMarker())
/**
 * Serializes one unfiltered (row or range tombstone marker) to the writer and updates the
 * bookkeeping needed to build the partition's column index: block start clustering/position,
 * last clustering written, previous row start, written count and the currently open
 * range-tombstone marker. Starts a new index block once the current one reaches the
 * configured column index size.
 *
 * @param unfiltered the atom to append.
 * @throws IOException if writing to the underlying writer fails.
 */
private void add(Unfiltered unfiltered) throws IOException
{
    long pos = currentPosition();

    if (firstClustering == null)
    {
        // Beginning of an index block. Remember the start and position
        firstClustering = unfiltered.clustering();
        startPosition = pos;
    }

    // pos - previousRowStart is the delta from the previous atom, used by the serializer.
    UnfilteredSerializer.serializer.serialize(unfiltered, header, writer, pos - previousRowStart, version);

    // notify observers about each new row
    if (!observers.isEmpty())
        observers.forEach((o) -> o.nextUnfilteredCluster(unfiltered));

    lastClustering = unfiltered.clustering();
    previousRowStart = pos;
    ++written;

    if (unfiltered.kind() == Unfiltered.Kind.RANGE_TOMBSTONE_MARKER)
    {
        // Track whether a range tombstone is left open at this point in the stream.
        RangeTombstoneMarker marker = (RangeTombstoneMarker) unfiltered;
        openMarker = marker.isOpen(false) ? marker.openDeletionTime(false) : null;
    }

    // if we hit the column index size that we have to index after, go ahead and index it.
    if (currentPosition() - startPosition >= DatabaseDescriptor.getColumnIndexSize())
        addIndexBlock();
}
/**
 * Reads the next non-empty unfiltered from the deserializer, stopping (returning
 * {@code null}) once the next atom would be at or past the slice end. Range tombstone
 * markers update the open-marker state before being returned.
 */
protected Unfiltered computeNext() throws IOException
{
    assert deserializer != null;

    while (true)
    {
        // We use the same reasoning as in handlePreSliceData regarding the strictness of the inequality
        // below. We want to exclude deserialized unfiltered equal to end, because 1) we won't miss any
        // rows since those wouldn't be equal to a slice bound and 2) an end bound can be equal to a start
        // bound (EXCL_END(x) == INCL_START(x) for instance) and in that case we don't want to return the
        // start bound because it's fundamentally excluded. And if the bound is an end (for a range
        // tombstone), it means it's exactly our slice end, but in that case we will properly close the
        // range tombstone anyway as part of our "close an open marker" code in hasNextInternal
        if (!deserializer.hasNext() || deserializer.compareNextTo(end) >= 0)
            return null;

        Unfiltered next = deserializer.readNext();
        // We may get empty row for the same reason expressed on UnfilteredSerializer.deserializeOne.
        if (next.isEmpty())
            continue;

        if (next.kind() == Unfiltered.Kind.RANGE_TOMBSTONE_MARKER)
            updateOpenMarker((RangeTombstoneMarker) next);
        return next;
    }
}
/**
 * Returns the next atom from the underlying iterator, repairing any row whose local
 * expiration time is negative (logging it and counting it in the metrics) before
 * handing it back.
 */
protected Unfiltered computeNext()
{
    if (!iterator.hasNext())
        return endOfData();

    Unfiltered unfiltered = iterator.next();
    if (unfiltered.isRow())
    {
        Row row = (Row) unfiltered;
        if (hasNegativeLocalExpirationTime(row))
        {
            outputHandler.debug(String.format("Found row with negative local expiration time: %s", row.toString(metadata(), false)));
            negativeLocalExpirationTimeMetrics.fixedRows++;
            return fixNegativeLocalExpirationTime(row);
        }
    }
    return unfiltered;
}
/**
 * Compares the clustering of the next atom to the provided bound using the table's
 * clustering comparator.
 *
 * @param bound the bound to compare the next atom's clustering against.
 * @return a negative, zero or positive integer per the comparator contract.
 * @throws IllegalStateException if there is no next atom.
 */
public int compareNextTo(ClusteringBound bound) throws IOException
{
    if (hasNext())
        return metadata.comparator.compare(next.clustering(), bound);

    throw new IllegalStateException();
}
/**
 * Builds the next batch of view mutations from the remaining base-table updates.
 * Skips range tombstone markers and keeps consuming updates until one produces a
 * non-empty set of mutations.
 *
 * @return the next non-empty collection of mutations, or {@code null} once the
 *         updates iterator is exhausted without producing any.
 */
private Collection<Mutation> buildNext()
{
    while (updatesIter.hasNext())
    {
        Unfiltered update = updatesIter.next();
        // If it's a range tombstone, it removes nothing pre-existing, so we can ignore it for view updates
        if (update.isRangeTombstoneMarker())
            continue;

        Row updateRow = (Row) update;
        addToViewUpdateGenerators(emptyRow(updateRow.clustering(), existingsDeletion.currentDeletion()), updateRow, generators, nowInSec);

        // If the updates have been filtered, then we won't have any mutations; we need to make sure that we
        // only return if the mutations are non-empty. Otherwise, we continue to search for an update which is
        // not filtered
        Collection<Mutation> mutations = buildMutations(baseTableMetadata, generators);
        if (!mutations.isEmpty())
            return mutations;
    }
    return null;
}
// NOTE(review): fragment — the enclosing method is not visible. Buffers the atom unless it is
// empty, or it is the first atom and includeFirst is false. The open-marker state is updated
// for any range tombstone marker regardless of whether it was buffered.
if (!unfiltered.isEmpty() && (!isFirst || includeFirst)) buffer.add(unfiltered); if (unfiltered.isRangeTombstoneMarker()) updateOpenMarker((RangeTombstoneMarker)unfiltered);
/**
 * Renders a partition for debug output: a header line with keyspace/table, partition key,
 * partition-level deletion and columns, followed by the static row (if any) and every
 * remaining atom, one per line. Note that this consumes the iterator.
 */
private String toDebugString(UnfilteredRowIterator partition, CFMetaData metadata)
{
    StringBuilder out = new StringBuilder();

    String header = String.format("[%s.%s] key=%s partition_deletion=%s columns=%s",
                                  metadata.ksName,
                                  metadata.cfName,
                                  metadata.getKeyValidator().getString(partition.partitionKey().getKey()),
                                  partition.partitionLevelDeletion(),
                                  partition.columns());
    out.append(header);

    if (partition.staticRow() != Rows.EMPTY_STATIC_ROW)
        out.append("\n ").append(partition.staticRow().toString(metadata, true));

    while (partition.hasNext())
        out.append("\n ").append(partition.next().toString(metadata, true));

    return out.toString();
}
/** * Deserialize an {@link Unfiltered} from the provided input. * * @param in the input from which to deserialize. * @param header serialization header corresponding to the serialized data. * @param helper the helper to use for deserialization. * @param builder a row builder, passed here so we don't allocate a new one for every new row. * @return the deserialized {@link Unfiltered} or {@code null} if we've read the end of a partition. This method is * guaranteed to never return empty rows. */ public Unfiltered deserialize(DataInputPlus in, SerializationHeader header, SerializationHelper helper, Row.Builder builder) throws IOException { while (true) { Unfiltered unfiltered = deserializeOne(in, header, helper, builder); if (unfiltered == null) return null; // Skip empty rows, see deserializeOne javadoc if (!unfiltered.isEmpty()) return unfiltered; } }
// Fold this atom into the running digest. (fragment — the enclosing loop/method is not visible here)
unfiltered.digest(digest);
// NOTE(review): fragment cut from a larger merge loop — the enclosing method is not visible
// here. As written, the `else if` arm repeats the exact condition of the first arm
// (`dataNext.isRow()`), making it unreachable; presumably one of the two tests was meant to
// differ (e.g. check tombNext instead) — TODO confirm against the full method.
if (cmp < 0) if (dataNext.isRow()) next = ((Row) dataNext).filter(cf, activeDeletionTime, false, metadata); else if (dataNext.isRow()) if (tombNext.isRangeTombstoneMarker())
/**
 * Serializes one unfiltered (row or range tombstone marker) to the writer and updates the
 * bookkeeping needed to build the partition's column index: block start clustering/position,
 * last clustering written, previous row start, written count and the currently open
 * range-tombstone marker. Starts a new index block once the current one reaches the
 * configured column index size.
 *
 * @param unfiltered the atom to append.
 * @throws IOException if writing to the underlying writer fails.
 */
private void add(Unfiltered unfiltered) throws IOException
{
    long pos = currentPosition();

    if (firstClustering == null)
    {
        // Beginning of an index block. Remember the start and position
        firstClustering = unfiltered.clustering();
        startPosition = pos;
    }

    // pos - previousRowStart is the delta from the previous atom, used by the serializer.
    UnfilteredSerializer.serializer.serialize(unfiltered, header, writer, pos - previousRowStart, version);

    // notify observers about each new row
    if (!observers.isEmpty())
        observers.forEach((o) -> o.nextUnfilteredCluster(unfiltered));

    lastClustering = unfiltered.clustering();
    previousRowStart = pos;
    ++written;

    if (unfiltered.kind() == Unfiltered.Kind.RANGE_TOMBSTONE_MARKER)
    {
        // Track whether a range tombstone is left open at this point in the stream.
        RangeTombstoneMarker marker = (RangeTombstoneMarker) unfiltered;
        openMarker = marker.isOpen(false) ? marker.openDeletionTime(false) : null;
    }

    // if we hit the column index size that we have to index after, go ahead and index it.
    if (currentPosition() - startPosition >= DatabaseDescriptor.getColumnIndexSize())
        addIndexBlock();
}
/**
 * Reads the next non-empty unfiltered from the deserializer, stopping (returning
 * {@code null}) once the next atom would be at or past the slice end. Range tombstone
 * markers update the open-marker state before being returned.
 */
protected Unfiltered computeNext() throws IOException
{
    assert deserializer != null;

    while (true)
    {
        // We use the same reasoning as in handlePreSliceData regarding the strictness of the inequality
        // below. We want to exclude deserialized unfiltered equal to end, because 1) we won't miss any
        // rows since those wouldn't be equal to a slice bound and 2) an end bound can be equal to a start
        // bound (EXCL_END(x) == INCL_START(x) for instance) and in that case we don't want to return the
        // start bound because it's fundamentally excluded. And if the bound is an end (for a range
        // tombstone), it means it's exactly our slice end, but in that case we will properly close the
        // range tombstone anyway as part of our "close an open marker" code in hasNextInternal
        if (!deserializer.hasNext() || deserializer.compareNextTo(end) >= 0)
            return null;

        Unfiltered next = deserializer.readNext();
        // We may get empty row for the same reason expressed on UnfilteredSerializer.deserializeOne.
        if (next.isEmpty())
            continue;

        if (next.kind() == Unfiltered.Kind.RANGE_TOMBSTONE_MARKER)
            updateOpenMarker((RangeTombstoneMarker) next);
        return next;
    }
}
@Override public Unfiltered next() { Unfiltered next = nextToOffer != null ? nextToOffer : wrapped.next(); if (next.isRow()) { while (wrapped.hasNext()) { Unfiltered peek = wrapped.next(); if (!peek.isRow() || !next.clustering().equals(peek.clustering())) { nextToOffer = peek; // Offer peek in next call return next; } // Duplicate row, merge it. next = Rows.merge((Row) next, (Row) peek, FBUtilities.nowInSeconds()); } } nextToOffer = null; return next; } }
/**
 * Returns the next atom from the underlying iterator, repairing any row whose local
 * expiration time is negative (logging it and counting it in the metrics) before
 * handing it back.
 */
protected Unfiltered computeNext()
{
    if (!iterator.hasNext())
        return endOfData();

    Unfiltered unfiltered = iterator.next();
    if (unfiltered.isRow())
    {
        Row row = (Row) unfiltered;
        if (hasNegativeLocalExpirationTime(row))
        {
            outputHandler.debug(String.format("Found row with negative local expiration time: %s", row.toString(metadata(), false)));
            negativeLocalExpirationTimeMetrics.fixedRows++;
            return fixNegativeLocalExpirationTime(row);
        }
    }
    return unfiltered;
}
/**
 * Compares the clustering of the next atom to the provided bound using the table's
 * clustering comparator.
 *
 * @param bound the bound to compare the next atom's clustering against.
 * @return a negative, zero or positive integer per the comparator contract.
 * @throws IllegalStateException if there is no next atom.
 */
public int compareNextTo(ClusteringBound bound) throws IOException
{
    if (hasNext())
        return metadata.comparator.compare(next.clustering(), bound);

    throw new IllegalStateException();
}