/**
 * Digests the partition represented by {@code iterator} into {@code digest}.
 * The update order below is significant: every node must feed the digest the
 * same byte sequence for the results to be comparable.
 */
public static void digest(RowIterator iterator, MessageDigest digest)
{
    // TODO: we're not computing the digest the same way that older nodes do. This is
    // currently ok as this is only used for schema digest and there is no exchange
    // of schema digests between different versions. If this changes however,
    // we'll need to agree on a version.
    // duplicate() so we don't disturb the key buffer's position/limit
    digest.update(iterator.partitionKey().getKey().duplicate());
    iterator.columns().regulars.digest(digest);
    iterator.columns().statics.digest(digest);
    FBUtilities.updateWithBoolean(digest, iterator.isReverseOrder());
    iterator.staticRow().digest(digest);
    // consume and digest every remaining row in iteration order
    while (iterator.hasNext())
        iterator.next().digest(digest);
}
/**
 * Materializes the given (filtered) row iterator into an immutable in-memory partition.
 * A filtered iterator carries no deletion information, hence {@code DeletionInfo.LIVE},
 * and no encoding stats are collected; 16 is the initial row capacity for the builder.
 */
public FilteredPartition(RowIterator rows)
{
    super(rows.metadata(), rows.partitionKey(), build(rows, DeletionInfo.LIVE, false, 16));
}
/**
 * Returns whether this per-partition iterator has more rows, transparently
 * stitching together a partition that was split across pages: when the current
 * underlying RowIterator is exhausted, we fetch the next one from the enclosing
 * GroupByPartitionIterator and, if it targets the same partition key, continue
 * iterating from it as if it were the same partition.
 */
public boolean hasNext()
{
    // still rows left in the current page?
    if (rowIterator.hasNext())
        return true;

    // remember which partition we were on before releasing the exhausted iterator
    DecoratedKey partitionKey = rowIterator.partitionKey();
    rowIterator.close();

    // Fetch the next RowIterator (stored into the outer class's 'next' field as a side effect)
    GroupByPartitionIterator.this.hasNext();

    // if the previous page was ending within the partition the
    // next RowIterator is the continuation of this one
    if (next != null && partitionKey.equals(next.partitionKey()))
    {
        rowIterator = next;
        next = null;
        return rowIterator.hasNext();
    }

    // different partition (or no more data): this per-partition iterator is done
    closed = true;
    return false;
}
/**
 * Copies every row of the given (filtered) iterator into a BTree-backed Holder.
 *
 * @param rows the source iterator; fully consumed by this call.
 * @param deletion deletion info to store in the resulting holder.
 * @param buildEncodingStats whether to collect EncodingStats over the copied rows.
 * @param initialRowCapacity initial capacity hint for the BTree builder.
 * @return a Holder containing the materialized rows, static row and stats.
 */
protected static Holder build(RowIterator rows, DeletionInfo deletion, boolean buildEncodingStats, int initialRowCapacity)
{
    CFMetaData cfm = rows.metadata();
    PartitionColumns regularAndStatic = rows.columns();
    boolean isReversed = rows.isReverseOrder();

    BTree.Builder<Row> rowBuilder = BTree.builder(cfm.comparator, initialRowCapacity);
    // disable auto-resolution on add; rows are assumed already in iterator order -- TODO confirm
    rowBuilder.auto(false);
    while (rows.hasNext())
        rowBuilder.add(rows.next());

    // a reversed iterator delivered rows back-to-front; flip them into comparator order
    if (isReversed)
        rowBuilder.reverse();

    Row staticRow = rows.staticRow();
    Object[] btree = rowBuilder.build();

    EncodingStats stats;
    if (buildEncodingStats)
        stats = EncodingStats.Collector.collect(staticRow, BTree.iterator(btree), deletion);
    else
        stats = EncodingStats.NO_STATS;

    return new Holder(regularAndStatic, btree, deletion, staticRow, stats);
}
/**
 * Reads back the current values for the given counter marks from the local
 * ColumnFamilyStore (memtable + sstables) and applies them via updateForRow.
 * Builds a name filter and a column filter covering exactly the marked
 * clusterings/columns so only the needed cells are fetched.
 */
private void updateWithCurrentValuesFromCFS(List<PartitionUpdate.CounterMark> marks, ColumnFamilyStore cfs)
{
    ColumnFilter.Builder builder = ColumnFilter.selectionBuilder();
    BTreeSet.Builder<Clustering> names = BTreeSet.builder(cfs.metadata.comparator);
    for (PartitionUpdate.CounterMark mark : marks)
    {
        // the static row is fetched implicitly by the read command, so only
        // non-static clusterings go into the names filter
        if (mark.clustering() != Clustering.STATIC_CLUSTERING)
            names.add(mark.clustering());
        if (mark.path() == null)
            builder.add(mark.column());
        else
            builder.select(mark.column(), mark.path());
    }

    int nowInSec = FBUtilities.nowInSeconds();
    ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(names.build(), false);
    SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(cfs.metadata, nowInSec, key(), builder.build(), filter);
    // marks and the partition's rows are iterated in lockstep; both are assumed
    // to be in clustering order -- TODO confirm against callers
    PeekingIterator<PartitionUpdate.CounterMark> markIter = Iterators.peekingIterator(marks.iterator());
    try (ReadExecutionController controller = cmd.executionController();
         RowIterator partition = UnfilteredRowIterators.filter(cmd.queryMemtableAndDisk(cfs, controller), nowInSec))
    {
        updateForRow(markIter, partition.staticRow(), cfs);

        while (partition.hasNext())
        {
            // stop early once every mark has been satisfied
            if (!markIter.hasNext())
                return;

            updateForRow(markIter, partition.next(), cfs);
        }
    }
}
// NOTE(review): this fragment appears garbled (likely mangled during extraction):
//  - `while (partition.hasNext()) Row row = partition.next();` is not legal Java
//    (a local variable declaration cannot be the sole statement of a while loop);
//  - `row` is referenced after the loop, where it would be out of scope;
//  - the `for (ColumnDefinition def : ...)` loop body never uses `def`.
// The surrounding method header is not visible here; reconstruct against the
// original source before changing anything.
ByteBuffer[] keyComponents = getComponents(cfm, partition.partitionKey());
Row staticRow = partition.staticRow();
// emit a row for a static-only partition -- presumably so static columns still appear; verify
if (!partition.hasNext())
    result.newRow(partition.partitionKey(), staticRow.clustering());
for (ColumnDefinition def : selection.getColumns())
    while (partition.hasNext())
        Row row = partition.next();
result.newRow( partition.partitionKey(), row.clustering());
// Fragment of a debug/tracing wrapper (method header outside this view):
// logs identifying information about the iterator being wrapped, rendering
// the partition key through the table's key validator for readability.
CFMetaData metadata = iterator.metadata();
logger.info("[{}] Logging iterator on {}.{}, partition key={}, reversed={}",
            id,
            metadata.ksName,
            metadata.cfName,
            metadata.getKeyValidator().getString(iterator.partitionKey().getKey()),
            iterator.isReverseOrder());
/**
 * Converts each partition of the query result into a thrift KeySlice.
 * Both the overall PartitionIterator and every per-partition RowIterator are
 * closed via try-with-resources, even if conversion throws.
 *
 * @param results the partitions to convert; consumed and closed by this call.
 * @param column_parent thrift column parent; a non-null super_column means only
 *        sub-columns of that super column are wanted.
 * @param cellLimit maximum number of cells to return per partition.
 * @return one KeySlice per partition, in iteration order.
 */
private List<KeySlice> thriftifyKeySlices(PartitionIterator results, ColumnParent column_parent, int cellLimit)
{
    try (PartitionIterator partitions = results)
    {
        List<KeySlice> slices = new ArrayList<>();
        while (partitions.hasNext())
        {
            try (RowIterator partition = partitions.next())
            {
                List<ColumnOrSuperColumn> columns =
                    thriftifyPartition(partition, column_parent.super_column != null, partition.isReverseOrder(), cellLimit);
                slices.add(new KeySlice(partition.partitionKey().getKey(), columns));
            }
        }
        return slices;
    }
}
// Fragment of a thrift single-cell read path (method header outside this view):
// an empty result partition, or one that thriftifies to no columns, is surfaced
// to the client as NotFoundException rather than an empty response.
if (!result.hasNext())
    throw new NotFoundException();
// cellLimit of 1: only the single requested cell is wanted
List<ColumnOrSuperColumn> tcolumns = thriftifyPartition(result,
                                                        metadata.isSuper() && column_path.column != null,
                                                        result.isReverseOrder(),
                                                        1);
if (tcolumns.isEmpty())
    throw new NotFoundException();
/**
 * Returns the next row from the wrapped iterator, recording its clustering.
 */
public Row next()
{
    Row row = this.rowIterator.next();
    // remember the clustering of the last row returned -- presumably used to
    // resume/continue from this position; verify against the enclosing class
    lastClustering = row.clustering();
    return row;
}
}
/** Delegates to the wrapped iterator's partition key. */
public DecoratedKey partitionKey()
{
    return rowIterator.partitionKey();
}
/**
 * Closes the wrapped iterator unless it has already been released
 * (the 'closed' flag is set elsewhere once the underlying iterator is done).
 */
public void close()
{
    if (closed)
        return; // underlying iterator already released

    rowIterator.close();
}
private List<ColumnOrSuperColumn> thriftifyPartition(RowIterator partition, boolean subcolumnsOnly, boolean reversed, int cellLimit) { if (partition.isEmpty()) return EMPTY_COLUMNS; Iterator<LegacyLayout.LegacyCell> cells = LegacyLayout.fromRowIterator(partition).right; List<ColumnOrSuperColumn> result; if (partition.metadata().isSuper()) { boolean isCounterCF = partition.metadata().isCounter(); result = thriftifySuperColumns(partition.metadata(), cells, subcolumnsOnly, isCounterCF, reversed); } else { result = thriftifyColumns(partition.metadata(), cells); } // Thrift count cells, but internally we only count them at "row" boundaries, which means that if the limit stops in the middle // of an internal row we'll include a few additional cells. So trim it here. return result.size() > cellLimit ? result.subList(0, cellLimit) : result; }
/** Delegates to the wrapped iterator's table metadata. */
public CFMetaData metadata()
{
    return rowIterator.metadata();
}
/**
 * Returns the wrapped iterator's static row, clearing the last-seen clustering
 * (the static row has no regular clustering to track).
 */
public Row staticRow()
{
    final Row result = rowIterator.staticRow();
    lastClustering = null;
    return result;
}
/** Delegates to the wrapped iterator's column set. */
public PartitionColumns columns()
{
    return rowIterator.columns();
}
/** Delegates to the wrapped iterator's iteration-order flag. */
public boolean isReverseOrder()
{
    return rowIterator.isReverseOrder();
}