// Hands out the next row while recording its clustering, so the enclosing
// iterator remembers the position it last served.
public Row next()
{
    Row current = this.rowIterator.next();
    lastClustering = current.clustering();
    return current;
}
}
// Hands out the next row while recording its clustering, so the enclosing
// iterator remembers the position it last served.
public Row next()
{
    Row current = this.rowIterator.next();
    lastClustering = current.clustering();
    return current;
}
}
// Hands out the next row while recording its clustering, so the enclosing
// iterator remembers the position it last served.
public Row next()
{
    Row current = this.rowIterator.next();
    lastClustering = current.clustering();
    return current;
}
}
/**
 * Fully drains the given partition iterator, consuming and closing every
 * per-partition {@code RowIterator} it produces. The outer iterator itself
 * is not closed by this method.
 *
 * @param iterator the partition iterator to exhaust
 */
public static void consume(PartitionIterator iterator)
{
    while (iterator.hasNext())
    {
        // try-with-resources guarantees each partition's RowIterator is closed
        // even if draining it throws.
        try (RowIterator rows = iterator.next())
        {
            while (rows.hasNext())
            {
                rows.next();
            }
        }
    }
}
/**
 * Fully drains the given partition iterator, consuming and closing every
 * per-partition {@code RowIterator} it produces. The outer iterator itself
 * is not closed by this method.
 *
 * @param iterator the partition iterator to exhaust
 */
public static void consume(PartitionIterator iterator)
{
    while (iterator.hasNext())
    {
        // try-with-resources guarantees each partition's RowIterator is closed
        // even if draining it throws.
        try (RowIterator rows = iterator.next())
        {
            while (rows.hasNext())
            {
                rows.next();
            }
        }
    }
}
/**
 * Fully drains the given partition iterator, consuming and closing every
 * per-partition {@code RowIterator} it produces. The outer iterator itself
 * is not closed by this method.
 *
 * @param iterator the partition iterator to exhaust
 */
public static void consume(PartitionIterator iterator)
{
    while (iterator.hasNext())
    {
        // try-with-resources guarantees each partition's RowIterator is closed
        // even if draining it throws.
        try (RowIterator rows = iterator.next())
        {
            while (rows.hasNext())
            {
                rows.next();
            }
        }
    }
}
/**
 * Folds the full content of the partition represented by {@code iterator}
 * into the given digest: partition key, column definitions, iteration
 * order, static row, and every remaining row. The iterator is fully
 * consumed by this call. The update order is part of the digest contract
 * and must not be changed.
 *
 * @param iterator the partition to digest; fully consumed by this call
 * @param digest   the digest to update
 */
public static void digest(RowIterator iterator, MessageDigest digest)
{
    // TODO: we're not computing digest the same way that old nodes. This is
    // currently ok as this is only used for schema digests and there is no exchange
    // of schema digests between different versions. If this changes however,
    // we'll need to agree on a version.
    digest.update(iterator.partitionKey().getKey().duplicate());
    iterator.columns().regulars.digest(digest);
    iterator.columns().statics.digest(digest);
    FBUtilities.updateWithBoolean(digest, iterator.isReverseOrder());
    iterator.staticRow().digest(digest);
    while (iterator.hasNext())
        iterator.next().digest(digest);
}
@Override public void flush() { if (rowcument != null) { switch(transactionType) { case CLEANUP: this.rowcument.delete(); break; case COMPACTION: // remove expired row or reindex a doc when a column has expired, happen only when index_on_compaction=true for at least one elasticsearch index. case UPDATE: if (rowcument.hasMissingFields()) { SinglePartitionReadCommand command = SinglePartitionReadCommand.fullPartitionRead(baseCfs.metadata, nowInSec, key); RowIterator rowIt = read(command); if (rowIt.hasNext()) try { this.rowcument = new SkinnyRowcument(rowIt.next(), null); } catch (IOException e) { logger.error("Unexpected error", e); } } if (this.rowcument.hasLiveData(nowInSec)) { this.rowcument.index(); } else { this.rowcument.delete(); } } } } }
/**
 * Folds the full content of the partition represented by {@code iterator}
 * into {@code digest}, and — when {@code altDigest} is non-null — into an
 * alternative digest that excludes the columns in {@code columnsToExclude}.
 * The iterator is fully consumed; the update order is part of the digest
 * contract and must not be changed.
 *
 * @param digest           primary digest, updated with the full partition content
 * @param altDigest        optional alternative digest; skipped when null
 * @param columnsToExclude columns omitted from the alternative digest
 */
public static void digest(RowIterator iterator, MessageDigest digest, MessageDigest altDigest, Set<ByteBuffer> columnsToExclude)
{
    // TODO: we're not computing digest the same way that old nodes. This is
    // currently ok as this is only used for schema digests and there is no exchange
    // of schema digests between different versions. If this changes however,
    // we'll need to agree on a version.
    digest.update(iterator.partitionKey().getKey().duplicate());
    iterator.columns().regulars.digest(digest);
    iterator.columns().statics.digest(digest);
    FBUtilities.updateWithBoolean(digest, iterator.isReverseOrder());
    iterator.staticRow().digest(digest);
    if (altDigest != null)
    {
        // Compute the "alternative digest" here: same sequence as above, but with
        // the excluded columns filtered out of every column/row contribution.
        altDigest.update(iterator.partitionKey().getKey().duplicate());
        iterator.columns().regulars.digest(altDigest, columnsToExclude);
        iterator.columns().statics.digest(altDigest, columnsToExclude);
        FBUtilities.updateWithBoolean(altDigest, iterator.isReverseOrder());
        iterator.staticRow().digest(altDigest, columnsToExclude);
    }
    while (iterator.hasNext())
    {
        Row row = iterator.next();
        row.digest(digest);
        if (altDigest != null)
            row.digest(altDigest, columnsToExclude);
    }
}
/**
 * Folds the full content of the partition represented by {@code iterator}
 * into {@code digest}, and — when {@code altDigest} is non-null — into an
 * alternative digest that excludes the columns in {@code columnsToExclude}.
 * The iterator is fully consumed; the update order is part of the digest
 * contract and must not be changed.
 *
 * @param digest           primary digest, updated with the full partition content
 * @param altDigest        optional alternative digest; skipped when null
 * @param columnsToExclude columns omitted from the alternative digest
 */
public static void digest(RowIterator iterator, MessageDigest digest, MessageDigest altDigest, Set<ByteBuffer> columnsToExclude)
{
    // TODO: we're not computing digest the same way that old nodes. This is
    // currently ok as this is only used for schema digests and there is no exchange
    // of schema digests between different versions. If this changes however,
    // we'll need to agree on a version.
    digest.update(iterator.partitionKey().getKey().duplicate());
    iterator.columns().regulars.digest(digest);
    iterator.columns().statics.digest(digest);
    FBUtilities.updateWithBoolean(digest, iterator.isReverseOrder());
    iterator.staticRow().digest(digest);
    if (altDigest != null)
    {
        // Compute the "alternative digest" here: same sequence as above, but with
        // the excluded columns filtered out of every column/row contribution.
        altDigest.update(iterator.partitionKey().getKey().duplicate());
        iterator.columns().regulars.digest(altDigest, columnsToExclude);
        iterator.columns().statics.digest(altDigest, columnsToExclude);
        FBUtilities.updateWithBoolean(altDigest, iterator.isReverseOrder());
        iterator.staticRow().digest(altDigest, columnsToExclude);
    }
    while (iterator.hasNext())
    {
        Row row = iterator.next();
        row.digest(digest);
        if (altDigest != null)
            row.digest(altDigest, columnsToExclude);
    }
}
/**
 * Folds the full content of the partition represented by {@code iterator}
 * into {@code digest}, and — when {@code altDigest} is non-null — into an
 * alternative digest that excludes the columns in {@code columnsToExclude}.
 * The iterator is fully consumed; the update order is part of the digest
 * contract and must not be changed.
 *
 * @param digest           primary digest, updated with the full partition content
 * @param altDigest        optional alternative digest; skipped when null
 * @param columnsToExclude columns omitted from the alternative digest
 */
public static void digest(RowIterator iterator, MessageDigest digest, MessageDigest altDigest, Set<ByteBuffer> columnsToExclude)
{
    // TODO: we're not computing digest the same way that old nodes. This is
    // currently ok as this is only used for schema digests and there is no exchange
    // of schema digests between different versions. If this changes however,
    // we'll need to agree on a version.
    digest.update(iterator.partitionKey().getKey().duplicate());
    iterator.columns().regulars.digest(digest);
    iterator.columns().statics.digest(digest);
    FBUtilities.updateWithBoolean(digest, iterator.isReverseOrder());
    iterator.staticRow().digest(digest);
    if (altDigest != null)
    {
        // Compute the "alternative digest" here: same sequence as above, but with
        // the excluded columns filtered out of every column/row contribution.
        altDigest.update(iterator.partitionKey().getKey().duplicate());
        iterator.columns().regulars.digest(altDigest, columnsToExclude);
        iterator.columns().statics.digest(altDigest, columnsToExclude);
        FBUtilities.updateWithBoolean(altDigest, iterator.isReverseOrder());
        iterator.staticRow().digest(altDigest, columnsToExclude);
    }
    while (iterator.hasNext())
    {
        Row row = iterator.next();
        row.digest(digest);
        if (altDigest != null)
            row.digest(altDigest, columnsToExclude);
    }
}
/**
 * Reads the writetime of the legacy {@code system.schema_usertypes} row for
 * the given (keyspace, type) pair by querying memtables and sstables
 * directly.
 *
 * @param keyspaceName the keyspace owning the type (partition key)
 * @param typeName     the user type name (clustering key)
 * @return the primary-key liveness timestamp of the matching row
 */
private static long readTypeTimestamp(String keyspaceName, String typeName)
{
    ColumnFamilyStore store = org.apache.cassandra.db.Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME)
                                                              .getColumnFamilyStore(SystemKeyspace.LEGACY_USERTYPES);

    // Single-clustering slice selecting exactly the row for this type name.
    ClusteringComparator comparator = store.metadata.comparator;
    Slices slices = Slices.with(comparator, Slice.make(comparator, typeName));
    int nowInSec = FBUtilities.nowInSeconds();
    DecoratedKey key = store.metadata.decorateKey(AsciiType.instance.fromString(keyspaceName));
    SinglePartitionReadCommand command = SinglePartitionReadCommand.create(store.metadata, nowInSec, key, slices);
    // NOTE(review): partition.next() is called without a hasNext() guard, so
    // this assumes the legacy schema row always exists; a missing row would
    // surface as a NoSuchElementException — confirm with callers.
    try (ReadExecutionController controller = command.executionController();
         RowIterator partition = UnfilteredRowIterators.filter(command.queryMemtableAndDisk(store, controller), nowInSec))
    {
        return partition.next().primaryKeyLivenessInfo().timestamp();
    }
}
/**
 * Reads the writetime of the legacy {@code system.schema_usertypes} row for
 * the given (keyspace, type) pair by querying memtables and sstables
 * directly.
 *
 * @param keyspaceName the keyspace owning the type (partition key)
 * @param typeName     the user type name (clustering key)
 * @return the primary-key liveness timestamp of the matching row
 */
private static long readTypeTimestamp(String keyspaceName, String typeName)
{
    ColumnFamilyStore store = org.apache.cassandra.db.Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME)
                                                              .getColumnFamilyStore(SystemKeyspace.LEGACY_USERTYPES);

    // Single-clustering slice selecting exactly the row for this type name.
    ClusteringComparator comparator = store.metadata.comparator;
    Slices slices = Slices.with(comparator, Slice.make(comparator, typeName));
    int nowInSec = FBUtilities.nowInSeconds();
    DecoratedKey key = store.metadata.decorateKey(AsciiType.instance.fromString(keyspaceName));
    SinglePartitionReadCommand command = SinglePartitionReadCommand.create(store.metadata, nowInSec, key, slices);
    // NOTE(review): partition.next() is called without a hasNext() guard, so
    // this assumes the legacy schema row always exists; a missing row would
    // surface as a NoSuchElementException — confirm with callers.
    try (ReadExecutionController controller = command.executionController();
         RowIterator partition = UnfilteredRowIterators.filter(command.queryMemtableAndDisk(store, controller), nowInSec))
    {
        return partition.next().primaryKeyLivenessInfo().timestamp();
    }
}
/**
 * Reads the writetime of the legacy {@code system.schema_usertypes} row for
 * the given (keyspace, type) pair by querying memtables and sstables
 * directly.
 *
 * @param keyspaceName the keyspace owning the type (partition key)
 * @param typeName     the user type name (clustering key)
 * @return the primary-key liveness timestamp of the matching row
 */
private static long readTypeTimestamp(String keyspaceName, String typeName)
{
    ColumnFamilyStore store = org.apache.cassandra.db.Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME)
                                                              .getColumnFamilyStore(SystemKeyspace.LEGACY_USERTYPES);

    // Single-clustering slice selecting exactly the row for this type name.
    ClusteringComparator comparator = store.metadata.comparator;
    Slices slices = Slices.with(comparator, Slice.make(comparator, typeName));
    int nowInSec = FBUtilities.nowInSeconds();
    DecoratedKey key = store.metadata.decorateKey(AsciiType.instance.fromString(keyspaceName));
    SinglePartitionReadCommand command = SinglePartitionReadCommand.create(store.metadata, nowInSec, key, slices);
    // NOTE(review): partition.next() is called without a hasNext() guard, so
    // this assumes the legacy schema row always exists; a missing row would
    // surface as a NoSuchElementException — confirm with callers.
    try (ReadExecutionController controller = command.executionController();
         RowIterator partition = UnfilteredRowIterators.filter(command.queryMemtableAndDisk(store, controller), nowInSec))
    {
        return partition.next().primaryKeyLivenessInfo().timestamp();
    }
}
/**
 * Materializes the rows of the given iterator into an immutable Holder,
 * preserving clustering order (reversing when the iterator delivers rows
 * in reverse order) and optionally collecting encoding stats.
 *
 * @param rows               the rows to materialize; fully consumed
 * @param deletion           partition-level deletion info carried into the Holder
 * @param buildEncodingStats whether to collect EncodingStats or use NO_STATS
 * @param initialRowCapacity initial capacity hint for the row BTree builder
 * @return a Holder holding the materialized rows, static row, deletion and stats
 */
protected static Holder build(RowIterator rows, DeletionInfo deletion, boolean buildEncodingStats, int initialRowCapacity)
{
    CFMetaData metadata = rows.metadata();
    PartitionColumns columns = rows.columns();
    boolean reversed = rows.isReverseOrder();

    // Collect every row into a BTree; auto-balancing is disabled while adding,
    // and the builder is reversed afterwards if rows arrived in reverse order.
    BTree.Builder<Row> treeBuilder = BTree.builder(metadata.comparator, initialRowCapacity);
    treeBuilder.auto(false);
    while (rows.hasNext())
    {
        treeBuilder.add(rows.next());
    }
    if (reversed)
        treeBuilder.reverse();

    Row staticRow = rows.staticRow();
    Object[] tree = treeBuilder.build();

    EncodingStats stats;
    if (buildEncodingStats)
        stats = EncodingStats.Collector.collect(staticRow, BTree.iterator(tree), deletion);
    else
        stats = EncodingStats.NO_STATS;

    return new Holder(columns, tree, deletion, staticRow, stats);
}
/**
 * Materializes the rows of the given iterator into an immutable Holder,
 * preserving clustering order (reversing when the iterator delivers rows
 * in reverse order) and optionally collecting encoding stats.
 *
 * @param rows               the rows to materialize; fully consumed
 * @param deletion           partition-level deletion info carried into the Holder
 * @param buildEncodingStats whether to collect EncodingStats or use NO_STATS
 * @param initialRowCapacity initial capacity hint for the row BTree builder
 * @return a Holder holding the materialized rows, static row, deletion and stats
 */
protected static Holder build(RowIterator rows, DeletionInfo deletion, boolean buildEncodingStats, int initialRowCapacity)
{
    CFMetaData metadata = rows.metadata();
    PartitionColumns columns = rows.columns();
    boolean reversed = rows.isReverseOrder();

    // Collect every row into a BTree; auto-balancing is disabled while adding,
    // and the builder is reversed afterwards if rows arrived in reverse order.
    BTree.Builder<Row> treeBuilder = BTree.builder(metadata.comparator, initialRowCapacity);
    treeBuilder.auto(false);
    while (rows.hasNext())
    {
        treeBuilder.add(rows.next());
    }
    if (reversed)
        treeBuilder.reverse();

    Row staticRow = rows.staticRow();
    Object[] tree = treeBuilder.build();

    EncodingStats stats;
    if (buildEncodingStats)
        stats = EncodingStats.Collector.collect(staticRow, BTree.iterator(tree), deletion);
    else
        stats = EncodingStats.NO_STATS;

    return new Holder(columns, tree, deletion, staticRow, stats);
}
/**
 * Materializes the rows of the given iterator into an immutable Holder,
 * preserving clustering order (reversing when the iterator delivers rows
 * in reverse order) and optionally collecting encoding stats.
 *
 * @param rows               the rows to materialize; fully consumed
 * @param deletion           partition-level deletion info carried into the Holder
 * @param buildEncodingStats whether to collect EncodingStats or use NO_STATS
 * @param initialRowCapacity initial capacity hint for the row BTree builder
 * @return a Holder holding the materialized rows, static row, deletion and stats
 */
protected static Holder build(RowIterator rows, DeletionInfo deletion, boolean buildEncodingStats, int initialRowCapacity)
{
    CFMetaData metadata = rows.metadata();
    PartitionColumns columns = rows.columns();
    boolean reversed = rows.isReverseOrder();

    // Collect every row into a BTree; auto-balancing is disabled while adding,
    // and the builder is reversed afterwards if rows arrived in reverse order.
    BTree.Builder<Row> treeBuilder = BTree.builder(metadata.comparator, initialRowCapacity);
    treeBuilder.auto(false);
    while (rows.hasNext())
    {
        treeBuilder.add(rows.next());
    }
    if (reversed)
        treeBuilder.reverse();

    Row staticRow = rows.staticRow();
    Object[] tree = treeBuilder.build();

    EncodingStats stats;
    if (buildEncodingStats)
        stats = EncodingStats.Collector.collect(staticRow, BTree.iterator(tree), deletion);
    else
        stats = EncodingStats.NO_STATS;

    return new Holder(columns, tree, deletion, staticRow, stats);
}
/**
 * Materializes the rows of the given iterator into an immutable Holder,
 * preserving clustering order (reversing when the iterator delivers rows
 * in reverse order) and optionally collecting encoding stats.
 *
 * @param rows               the rows to materialize; fully consumed
 * @param deletion           partition-level deletion info carried into the Holder
 * @param buildEncodingStats whether to collect EncodingStats or use NO_STATS
 * @param initialRowCapacity initial capacity hint for the row BTree builder
 * @return a Holder holding the materialized rows, static row, deletion and stats
 */
protected static Holder build(RowIterator rows, DeletionInfo deletion, boolean buildEncodingStats, int initialRowCapacity)
{
    CFMetaData metadata = rows.metadata();
    PartitionColumns columns = rows.columns();
    boolean reversed = rows.isReverseOrder();

    // Collect every row into a BTree; auto-balancing is disabled while adding,
    // and the builder is reversed afterwards if rows arrived in reverse order.
    BTree.Builder<Row> treeBuilder = BTree.builder(metadata.comparator, initialRowCapacity);
    treeBuilder.auto(false);
    while (rows.hasNext())
    {
        treeBuilder.add(rows.next());
    }
    if (reversed)
        treeBuilder.reverse();

    Row staticRow = rows.staticRow();
    Object[] tree = treeBuilder.build();

    EncodingStats stats;
    if (buildEncodingStats)
        stats = EncodingStats.Collector.collect(staticRow, BTree.iterator(tree), deletion);
    else
        stats = EncodingStats.NO_STATS;

    return new Holder(columns, tree, deletion, staticRow, stats);
}
/**
 * Reads the current values of the counter cells referenced by the given
 * marks from the local ColumnFamilyStore and applies them to the pending
 * update via {@code updateForRow}.
 *
 * @param marks the counter marks to refresh, assumed to be in clustering order
 * @param cfs   the store holding the current counter state
 */
private void updateWithCurrentValuesFromCFS(List<PartitionUpdate.CounterMark> marks, ColumnFamilyStore cfs)
{
    // Build a column selection and a names filter covering exactly the
    // clusterings/columns the marks touch.
    ColumnFilter.Builder builder = ColumnFilter.selectionBuilder();
    BTreeSet.Builder<Clustering> names = BTreeSet.builder(cfs.metadata.comparator);
    for (PartitionUpdate.CounterMark mark : marks)
    {
        // The static row is read unconditionally below, so only non-static
        // clusterings need to be named in the filter.
        if (mark.clustering() != Clustering.STATIC_CLUSTERING)
            names.add(mark.clustering());
        if (mark.path() == null)
            builder.add(mark.column());
        else
            builder.select(mark.column(), mark.path());
    }

    int nowInSec = FBUtilities.nowInSeconds();
    ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(names.build(), false);
    SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(cfs.metadata, nowInSec, key(), builder.build(), filter);
    PeekingIterator<PartitionUpdate.CounterMark> markIter = Iterators.peekingIterator(marks.iterator());
    try (ReadExecutionController controller = cmd.executionController();
         RowIterator partition = UnfilteredRowIterators.filter(cmd.queryMemtableAndDisk(cfs, controller), nowInSec))
    {
        updateForRow(markIter, partition.staticRow(), cfs);

        while (partition.hasNext())
        {
            // Marks and read rows advance in lock-step (both in clustering
            // order); stop as soon as every mark has been consumed.
            if (!markIter.hasNext())
                return;

            updateForRow(markIter, partition.next(), cfs);
        }
    }
}
cell = iter.next().getCell(column);