public boolean isReverseOrder() { return rowIterator.isReverseOrder(); }
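// The one-liner above is pure delegation: a wrapping iterator must forward
// isReverseOrder() so that ordering information survives the wrapping. A minimal
// sketch of that pattern follows; the WrappingRowIterator name is hypothetical,
// not a class from this codebase.
public abstract class WrappingRowIterator implements RowIterator
{
    protected final RowIterator rowIterator;

    protected WrappingRowIterator(RowIterator rowIterator)
    {
        this.rowIterator = rowIterator;
    }

    public boolean isReverseOrder()
    {
        // Forward rather than hard-code: ordering is a property of the wrapped source.
        return rowIterator.isReverseOrder();
    }
}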
private List<KeySlice> thriftifyKeySlices(PartitionIterator results, ColumnParent column_parent, int cellLimit)
{
    try (PartitionIterator iter = results)
    {
        List<KeySlice> keySlices = new ArrayList<>();
        while (iter.hasNext())
        {
            try (RowIterator partition = iter.next())
            {
                List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyPartition(partition, column_parent.super_column != null, partition.isReverseOrder(), cellLimit);
                keySlices.add(new KeySlice(partition.partitionKey().getKey(), thriftifiedColumns));
            }
        }
        return keySlices;
    }
}
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<SinglePartitionReadCommand> commands, boolean subColumnsOnly, int cellLimit, org.apache.cassandra.db.ConsistencyLevel consistency_level, ClientState cState, long queryStartNanoTime)
throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException
{
    try (PartitionIterator results = read(commands, consistency_level, cState, queryStartNanoTime))
    {
        Map<ByteBuffer, List<ColumnOrSuperColumn>> columnFamiliesMap = new HashMap<>();
        while (results.hasNext())
        {
            try (RowIterator iter = results.next())
            {
                List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyPartition(iter, subColumnsOnly, iter.isReverseOrder(), cellLimit);
                columnFamiliesMap.put(iter.partitionKey().getKey(), thriftifiedColumns);
            }
        }
        return columnFamiliesMap;
    }
}
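// thriftifyKeySlices and getSlice above share one resource pattern: the outer
// PartitionIterator and each per-partition RowIterator are opened with
// try-with-resources, and each partition is fully consumed and closed before the
// next is fetched. A generic sketch of that shape; the mapPartitions helper is
// hypothetical, not part of this codebase.
static <T> List<T> mapPartitions(PartitionIterator partitions, java.util.function.Function<RowIterator, T> fn)
{
    try (PartitionIterator iter = partitions)
    {
        List<T> out = new ArrayList<>();
        while (iter.hasNext())
        {
            try (RowIterator partition = iter.next())
            {
                out.add(fn.apply(partition)); // each partition is closed even if fn throws
            }
        }
        return out;
    }
}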
metadata.cfName, metadata.getKeyValidator().getString(iterator.partitionKey().getKey()), iterator.isReverseOrder());
public static void digest(RowIterator iterator, MessageDigest digest)
{
    // TODO: we're not computing the digest the same way as old nodes. This is
    // currently ok as this is only used for schema digests and there is no exchange
    // of schema digests between different versions. If this changes, however,
    // we'll need to agree on a version.
    digest.update(iterator.partitionKey().getKey().duplicate());
    iterator.columns().regulars.digest(digest);
    iterator.columns().statics.digest(digest);
    FBUtilities.updateWithBoolean(digest, iterator.isReverseOrder());
    iterator.staticRow().digest(digest);
    while (iterator.hasNext())
        iterator.next().digest(digest);
}
public static void digest(RowIterator iterator, MessageDigest digest, MessageDigest altDigest, Set<ByteBuffer> columnsToExclude)
{
    // TODO: we're not computing the digest the same way as old nodes. This is
    // currently ok as this is only used for schema digests and there is no exchange
    // of schema digests between different versions. If this changes, however,
    // we'll need to agree on a version.
    digest.update(iterator.partitionKey().getKey().duplicate());
    iterator.columns().regulars.digest(digest);
    iterator.columns().statics.digest(digest);
    FBUtilities.updateWithBoolean(digest, iterator.isReverseOrder());
    iterator.staticRow().digest(digest);

    if (altDigest != null)
    {
        // Compute the "alternative digest" here, excluding the given columns.
        altDigest.update(iterator.partitionKey().getKey().duplicate());
        iterator.columns().regulars.digest(altDigest, columnsToExclude);
        iterator.columns().statics.digest(altDigest, columnsToExclude);
        FBUtilities.updateWithBoolean(altDigest, iterator.isReverseOrder());
        iterator.staticRow().digest(altDigest, columnsToExclude);
    }

    while (iterator.hasNext())
    {
        Row row = iterator.next();
        row.digest(digest);
        if (altDigest != null)
            row.digest(altDigest, columnsToExclude);
    }
}
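// Both digest variants above fold isReverseOrder() into the hash via
// FBUtilities.updateWithBoolean, so two otherwise identical partitions iterated in
// opposite directions hash differently. A self-contained, JDK-only illustration,
// assuming the helper contributes a single byte per boolean:
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class DigestBooleanExample
{
    static byte[] digestOf(byte[] key, boolean reversed) throws NoSuchAlgorithmException
    {
        MessageDigest md = MessageDigest.getInstance("MD5");
        md.update(key);
        md.update((byte) (reversed ? 1 : 0)); // the boolean contributes exactly one byte
        return md.digest();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException
    {
        byte[] key = "partition-key".getBytes(StandardCharsets.UTF_8);
        // Same key, different iteration direction: digests do not match.
        System.out.println(java.util.Arrays.equals(digestOf(key, false), digestOf(key, true))); // prints false
    }
}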
protected static Holder build(RowIterator rows, DeletionInfo deletion, boolean buildEncodingStats, int initialRowCapacity)
{
    CFMetaData metadata = rows.metadata();
    PartitionColumns columns = rows.columns();
    boolean reversed = rows.isReverseOrder();

    BTree.Builder<Row> builder = BTree.builder(metadata.comparator, initialRowCapacity);
    builder.auto(false); // rows arrive in (possibly reversed) comparator order, so skip per-insert sorting
    while (rows.hasNext())
        builder.add(rows.next());
    if (reversed)
        builder.reverse(); // restore forward comparator order in a single pass

    Row staticRow = rows.staticRow();
    Object[] tree = builder.build();
    EncodingStats stats = buildEncodingStats
                        ? EncodingStats.Collector.collect(staticRow, BTree.iterator(tree), deletion)
                        : EncodingStats.NO_STATS;
    return new Holder(columns, tree, deletion, staticRow, stats);
}
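// build() above relies on a precondition: rows arrive either in comparator order
// or in exactly reversed comparator order, which is why builder.auto(false) plus a
// single builder.reverse() pass suffices instead of re-sorting. The sketch below
// mirrors that trick with plain JDK lists; all names are illustrative.
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

public class ReverseBuildExample
{
    static List<Integer> buildSorted(Iterator<Integer> rows, boolean reversed)
    {
        List<Integer> out = new ArrayList<>();
        while (rows.hasNext())
            out.add(rows.next());     // append blindly: no per-element comparisons
        if (reversed)
            Collections.reverse(out); // one O(n) pass restores comparator order
        return out;
    }

    public static void main(String[] args)
    {
        List<Integer> reversedRows = List.of(5, 4, 3, 2, 1);
        System.out.println(buildSorted(reversedRows.iterator(), true)); // [1, 2, 3, 4, 5]
    }
}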
    throw new NotFoundException();

List<ColumnOrSuperColumn> tcolumns = thriftifyPartition(result, metadata.isSuper() && column_path.column != null, result.isReverseOrder(), 1);
if (tcolumns.isEmpty())
    throw new NotFoundException();