/** Returns the key of the partition being iterated, delegating to the wrapped row iterator. */
public DecoratedKey partitionKey() { return rowIterator.partitionKey(); }
/** Returns the key of the partition being iterated, delegating to the wrapped row iterator. */
public DecoratedKey partitionKey() { return rowIterator.partitionKey(); }
/** Returns the key of the partition being iterated, delegating to the wrapped row iterator. */
public DecoratedKey partitionKey() { return rowIterator.partitionKey(); }
/**
 * Materializes every partition of the given iterator into an in-memory map
 * keyed by partition key. Each RowIterator is closed after being consumed;
 * the PartitionIterator itself is not closed here.
 */
private Map<DecoratedKey, Partition> asMaterializedMap(PartitionIterator iterator)
{
    Map<DecoratedKey, Partition> materialized = new HashMap<>();
    while (iterator.hasNext())
    {
        try (RowIterator partition = iterator.next())
        {
            materialized.put(partition.partitionKey(), FilteredPartition.create(partition));
        }
    }
    return materialized;
}
/**
 * Materializes every partition of the given iterator into an in-memory map
 * keyed by partition key. Each RowIterator is closed after being consumed;
 * the PartitionIterator itself is not closed here.
 */
private Map<DecoratedKey, Partition> asMaterializedMap(PartitionIterator iterator)
{
    Map<DecoratedKey, Partition> materialized = new HashMap<>();
    while (iterator.hasNext())
    {
        try (RowIterator partition = iterator.next())
        {
            materialized.put(partition.partitionKey(), FilteredPartition.create(partition));
        }
    }
    return materialized;
}
/**
 * Materializes every partition of the given iterator into an in-memory map
 * keyed by partition key. Each RowIterator is closed after being consumed;
 * the PartitionIterator itself is not closed here.
 */
private Map<DecoratedKey, Partition> asMaterializedMap(PartitionIterator iterator)
{
    Map<DecoratedKey, Partition> materialized = new HashMap<>();
    while (iterator.hasNext())
    {
        try (RowIterator partition = iterator.next())
        {
            materialized.put(partition.partitionKey(), FilteredPartition.create(partition));
        }
    }
    return materialized;
}
/**
 * Builds a FilteredPartition by materializing all rows of the given iterator.
 * Uses DeletionInfo.LIVE (no deletion info is retained) and does not reverse row order.
 * NOTE(review): 16 is presumably an initial capacity hint for the builder — confirm against build().
 */
public FilteredPartition(RowIterator rows) { super(rows.metadata(), rows.partitionKey(), build(rows, DeletionInfo.LIVE, false, 16)); }
/**
 * Builds a FilteredPartition by materializing all rows of the given iterator.
 * Uses DeletionInfo.LIVE (no deletion info is retained) and does not reverse row order.
 * NOTE(review): 16 is presumably an initial capacity hint for the builder — confirm against build().
 */
public FilteredPartition(RowIterator rows) { super(rows.metadata(), rows.partitionKey(), build(rows, DeletionInfo.LIVE, false, 16)); }
/**
 * Builds a FilteredPartition by materializing all rows of the given iterator.
 * Uses DeletionInfo.LIVE (no deletion info is retained) and does not reverse row order.
 * NOTE(review): 16 is presumably an initial capacity hint for the builder — confirm against build().
 */
public FilteredPartition(RowIterator rows) { super(rows.metadata(), rows.partitionKey(), build(rows, DeletionInfo.LIVE, false, 16)); }
/**
 * Builds a FilteredPartition by materializing all rows of the given iterator.
 * Uses DeletionInfo.LIVE (no deletion info is retained) and does not reverse row order.
 * NOTE(review): 16 is presumably an initial capacity hint for the builder — confirm against build().
 */
public FilteredPartition(RowIterator rows) { super(rows.metadata(), rows.partitionKey(), build(rows, DeletionInfo.LIVE, false, 16)); }
/**
 * Returns the next partition as a grouping RowIterator, remembering its key so a
 * following page can be detected as a continuation of the same partition.
 *
 * @throws NoSuchElementException if no partition remains
 */
public final RowIterator next()
{
    if (!hasNext())
        throw new NoSuchElementException();

    RowIterator grouped = new GroupByRowIterator(next);
    lastPartitionKey = grouped.partitionKey().getKey();
    next = null;
    return grouped;
}
/**
 * Returns the next partition as a grouping RowIterator, remembering its key so a
 * following page can be detected as a continuation of the same partition.
 *
 * @throws NoSuchElementException if no partition remains
 */
public final RowIterator next()
{
    if (!hasNext())
        throw new NoSuchElementException();

    RowIterator grouped = new GroupByRowIterator(next);
    lastPartitionKey = grouped.partitionKey().getKey();
    next = null;
    return grouped;
}
/**
 * Returns the next partition as a grouping RowIterator, remembering its key so a
 * following page can be detected as a continuation of the same partition.
 *
 * @throws NoSuchElementException if no partition remains
 */
public final RowIterator next()
{
    if (!hasNext())
        throw new NoSuchElementException();

    RowIterator grouped = new GroupByRowIterator(next);
    lastPartitionKey = grouped.partitionKey().getKey();
    next = null;
    return grouped;
}
/**
 * Returns true while more rows remain in the logical partition this iterator covers.
 * When the current page's rows are exhausted, asks the enclosing
 * GroupByPartitionIterator for the next RowIterator; if that iterator belongs to
 * the same partition (the previous page ended mid-partition), iteration resumes
 * on it transparently. Otherwise this iterator is marked closed.
 */
public boolean hasNext()
{
    if (rowIterator.hasNext())
        return true;

    // Remember which partition we were on before releasing the current page's iterator.
    DecoratedKey partitionKey = rowIterator.partitionKey();
    rowIterator.close();

    // Fetch the next RowIterator (side effect: populates 'next' in the enclosing iterator).
    GroupByPartitionIterator.this.hasNext();

    // if the previous page was ending within the partition the
    // next RowIterator is the continuation of this one
    if (next != null && partitionKey.equals(next.partitionKey()))
    {
        rowIterator = next;
        next = null;
        return rowIterator.hasNext();
    }

    closed = true;
    return false;
}
/**
 * Converts every partition produced by {@code results} into a Thrift KeySlice.
 * Both the PartitionIterator and each per-partition RowIterator are closed here.
 */
private List<KeySlice> thriftifyKeySlices(PartitionIterator results, ColumnParent column_parent, int cellLimit)
{
    try (PartitionIterator iter = results)
    {
        List<KeySlice> slices = new ArrayList<>();
        // Loop-invariant: whether we are slicing sub-columns of a super column.
        boolean subColumnsOnly = column_parent.super_column != null;
        while (iter.hasNext())
        {
            try (RowIterator partition = iter.next())
            {
                List<ColumnOrSuperColumn> columns =
                    thriftifyPartition(partition, subColumnsOnly, partition.isReverseOrder(), cellLimit);
                slices.add(new KeySlice(partition.partitionKey().getKey(), columns));
            }
        }
        return slices;
    }
}
/**
 * Converts every partition produced by {@code results} into a Thrift KeySlice.
 * Both the PartitionIterator and each per-partition RowIterator are closed here.
 */
private List<KeySlice> thriftifyKeySlices(PartitionIterator results, ColumnParent column_parent, int cellLimit)
{
    try (PartitionIterator iter = results)
    {
        List<KeySlice> slices = new ArrayList<>();
        // Loop-invariant: whether we are slicing sub-columns of a super column.
        boolean subColumnsOnly = column_parent.super_column != null;
        while (iter.hasNext())
        {
            try (RowIterator partition = iter.next())
            {
                List<ColumnOrSuperColumn> columns =
                    thriftifyPartition(partition, subColumnsOnly, partition.isReverseOrder(), cellLimit);
                slices.add(new KeySlice(partition.partitionKey().getKey(), columns));
            }
        }
        return slices;
    }
}
/**
 * Executes the given single-partition reads and converts each resulting partition
 * into a list of Thrift columns, keyed by raw partition key bytes.
 * The PartitionIterator and each per-partition RowIterator are closed here.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<SinglePartitionReadCommand> commands, boolean subColumnsOnly, int cellLimit, org.apache.cassandra.db.ConsistencyLevel consistency_level, ClientState cState, long queryStartNanoTime)
throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException
{
    try (PartitionIterator results = read(commands, consistency_level, cState, queryStartNanoTime))
    {
        Map<ByteBuffer, List<ColumnOrSuperColumn>> slicesByKey = new HashMap<>();
        while (results.hasNext())
        {
            try (RowIterator partition = results.next())
            {
                List<ColumnOrSuperColumn> columns =
                    thriftifyPartition(partition, subColumnsOnly, partition.isReverseOrder(), cellLimit);
                slicesByKey.put(partition.partitionKey().getKey(), columns);
            }
        }
        return slicesByKey;
    }
}
/**
 * Executes the given single-partition reads and converts each resulting partition
 * into a list of Thrift columns, keyed by raw partition key bytes.
 * The PartitionIterator and each per-partition RowIterator are closed here.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<SinglePartitionReadCommand> commands, boolean subColumnsOnly, int cellLimit, org.apache.cassandra.db.ConsistencyLevel consistency_level, ClientState cState, long queryStartNanoTime)
throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException
{
    try (PartitionIterator results = read(commands, consistency_level, cState, queryStartNanoTime))
    {
        Map<ByteBuffer, List<ColumnOrSuperColumn>> slicesByKey = new HashMap<>();
        while (results.hasNext())
        {
            try (RowIterator partition = results.next())
            {
                List<ColumnOrSuperColumn> columns =
                    thriftifyPartition(partition, subColumnsOnly, partition.isReverseOrder(), cellLimit);
                slicesByKey.put(partition.partitionKey().getKey(), columns);
            }
        }
        return slicesByKey;
    }
}
/**
 * Turns the given iterator into an update.
 *
 * @param iterator the iterator to turn into updates.
 * @param filter the column filter used when querying {@code iterator}. This is used to make
 * sure we don't include data for which the value has been skipped while reading (as we would
 * then be writing something incorrect).
 *
 * Warning: this method does not close the provided iterator, it is up to
 * the caller to close it.
 */
public static PartitionUpdate fromIterator(RowIterator iterator, ColumnFilter filter)
{
    // Wrap the iterator so that only the columns actually queried are visible;
    // a fresh local avoids reassigning the parameter.
    RowIterator queriedOnly = RowIterators.withOnlyQueriedData(iterator, filter);
    MutableDeletionInfo deletionInfo = MutableDeletionInfo.live();
    Holder holder = build(queriedOnly, deletionInfo, true, 16);
    return new PartitionUpdate(queriedOnly.metadata(), queriedOnly.partitionKey(), holder, deletionInfo, false);
}
/**
 * Folds the content of the given row iterator into the digest: partition key bytes,
 * regular and static column definitions, iteration order, the static row, then every row.
 * Update order matters — any change to it changes the resulting digest.
 */
public static void digest(RowIterator iterator, MessageDigest digest)
{
    // TODO: we're not computing the digest the same way as old nodes. This is
    // currently ok as this is only used for schema digests and there is no exchange
    // of schema digests between different versions. If this changes however,
    // we'll need to agree on a version.
    digest.update(iterator.partitionKey().getKey().duplicate());
    iterator.columns().regulars.digest(digest);
    iterator.columns().statics.digest(digest);
    FBUtilities.updateWithBoolean(digest, iterator.isReverseOrder());
    iterator.staticRow().digest(digest);
    while (iterator.hasNext())
        iterator.next().digest(digest);
}