@Timed (name = "bv.emodb.sor.AstyanaxDataReaderDAO.getSplit", absolute = true) @Override public Iterator<Record> getSplit(Table tbl, String split, @Nullable String fromKeyExclusive, LimitCounter limit, ReadConsistency consistency) { checkNotNull(tbl, "table"); checkNotNull(split, "split"); checkNotNull(limit, "limit"); checkNotNull(consistency, "consistency"); ByteBufferRange splitRange = SplitFormat.decode(split); AstyanaxTable table = (AstyanaxTable) tbl; AstyanaxStorage storage = getStorageForSplit(table, splitRange); DeltaPlacement placement = (DeltaPlacement) storage.getPlacement(); ByteBufferRange keyRange = storage.getSplitRange(splitRange, fromKeyExclusive, split); // The fromKeyExclusive might be equal to the end token of the split. If so, there's nothing to return. if (keyRange.getStart().equals(keyRange.getEnd())) { return Iterators.emptyIterator(); } // In contrast to the scan() method, scan a single range prefix (the one associated with this split). return touch(decodeRows( rowScan(placement, keyRange, _maxColumnsRange, limit, consistency), table, _maxColumnsRange.getLimit(), consistency)); }
@Timed (name = "bv.emodb.sor.AstyanaxDataReaderDAO.getSplit", absolute = true) @Override public Iterator<Record> getSplit(Table tbl, String split, @Nullable String fromKeyExclusive, LimitCounter limit, ReadConsistency consistency) { checkNotNull(tbl, "table"); checkNotNull(split, "split"); checkNotNull(limit, "limit"); checkNotNull(consistency, "consistency"); ByteBufferRange splitRange = SplitFormat.decode(split); AstyanaxTable table = (AstyanaxTable) tbl; AstyanaxStorage storage = getStorageForSplit(table, splitRange); DeltaPlacement placement = (DeltaPlacement) storage.getPlacement(); ByteBufferRange keyRange = storage.getSplitRange(splitRange, fromKeyExclusive, split); // The fromKeyExclusive might be equal to the end token of the split. If so, there's nothing to return. if (keyRange.getStart().equals(keyRange.getEnd())) { return Iterators.emptyIterator(); } // In contrast to the scan() method, scan a single range prefix (the one associated with this split). return touch(decodeRows( rowScan(placement, keyRange, _maxColumnsRange, limit, consistency), table, _maxColumnsRange.getLimit(), consistency)); }
/** * Queries for rows given an enumerated list of Cassandra row keys. */ private Iterator<Record> rowQuery(DeltaPlacement placement, List<Map.Entry<ByteBuffer, Key>> keys, ReadConsistency consistency) { // Build the list of row IDs to query for. List<ByteBuffer> rowIds = Lists.transform(keys, entryKeyFunction()); // Query for Delta & Compaction info, just the first 50 columns for now. final Rows<ByteBuffer, UUID> rows = execute(placement.getKeyspace() .prepareQuery(placement.getDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKeySlice(rowIds) .withColumnRange(_maxColumnsRange), "query %d keys from placement %s", rowIds.size(), placement.getName()); // Track metrics _randomReadMeter.mark(rowIds.size()); // Return an iterator that decodes the row results, avoiding pinning multiple decoded rows into memory at once. return decodeRows(keys, rows, _maxColumnsRange.getLimit(), consistency); }
/** * Queries for rows given an enumerated list of Cassandra row keys. */ private Iterator<Record> rowQuery(DeltaPlacement placement, List<Map.Entry<ByteBuffer, Key>> keys, ReadConsistency consistency) { // Build the list of row IDs to query for. List<ByteBuffer> rowIds = Lists.transform(keys, entryKeyFunction()); // Query for Delta & Compaction info, just the first 50 columns for now. final Rows<ByteBuffer, UUID> rows = execute(placement.getKeyspace() .prepareQuery(placement.getDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKeySlice(rowIds) .withColumnRange(_maxColumnsRange), "query %d keys from placement %s", rowIds.size(), placement.getName()); // Track metrics _randomReadMeter.mark(rowIds.size()); // Return an iterator that decodes the row results, avoiding pinning multiple decoded rows into memory at once. return decodeRows(keys, rows, _maxColumnsRange.getLimit(), consistency); }
/** * Queries for rows given an enumerated list of Cassandra row keys. */ private Iterator<Record> rowQuery(DeltaPlacement placement, List<Map.Entry<ByteBuffer, Key>> keys, ReadConsistency consistency) { // Build the list of row IDs to query for. List<ByteBuffer> rowIds = Lists.transform(keys, entryKeyFunction()); // Query for Delta & Compaction info, just the first 50 columns for now. final Rows<ByteBuffer, DeltaKey> rows = execute(placement.getKeyspace() .prepareQuery(placement.getBlockedDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKeySlice(rowIds) .withColumnRange(_maxColumnsRange), "query %d keys from placement %s", rowIds.size(), placement.getName()); // Track metrics _randomReadMeter.mark(rowIds.size()); // Return an iterator that decodes the row results, avoiding pinning multiple decoded rows into memory at once. return decodeRows(keys, rows, _maxColumnsRange.getLimit(), consistency); }
/** * Queries for rows given an enumerated list of Cassandra row keys. */ private Iterator<Record> rowQuery(DeltaPlacement placement, List<Map.Entry<ByteBuffer, Key>> keys, ReadConsistency consistency) { // Build the list of row IDs to query for. List<ByteBuffer> rowIds = Lists.transform(keys, entryKeyFunction()); // Query for Delta & Compaction info, just the first 50 columns for now. final Rows<ByteBuffer, DeltaKey> rows = execute(placement.getKeyspace() .prepareQuery(placement.getBlockedDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKeySlice(rowIds) .withColumnRange(_maxColumnsRange), "query %d keys from placement %s", rowIds.size(), placement.getName()); // Track metrics _randomReadMeter.mark(rowIds.size()); // Return an iterator that decodes the row results, avoiding pinning multiple decoded rows into memory at once. return decodeRows(keys, rows, _maxColumnsRange.getLimit(), consistency); }
@Override
public AllRowsQuery<K, C> withColumnRange(ByteBufferRange range) {
    // Anything that is not already a CQL-native range gets decomposed into its
    // explicit start/end/reversed/limit components and delegated.
    if (!(range instanceof CqlRangeImpl)) {
        return this.withColumnRange(range.getStart(), range.getEnd(), range.isReversed(), range.getLimit());
    }
    // CQL-native ranges are carried through on a fresh column slice.
    CqlColumnSlice<C> slice = new CqlColumnSlice<C>();
    slice.setCqlRange((CqlRangeImpl<C>) range);
    this.columnSlice = slice;
    return this;
}
@SuppressWarnings("unchecked")
@Override
public RowSliceQuery<K, C> withColumnRange(ByteBufferRange range) {
    colQueryType = ColumnSliceQueryType.ColumnRange;

    // Composite ranges are stored as-is (building first if necessary).
    if (range instanceof CompositeByteBufferRange) {
        this.compositeRange = (CompositeByteBufferRange) range;
        return this;
    }
    if (range instanceof CompositeRangeBuilder) {
        this.compositeRange = ((CompositeRangeBuilder) range).build();
        return this;
    }
    // CQL-native ranges are handed to the column slice directly.
    if (range instanceof CqlRangeImpl) {
        this.columnSlice.setCqlRange((CqlRangeImpl<C>) range);
        return this;
    }
    // Generic range: decompose into explicit bounds and delegate.
    return this.withColumnRange(range.getStart(), range.getEnd(), range.isReversed(), range.getLimit());
}
@SuppressWarnings("unchecked")
@Override
public RowQuery<K, C> withColumnRange(ByteBufferRange range) {
    queryType = RowQueryType.ColumnRange;

    // Composite ranges are stored as-is (building first if necessary).
    if (range instanceof CompositeByteBufferRange) {
        this.compositeRange = (CompositeByteBufferRange) range;
        return this;
    }
    if (range instanceof CompositeRangeBuilder) {
        this.compositeRange = ((CompositeRangeBuilder) range).build();
        return this;
    }
    // CQL-native ranges are handed to the column slice directly.
    if (range instanceof CqlRangeImpl) {
        this.columnSlice.setCqlRange((CqlRangeImpl<C>) range);
        return this;
    }
    // Generic range: decompose into explicit bounds and delegate.
    return this.withColumnRange(range.getStart(), range.getEnd(), range.isReversed(), range.getLimit());
}
@Override
public RowQuery<K, C> withColumnRange(ByteBufferRange range) {
    // Translate the generic range into a Thrift SliceRange on the predicate.
    SliceRange slice = new SliceRange()
            .setStart(range.getStart())
            .setFinish(range.getEnd())
            .setCount(range.getLimit())
            .setReversed(range.isReversed());
    predicate.setSlice_range(slice);
    return this;
}
}
@Override
public RowSliceQuery<K, C> withColumnRange(ByteBufferRange range) {
    // Translate the generic range into a Thrift SliceRange on the predicate.
    SliceRange slice = new SliceRange()
            .setStart(range.getStart())
            .setFinish(range.getEnd())
            .setCount(range.getLimit())
            .setReversed(range.isReversed());
    predicate.setSlice_range(slice);
    return this;
}
@Override
public RowSliceQuery<K, C> withColumnRange(ByteBufferRange range) {
    // Translate the generic range into a Thrift SliceRange on the predicate.
    SliceRange slice = new SliceRange()
            .setStart(range.getStart())
            .setFinish(range.getEnd())
            .setCount(range.getLimit())
            .setReversed(range.isReversed());
    predicate.setSlice_range(slice);
    return this;
}
@Override
public IndexQuery<K, C> withColumnRange(ByteBufferRange range) {
    // Translate the generic range into a Thrift SliceRange on the predicate.
    SliceRange slice = new SliceRange()
            .setStart(range.getStart())
            .setFinish(range.getEnd())
            .setCount(range.getLimit())
            .setReversed(range.isReversed());
    predicate.setSlice_range(slice);
    return this;
}
@Override
public IndexQuery<K, C> withColumnRange(ByteBufferRange range) {
    // Translate the generic range into a Thrift SliceRange on the predicate.
    SliceRange slice = new SliceRange()
            .setStart(range.getStart())
            .setFinish(range.getEnd())
            .setCount(range.getLimit())
            .setReversed(range.isReversed());
    predicate.setSlice_range(slice);
    return this;
}
@Override
public AllRowsQuery<K, C> withColumnRange(ByteBufferRange range) {
    // Translate the generic range into a Thrift SliceRange on the predicate.
    SliceRange slice = new SliceRange()
            .setStart(range.getStart())
            .setFinish(range.getEnd())
            .setCount(range.getLimit())
            .setReversed(range.isReversed());
    predicate.setSlice_range(slice);
    return this;
}
@Override
public AllRowsQuery<K, C> withColumnRange(ByteBufferRange range) {
    // Translate the generic range into a Thrift SliceRange on the predicate.
    SliceRange slice = new SliceRange()
            .setStart(range.getStart())
            .setFinish(range.getEnd())
            .setCount(range.getLimit())
            .setReversed(range.isReversed());
    predicate.setSlice_range(slice);
    return this;
}
@Override
public RowQuery<K, C> withColumnRange(ByteBufferRange range) {
    // Translate the generic range into a Thrift SliceRange on the predicate.
    SliceRange slice = new SliceRange()
            .setStart(range.getStart())
            .setFinish(range.getEnd())
            .setCount(range.getLimit())
            .setReversed(range.isReversed());
    predicate.setSlice_range(slice);
    return this;
}
}
@Timed (name = "bv.emodb.sor.AstyanaxDataReaderDAO.read", absolute = true) @Override public Record read(Key key, ReadConsistency consistency) { checkNotNull(key, "key"); checkNotNull(consistency, "consistency"); AstyanaxTable table = (AstyanaxTable) key.getTable(); AstyanaxStorage storage = table.getReadStorage(); DeltaPlacement placement = (DeltaPlacement) storage.getPlacement(); ByteBuffer rowKey = storage.getRowKey(key.getKey()); // Query for Delta & Compaction info, just the first 50 columns for now. ColumnList<DeltaKey> columns = execute(placement.getKeyspace() .prepareQuery(placement.getBlockedDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKey(rowKey) .withColumnRange(_maxColumnsRange), "read record at placement %s, table %s, key %s", placement.getName(), table.getName(), key.getKey()); // Track metrics _randomReadMeter.mark(); // Convert the results into a Record object, lazily fetching the rest of the columns as necessary. return newRecord(key, rowKey, columns, _maxColumnsRange.getLimit(), consistency, null); }
@Timed (name = "bv.emodb.sor.AstyanaxDataReaderDAO.read", absolute = true) @Override public Record read(Key key, ReadConsistency consistency) { checkNotNull(key, "key"); checkNotNull(consistency, "consistency"); AstyanaxTable table = (AstyanaxTable) key.getTable(); AstyanaxStorage storage = table.getReadStorage(); DeltaPlacement placement = (DeltaPlacement) storage.getPlacement(); ByteBuffer rowKey = storage.getRowKey(key.getKey()); // Query for Delta & Compaction info, just the first 50 columns for now. ColumnList<UUID> columns = execute(placement.getKeyspace() .prepareQuery(placement.getDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKey(rowKey) .withColumnRange(_maxColumnsRange), "read record at placement %s, table %s, key %s", placement.getName(), table.getName(), key.getKey()); // Track metrics _randomReadMeter.mark(); // Convert the results into a Record object, lazily fetching the rest of the columns as necessary. return newRecord(key, rowKey, columns, _maxColumnsRange.getLimit(), consistency, null); }
@Timed (name = "bv.emodb.sor.AstyanaxDataReaderDAO.read", absolute = true) @Override public Record read(Key key, ReadConsistency consistency) { checkNotNull(key, "key"); checkNotNull(consistency, "consistency"); AstyanaxTable table = (AstyanaxTable) key.getTable(); AstyanaxStorage storage = table.getReadStorage(); DeltaPlacement placement = (DeltaPlacement) storage.getPlacement(); ByteBuffer rowKey = storage.getRowKey(key.getKey()); // Query for Delta & Compaction info, just the first 50 columns for now. ColumnList<DeltaKey> columns = execute(placement.getKeyspace() .prepareQuery(placement.getBlockedDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKey(rowKey) .withColumnRange(_maxColumnsRange), "read record at placement %s, table %s, key %s", placement.getName(), table.getName(), key.getKey()); // Track metrics _randomReadMeter.mark(); // Convert the results into a Record object, lazily fetching the rest of the columns as necessary. return newRecord(key, rowKey, columns, _maxColumnsRange.getLimit(), consistency, null); }