@Override
public Map<UUID, String> loadSegments(String queue) {
    Map<UUID, String> resultMap = Maps.newHashMap();
    Iterator<Column<UUID>> iter = executePaginated(
            _keyspace.prepareQuery(CF_DEDUP_MD, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getKey(queue)
                    .withColumnRange(new RangeBuilder().setLimit(100).build())
                    .autoPaginate(true));
    while (iter.hasNext()) {
        Column<UUID> column = iter.next();
        resultMap.put(column.getName(), column.getStringValue());
    }
    return resultMap;
}
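// The snippets above and below rely on an executePaginated() helper that is not
// shown in this section.  A minimal sketch of what it plausibly looks like,
// assuming the query was built with autoPaginate(true) so each execute() call
// returns the next page (an illustration, not the source's actual helper):
private <C> Iterator<Column<C>> executePaginated(final RowQuery<?, C> query) {
    return Iterators.concat(new AbstractIterator<Iterator<Column<C>>>() {
        @Override
        protected Iterator<Column<C>> computeNext() {
            try {
                // With autoPaginate(true), each execute() returns the next page.
                ColumnList<C> page = query.execute().getResult();
                return !page.isEmpty() ? page.iterator() : endOfData();
            } catch (ConnectionException e) {
                throw Throwables.propagate(e);
            }
        }
    });
}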
@Nullable
@Override
public ByteBuffer findMinRecord(UUID dataId, @Nullable ByteBuffer from) {
    // Use a column range with a "start" to skip past tombstones.
    ColumnList<ByteBuffer> columns = execute(
            _keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getKey(dataId)
                    .withColumnRange(new RangeBuilder()
                            .setStart(Objects.firstNonNull(from, EMPTY_BUFFER))
                            .setLimit(1)
                            .build()));
    return !columns.isEmpty() ? columns.getColumnByIndex(0).getName() : null;
}
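// Several methods in this section call a one-argument execute() helper that is
// also not shown.  A plausible sketch, assuming it simply runs the Astyanax
// query and unwraps the OperationResult (illustrative only):
private <R> R execute(Execution<R> execution) {
    try {
        return execution.execute().getResult();
    } catch (ConnectionException e) {
        throw Throwables.propagate(e);
    }
}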
/**
 * Queries for rows given an enumerated list of Cassandra row keys.
 */
private Iterator<Record> rowQuery(DeltaPlacement placement, List<Map.Entry<ByteBuffer, Key>> keys, ReadConsistency consistency) {
    // Build the list of row IDs to query for.
    List<ByteBuffer> rowIds = Lists.transform(keys, entryKeyFunction());

    // Query for Delta & Compaction info, just the first 50 columns for now.
    final Rows<ByteBuffer, DeltaKey> rows = execute(placement.getKeyspace()
                    .prepareQuery(placement.getBlockedDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency))
                    .getKeySlice(rowIds)
                    .withColumnRange(_maxColumnsRange),
            "query %d keys from placement %s", rowIds.size(), placement.getName());

    // Track metrics
    _randomReadMeter.mark(rowIds.size());

    // Return an iterator that decodes the row results, avoiding pinning multiple decoded rows into memory at once.
    return decodeRows(keys, rows, _maxColumnsRange.getLimit(), consistency);
}
/**
 * Queries for rows given an enumerated list of Cassandra row keys.
 * (Variant of the preceding rowQuery() that reads the non-blocked delta
 * column family, whose column names are time UUIDs.)
 */
private Iterator<Record> rowQuery(DeltaPlacement placement, List<Map.Entry<ByteBuffer, Key>> keys, ReadConsistency consistency) {
    // Build the list of row IDs to query for.
    List<ByteBuffer> rowIds = Lists.transform(keys, entryKeyFunction());

    // Query for Delta & Compaction info, just the first 50 columns for now.
    final Rows<ByteBuffer, UUID> rows = execute(placement.getKeyspace()
                    .prepareQuery(placement.getDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency))
                    .getKeySlice(rowIds)
                    .withColumnRange(_maxColumnsRange),
            "query %d keys from placement %s", rowIds.size(), placement.getName());

    // Track metrics
    _randomReadMeter.mark(rowIds.size());

    // Return an iterator that decodes the row results, avoiding pinning multiple decoded rows into memory at once.
    return decodeRows(keys, rows, _maxColumnsRange.getLimit(), consistency);
}
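// rowQuery() and read() use an execute() overload that attaches a human-readable
// operation description for error reporting.  A hedged sketch of what it might
// look like (the real helper may wrap or report the failure differently):
private <R> R execute(Execution<R> execution, String operation, Object... operationArguments) {
    try {
        return execution.execute().getResult();
    } catch (ConnectionException e) {
        // Include the formatted operation description so failures are traceable.
        throw new RuntimeException("Failed to " + String.format(operation, operationArguments), e);
    }
}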
@Override
public Iterator<ByteBuffer> scanRecords(UUID dataId, @Nullable ByteBuffer from, @Nullable final ByteBuffer to, int batchSize, int limit) {
    final Iterator<Column<ByteBuffer>> iter = executePaginated(
            _keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getKey(dataId)
                    .withColumnRange(new RangeBuilder()
                            .setStart(Objects.firstNonNull(from, EMPTY_BUFFER))
                            .setEnd(Objects.firstNonNull(to, EMPTY_BUFFER))
                            .setLimit(batchSize)
                            .build())
                    .autoPaginate(true));

    return Iterators.limit(new AbstractIterator<ByteBuffer>() {
        @Override
        protected ByteBuffer computeNext() {
            while (iter.hasNext()) {
                ByteBuffer record = iter.next().getName();
                if (!record.equals(to)) {  // "to" is exclusive
                    return record;
                }
            }
            return endOfData();
        }
    }, limit);
}
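// Hypothetical usage fragment from within the same class (segmentId is a
// placeholder name, not from the source): stream at most 1,000 record keys
// from one segment, fetching 100 columns per Cassandra round trip.
Iterator<ByteBuffer> records = scanRecords(segmentId, null /* from the start */, null /* no upper bound */, 100, 1000);
while (records.hasNext()) {
    ByteBuffer recordId = records.next();
    // ... consume recordId ...
}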
@Override
public Map<UUID, ByteBuffer> findMaxRecords(Collection<UUID> dataIds) {
    // Finding the max using a reversed column range shouldn't have to worry about skipping tombstones since
    // we always delete smaller column values before deleting larger column values--scanning will hit the max
    // before needing to skip over tombstones.
    Map<UUID, ByteBuffer> resultMap = Maps.newHashMap();
    for (List<UUID> batch : Iterables.partition(dataIds, 10)) {
        Rows<UUID, ByteBuffer> rows = execute(
                _keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
                        .getKeySlice(batch)
                        .withColumnRange(new RangeBuilder()
                                .setReversed(true)
                                .setLimit(1)
                                .build()));
        for (Row<UUID, ByteBuffer> row : rows) {
            UUID dataId = row.getKey();
            for (Column<ByteBuffer> column : row.getColumns()) {
                resultMap.put(dataId, column.getName());
            }
        }
    }
    return resultMap;
}
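// Hypothetical usage fragment pairing findMaxRecords() with findMinRecord() to
// compute per-segment [min, max] bounds (segmentIds is a placeholder name):
Map<UUID, ByteBuffer> maxBySegment = findMaxRecords(segmentIds);
for (Map.Entry<UUID, ByteBuffer> entry : maxBySegment.entrySet()) {
    ByteBuffer min = findMinRecord(entry.getKey(), null);  // null when no live records remain
    // ... use the [min, entry.getValue()] bounds for segment entry.getKey() ...
}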
@Timed (name = "bv.emodb.sor.AstyanaxDataReaderDAO.read", absolute = true) @Override public Record read(Key key, ReadConsistency consistency) { checkNotNull(key, "key"); checkNotNull(consistency, "consistency"); AstyanaxTable table = (AstyanaxTable) key.getTable(); AstyanaxStorage storage = table.getReadStorage(); DeltaPlacement placement = (DeltaPlacement) storage.getPlacement(); ByteBuffer rowKey = storage.getRowKey(key.getKey()); // Query for Delta & Compaction info, just the first 50 columns for now. ColumnList<UUID> columns = execute(placement.getKeyspace() .prepareQuery(placement.getDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKey(rowKey) .withColumnRange(_maxColumnsRange), "read record at placement %s, table %s, key %s", placement.getName(), table.getName(), key.getKey()); // Track metrics _randomReadMeter.mark(); // Convert the results into a Record object, lazily fetching the rest of the columns as necessary. return newRecord(key, rowKey, columns, _maxColumnsRange.getLimit(), consistency, null); }
@Timed (name = "bv.emodb.sor.AstyanaxDataReaderDAO.read", absolute = true) @Override public Record read(Key key, ReadConsistency consistency) { checkNotNull(key, "key"); checkNotNull(consistency, "consistency"); AstyanaxTable table = (AstyanaxTable) key.getTable(); AstyanaxStorage storage = table.getReadStorage(); DeltaPlacement placement = (DeltaPlacement) storage.getPlacement(); ByteBuffer rowKey = storage.getRowKey(key.getKey()); // Query for Delta & Compaction info, just the first 50 columns for now. ColumnList<DeltaKey> columns = execute(placement.getKeyspace() .prepareQuery(placement.getBlockedDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKey(rowKey) .withColumnRange(_maxColumnsRange), "read record at placement %s, table %s, key %s", placement.getName(), table.getName(), key.getKey()); // Track metrics _randomReadMeter.mark(); // Convert the results into a Record object, lazily fetching the rest of the columns as necessary. return newRecord(key, rowKey, columns, _maxColumnsRange.getLimit(), consistency, null); }
@Timed (name = "bv.emodb.sor.AstyanaxDataReaderDAO.read", absolute = true) @Override public Record read(Key key, ReadConsistency consistency) { checkNotNull(key, "key"); checkNotNull(consistency, "consistency"); AstyanaxTable table = (AstyanaxTable) key.getTable(); AstyanaxStorage storage = table.getReadStorage(); DeltaPlacement placement = (DeltaPlacement) storage.getPlacement(); ByteBuffer rowKey = storage.getRowKey(key.getKey()); // Query for Delta & Compaction info, just the first 50 columns for now. ColumnList<UUID> columns = execute(placement.getKeyspace() .prepareQuery(placement.getDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKey(rowKey) .withColumnRange(_maxColumnsRange), "read record at placement %s, table %s, key %s", placement.getName(), table.getName(), key.getKey()); // Track metrics _randomReadMeter.mark(); // Convert the results into a Record object, lazily fetching the rest of the columns as necessary. return newRecord(key, rowKey, columns, _maxColumnsRange.getLimit(), consistency, null); }
@Timed (name = "bv.emodb.sor.AstyanaxDataReaderDAO.read", absolute = true) @Override public Record read(Key key, ReadConsistency consistency) { checkNotNull(key, "key"); checkNotNull(consistency, "consistency"); AstyanaxTable table = (AstyanaxTable) key.getTable(); AstyanaxStorage storage = table.getReadStorage(); DeltaPlacement placement = (DeltaPlacement) storage.getPlacement(); ByteBuffer rowKey = storage.getRowKey(key.getKey()); // Query for Delta & Compaction info, just the first 50 columns for now. ColumnList<DeltaKey> columns = execute(placement.getKeyspace() .prepareQuery(placement.getBlockedDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency)) .getKey(rowKey) .withColumnRange(_maxColumnsRange), "read record at placement %s, table %s, key %s", placement.getName(), table.getName(), key.getKey()); // Track metrics _randomReadMeter.mark(); // Convert the results into a Record object, lazily fetching the rest of the columns as necessary. return newRecord(key, rowKey, columns, _maxColumnsRange.getLimit(), consistency, null); }
@Override
public Iterator<String> listQueues() {
    final Iterator<Row<String, UUID>> rowIter = execute(
            _keyspace.prepareQuery(CF_DEDUP_MD, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getAllRows()
                    .setRowLimit(100)
                    .withColumnRange(new RangeBuilder().setLimit(1).build()))
            .iterator();
    return new AbstractIterator<String>() {
        @Override
        protected String computeNext() {
            while (rowIter.hasNext()) {
                Row<String, UUID> row = rowIter.next();
                // Skip "ghost" keys: deleted rows whose tombstones still surface in range scans.
                if (!row.getColumns().isEmpty()) {
                    return row.getKey();
                }
            }
            return endOfData();
        }
    };
}
@Override
public Iterator<String> listChannels() {
    final Iterator<Row<String, ByteBuffer>> rowIter = execute(
            _keyspace.prepareQuery(ColumnFamilies.MANIFEST, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getAllRows()
                    .setRowLimit(1000)
                    .withColumnRange(new RangeBuilder().setLimit(1).build()))
            .iterator();
    return new AbstractIterator<String>() {
        @Override
        protected String computeNext() {
            while (rowIter.hasNext()) {
                Row<String, ByteBuffer> row = rowIter.next();
                // Skip "ghost" keys: deleted rows whose tombstones still surface in range scans.
                if (!row.getColumns().isEmpty()) {
                    return row.getKey();
                }
            }
            return endOfData();
        }
    };
}
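// Illustrative refactoring sketch, not from the source: listQueues() and
// listChannels() differ only in column family, page size, and column type, so
// both could delegate to a shared helper along these lines (the helper name is
// hypothetical):
private <C> Iterator<String> listNonEmptyRowKeys(ColumnFamily<String, C> columnFamily, int pageSize) {
    final Iterator<Row<String, C>> rowIter = execute(
            _keyspace.prepareQuery(columnFamily, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getAllRows()
                    .setRowLimit(pageSize)  // rows fetched per page, not a total limit
                    .withColumnRange(new RangeBuilder().setLimit(1).build()))
            .iterator();
    return new AbstractIterator<String>() {
        @Override
        protected String computeNext() {
            while (rowIter.hasNext()) {
                Row<String, C> row = rowIter.next();
                if (!row.getColumns().isEmpty()) {
                    return row.getKey();
                }
            }
            return endOfData();
        }
    };
}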
@ParameterizedTimed(type = "AstyanaxStorageProvider")
@Override
public StorageSummary readMetadata(Table tbl, String blobId) {
    AstyanaxTable table = (AstyanaxTable) checkNotNull(tbl, "table");
    AstyanaxStorage storage = table.getReadStorage();
    BlobPlacement placement = (BlobPlacement) storage.getPlacement();

    // Do a column range query on all the A and B columns.  Don't get the Z columns with the binary data.
    Composite start = getColumnPrefix(ColumnGroup.A, Composite.ComponentEquality.LESS_THAN_EQUAL);
    Composite end = getColumnPrefix(ColumnGroup.B, Composite.ComponentEquality.GREATER_THAN_EQUAL);
    ColumnList<Composite> columns = execute(placement.getKeyspace()
            .prepareQuery(placement.getBlobColumnFamily(), _readConsistency)
            .getKey(storage.getRowKey(blobId))
            .withColumnRange(start, end, false, Integer.MAX_VALUE));

    StorageSummary summary = toStorageSummary(columns);
    if (summary == null) {
        return null;
    }

    // Cleanup older versions of the blob, if any (unlikely).
    deleteOldColumns(table, blobId, columns, summary.getTimestamp());

    return summary;
}
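// A hedged sketch of the getColumnPrefix() helper used above, assuming
// ColumnGroup is an enum whose name is the group's single-character column
// prefix.  The ComponentEquality flag controls whether the resulting prefix
// sorts before or after the group's real columns, so the [A/LESS_THAN_EQUAL,
// B/GREATER_THAN_EQUAL] pair brackets every A and B column while excluding Z.
// (Illustrative; the source's actual helper may differ.)
private Composite getColumnPrefix(ColumnGroup group, Composite.ComponentEquality equality) {
    Composite prefix = new Composite();
    prefix.addComponent(group.name(), StringSerializer.get(), equality);
    return prefix;
}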