.withRetryPolicy(retryPolicy.duplicate()) .getKeySlice(CassandraHelper.convert(keys));
@Override public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException { // this query could only be done when byte-ordering partitioner is used // because Cassandra operates on tokens internally which means that even contiguous // range of keys (e.g. time slice) with random partitioner could produce disjoint set of tokens // returning ambiguous results to the user. Partitioner partitioner = storeManager.getPartitioner(); if (partitioner != Partitioner.BYTEORDER) throw new PermanentBackendException("getKeys(KeyRangeQuery could only be used with byte-ordering partitioner."); ByteBuffer start = query.getKeyStart().asByteBuffer(), end = query.getKeyEnd().asByteBuffer(); RowSliceQuery rowSlice = keyspace.prepareQuery(columnFamily) .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeyRange(start, end, null, null, Integer.MAX_VALUE); // Astyanax is bad at builder pattern :( rowSlice.withColumnRange(query.getSliceStart().asByteBuffer(), query.getSliceEnd().asByteBuffer(), false, query.getLimit()); // Omit final the query's keyend from the result, if present in result final Rows<ByteBuffer, ByteBuffer> r; try { r = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) rowSlice.execute()).getResult(); } catch (ConnectionException e) { throw new TemporaryBackendException(e); } Iterator<Row<ByteBuffer, ByteBuffer>> i = Iterators.filter(r.iterator(), new KeySkipPredicate(query.getKeyEnd().asByteBuffer())); return new RowIterator(i, query); }
/**
 * Builds a query against {@code columnFamily}, applying the configured
 * consistency level and retry policy when they are set.
 *
 * @return a {@link ColumnFamilyQuery} ready for further refinement by the caller
 */
private ColumnFamilyQuery<K, C> prepareQuery() {
    ColumnFamilyQuery<K, C> query = keyspace.prepareQuery(columnFamily);
    if (consistencyLevel != null) {
        // Reassign instead of discarding the builder's return value: identical
        // when the builder mutates-and-returns-this, and correct even if an
        // implementation returns a new query instance.
        query = query.setConsistencyLevel(consistencyLevel);
    }
    if (retryPolicy != null) {
        query = query.withRetryPolicy(retryPolicy);
    }
    return query;
}
/**
 * Builds a query against {@code columnFamily}, applying the configured
 * consistency level and retry policy when they are set.
 *
 * @return a {@link ColumnFamilyQuery} ready for further refinement by the caller
 */
private ColumnFamilyQuery<K, C> prepareQuery() {
    ColumnFamilyQuery<K, C> query = keyspace.prepareQuery(columnFamily);
    if (consistencyLevel != null) {
        // Reassign instead of discarding the builder's return value: identical
        // when the builder mutates-and-returns-this, and correct even if an
        // implementation returns a new query instance.
        query = query.setConsistencyLevel(consistencyLevel);
    }
    if (retryPolicy != null) {
        query = query.withRetryPolicy(retryPolicy);
    }
    return query;
}
@Override protected void internalRun() { try { OperationResult<Rows<K, C>> result = ks.prepareQuery(cfData).withRetryPolicy(retry) .setConsistencyLevel(consistencyLevel).getKeySlice(keys) .withColumnSlice(new ColumnSlice<C>(columnSlice)).execute(); for (Row<K, C> row : result.getResult()) { callback.apply(row); } } catch (ConnectionException e) { e.printStackTrace(); } } };
@Override protected void internalRun() { try { OperationResult<Rows<K, C>> result = ks.prepareQuery(cfData).withRetryPolicy(retry) .setConsistencyLevel(consistencyLevel).getKeySlice(keys) .withColumnSlice(new ColumnSlice<C>(columnSlice)).execute(); for (Row<K, C> row : result.getResult()) { callback.apply(row); } } catch (ConnectionException e) { e.printStackTrace(); } } };
/**
 * Reads the DATA column of a single stored chunk.
 *
 * @param objectName name of the stored object the chunk belongs to
 * @param chunkId    index of the chunk within the object
 * @return the chunk's raw bytes
 * @throws Exception if the query fails (propagated from the Astyanax client)
 */
@Override
public ByteBuffer readChunk(String objectName, int chunkId) throws Exception {
    // Row key and column name are derived by the existing helpers; the two
    // configuration calls below are independent of each other.
    return keyspace
            .prepareQuery(cf)
            .withRetryPolicy(retryPolicy)
            .setConsistencyLevel(readConsistencyLevel)
            .getKey(getRowKey(objectName, chunkId))
            .getColumn(getColumnName(Columns.DATA))
            .execute()
            .getResult()
            .getByteBufferValue();
}
/**
 * Reads the DATA column of a single stored chunk.
 *
 * @param objectName name of the stored object the chunk belongs to
 * @param chunkId    index of the chunk within the object
 * @return the chunk's raw bytes
 * @throws Exception if the query fails (propagated from the Astyanax client)
 */
@Override
public ByteBuffer readChunk(String objectName, int chunkId) throws Exception {
    // Row key and column name are derived by the existing helpers; the two
    // configuration calls below are independent of each other.
    return keyspace
            .prepareQuery(cf)
            .withRetryPolicy(retryPolicy)
            .setConsistencyLevel(readConsistencyLevel)
            .getKey(getRowKey(objectName, chunkId))
            .getColumn(getColumnName(Columns.DATA))
            .execute()
            .getResult()
            .getByteBufferValue();
}
.withRetryPolicy(new RetryNTimes(5)) .withCql("SELECT * FROM astyanaxunittests.cfdirect;") .execute();
result = ks.prepareQuery(cfIndex).setConsistencyLevel(consistencyLevel).withRetryPolicy(retry) .getKey(shard).withColumnRange(range.setLimit(pageSize).build()).execute().getResult();
result = ks.prepareQuery(cfIndex).setConsistencyLevel(consistencyLevel).withRetryPolicy(retry) .getKey(shard).withColumnRange(range.setLimit(pageSize).build()).execute().getResult();
.withRetryPolicy(retryPolicy.duplicate()) .getKeySlice(CassandraHelper.convert(keys));
.withRetryPolicy(retryPolicy.duplicate()) .getKeySlice(CassandraHelper.convert(keys));
result = ks.prepareQuery(cfIndex).setConsistencyLevel(consistencyLevel).withRetryPolicy(retry) .getKeySlice(keys).withColumnRange(range.setLimit(columnLimit).build()).execute();
result = ks.prepareQuery(cfIndex).setConsistencyLevel(consistencyLevel).withRetryPolicy(retry) .getKeySlice(keys).withColumnRange(range.setLimit(columnLimit).build()).execute();
@Override public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException { // this query could only be done when byte-ordering partitioner is used // because Cassandra operates on tokens internally which means that even contiguous // range of keys (e.g. time slice) with random partitioner could produce disjoint set of tokens // returning ambiguous results to the user. Partitioner partitioner = storeManager.getPartitioner(); if (partitioner != Partitioner.BYTEORDER) throw new PermanentBackendException("getKeys(KeyRangeQuery could only be used with byte-ordering partitioner."); ByteBuffer start = query.getKeyStart().asByteBuffer(), end = query.getKeyEnd().asByteBuffer(); RowSliceQuery rowSlice = keyspace.prepareQuery(columnFamily) .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeyRange(start, end, null, null, Integer.MAX_VALUE); // Astyanax is bad at builder pattern :( rowSlice.withColumnRange(query.getSliceStart().asByteBuffer(), query.getSliceEnd().asByteBuffer(), false, query.getLimit()); // Omit final the query's keyend from the result, if present in result final Rows<ByteBuffer, ByteBuffer> r; try { r = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) rowSlice.execute()).getResult(); } catch (ConnectionException e) { throw new TemporaryBackendException(e); } Iterator<Row<ByteBuffer, ByteBuffer>> i = Iterators.filter(r.iterator(), new KeySkipPredicate(query.getKeyEnd().asByteBuffer())); return new RowIterator(i, query); }
@Override public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException { // this query could only be done when byte-ordering partitioner is used // because Cassandra operates on tokens internally which means that even contiguous // range of keys (e.g. time slice) with random partitioner could produce disjoint set of tokens // returning ambiguous results to the user. Partitioner partitioner = storeManager.getPartitioner(); if (partitioner != Partitioner.BYTEORDER) throw new PermanentBackendException("getKeys(KeyRangeQuery could only be used with byte-ordering partitioner."); ByteBuffer start = query.getKeyStart().asByteBuffer(), end = query.getKeyEnd().asByteBuffer(); RowSliceQuery rowSlice = keyspace.prepareQuery(columnFamily) .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeyRange(start, end, null, null, Integer.MAX_VALUE); // Astyanax is bad at builder pattern :( rowSlice.withColumnRange(query.getSliceStart().asByteBuffer(), query.getSliceEnd().asByteBuffer(), false, query.getLimit()); // Omit final the query's keyend from the result, if present in result final Rows<ByteBuffer, ByteBuffer> r; try { r = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) rowSlice.execute()).getResult(); } catch (ConnectionException e) { throw new TemporaryBackendException(e); } Iterator<Row<ByteBuffer, ByteBuffer>> i = Iterators.filter(r.iterator(), new KeySkipPredicate(query.getKeyEnd().asByteBuffer())); return new RowIterator(i, query); }