// NOTE(review): truncated fragment — only the opening statement of mutateMany is visible.
// Builds an Astyanax MutationBatch at the transaction's write consistency level with a
// fresh copy of the retry policy; `atomicBatch` presumably toggles Cassandra atomic
// batches — confirm against the store manager's configuration. Rest of body not shown here.
@Override public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> batch, StoreTransaction txh) throws BackendException { MutationBatch m = keyspaceContext.getClient().prepareMutationBatch().withAtomicBatch(atomicBatch) .setConsistencyLevel(getTx(txh).getWriteConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate());
// NOTE(review): orphaned continuation of a chained Astyanax query — the statement's
// head (keyspace.prepareQuery(...)?) is not visible in this chunk. Applies the
// transaction's read consistency level and a duplicated retry policy, then issues a
// multi-key row slice for the converted key set.
.setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeySlice(CassandraHelper.convert(keys));
@Override public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException { // this query could only be done when byte-ordering partitioner is used // because Cassandra operates on tokens internally which means that even contiguous // range of keys (e.g. time slice) with random partitioner could produce disjoint set of tokens // returning ambiguous results to the user. Partitioner partitioner = storeManager.getPartitioner(); if (partitioner != Partitioner.BYTEORDER) throw new PermanentBackendException("getKeys(KeyRangeQuery could only be used with byte-ordering partitioner."); ByteBuffer start = query.getKeyStart().asByteBuffer(), end = query.getKeyEnd().asByteBuffer(); RowSliceQuery rowSlice = keyspace.prepareQuery(columnFamily) .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeyRange(start, end, null, null, Integer.MAX_VALUE); // Astyanax is bad at builder pattern :( rowSlice.withColumnRange(query.getSliceStart().asByteBuffer(), query.getSliceEnd().asByteBuffer(), false, query.getLimit()); // Omit final the query's keyend from the result, if present in result final Rows<ByteBuffer, ByteBuffer> r; try { r = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) rowSlice.execute()).getResult(); } catch (ConnectionException e) { throw new TemporaryBackendException(e); } Iterator<Row<ByteBuffer, ByteBuffer>> i = Iterators.filter(r.iterator(), new KeySkipPredicate(query.getKeyEnd().asByteBuffer())); return new RowIterator(i, query); }
// NOTE(review): truncated fragment — only the opening statement of mutateMany is visible.
// Builds an Astyanax MutationBatch at the transaction's write consistency level with a
// fresh copy of the retry policy; `atomicBatch` presumably toggles Cassandra atomic
// batches — confirm against the store manager's configuration. Rest of body not shown here.
@Override public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> batch, StoreTransaction txh) throws BackendException { MutationBatch m = keyspaceContext.getClient().prepareMutationBatch().withAtomicBatch(atomicBatch) .setConsistencyLevel(getTx(txh).getWriteConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate());
// NOTE(review): truncated fragment — only the opening statement of mutateMany is visible.
// Builds an Astyanax MutationBatch at the transaction's write consistency level with a
// fresh copy of the retry policy; `atomicBatch` presumably toggles Cassandra atomic
// batches — confirm against the store manager's configuration. Rest of body not shown here.
@Override public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> batch, StoreTransaction txh) throws BackendException { MutationBatch m = keyspaceContext.getClient().prepareMutationBatch().withAtomicBatch(atomicBatch) .setConsistencyLevel(getTx(txh).getWriteConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate());
// NOTE(review): orphaned continuation of a chained Astyanax query — the statement's
// head (keyspace.prepareQuery(...)?) is not visible in this chunk. Applies the
// transaction's read consistency level and a duplicated retry policy, then issues a
// multi-key row slice for the converted key set.
.setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeySlice(CassandraHelper.convert(keys));
// NOTE(review): orphaned continuation of a chained Astyanax query — the statement's
// head (keyspace.prepareQuery(...)?) is not visible in this chunk. Applies the
// transaction's read consistency level and a duplicated retry policy, then issues a
// multi-key row slice for the converted key set.
.setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeySlice(CassandraHelper.convert(keys));
@Override public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException { // this query could only be done when byte-ordering partitioner is used // because Cassandra operates on tokens internally which means that even contiguous // range of keys (e.g. time slice) with random partitioner could produce disjoint set of tokens // returning ambiguous results to the user. Partitioner partitioner = storeManager.getPartitioner(); if (partitioner != Partitioner.BYTEORDER) throw new PermanentBackendException("getKeys(KeyRangeQuery could only be used with byte-ordering partitioner."); ByteBuffer start = query.getKeyStart().asByteBuffer(), end = query.getKeyEnd().asByteBuffer(); RowSliceQuery rowSlice = keyspace.prepareQuery(columnFamily) .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeyRange(start, end, null, null, Integer.MAX_VALUE); // Astyanax is bad at builder pattern :( rowSlice.withColumnRange(query.getSliceStart().asByteBuffer(), query.getSliceEnd().asByteBuffer(), false, query.getLimit()); // Omit final the query's keyend from the result, if present in result final Rows<ByteBuffer, ByteBuffer> r; try { r = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) rowSlice.execute()).getResult(); } catch (ConnectionException e) { throw new TemporaryBackendException(e); } Iterator<Row<ByteBuffer, ByteBuffer>> i = Iterators.filter(r.iterator(), new KeySkipPredicate(query.getKeyEnd().asByteBuffer())); return new RowIterator(i, query); }
@Override public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException { // this query could only be done when byte-ordering partitioner is used // because Cassandra operates on tokens internally which means that even contiguous // range of keys (e.g. time slice) with random partitioner could produce disjoint set of tokens // returning ambiguous results to the user. Partitioner partitioner = storeManager.getPartitioner(); if (partitioner != Partitioner.BYTEORDER) throw new PermanentBackendException("getKeys(KeyRangeQuery could only be used with byte-ordering partitioner."); ByteBuffer start = query.getKeyStart().asByteBuffer(), end = query.getKeyEnd().asByteBuffer(); RowSliceQuery rowSlice = keyspace.prepareQuery(columnFamily) .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeyRange(start, end, null, null, Integer.MAX_VALUE); // Astyanax is bad at builder pattern :( rowSlice.withColumnRange(query.getSliceStart().asByteBuffer(), query.getSliceEnd().asByteBuffer(), false, query.getLimit()); // Omit final the query's keyend from the result, if present in result final Rows<ByteBuffer, ByteBuffer> r; try { r = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) rowSlice.execute()).getResult(); } catch (ConnectionException e) { throw new TemporaryBackendException(e); } Iterator<Row<ByteBuffer, ByteBuffer>> i = Iterators.filter(r.iterator(), new KeySkipPredicate(query.getKeyEnd().asByteBuffer())); return new RowIterator(i, query); }