/**
 * Parses a Cassandra consistency level from its string representation.
 * Accepts the numeric shorthands "1", "2", "3" (ONE, TWO, THREE), an enum
 * constant name, or the Thrift-style "CL_"-prefixed name, case-insensitively.
 *
 * @param value the consistency level name; must be non-null and non-blank
 * @return the matching CLevel constant
 * @throws IllegalArgumentException if value is null, blank, or unrecognized
 */
public static CLevel parse(String value) {
    Preconditions.checkArgument(value != null && !value.isEmpty());
    value = value.trim();
    // Reject whitespace-only input here with a precondition failure instead of
    // letting it fall through to the less specific "unrecognized" error below.
    Preconditions.checkArgument(!value.isEmpty());
    if (value.equals("1")) return ONE;
    else if (value.equals("2")) return TWO;
    else if (value.equals("3")) return THREE;
    else {
        for (CLevel c : values()) {
            if (c.toString().equalsIgnoreCase(value) || ("CL_" + c.toString()).equalsIgnoreCase(value))
                return c;
        }
    }
    throw new IllegalArgumentException("Unrecognized cassandra consistency level: " + value);
}
}
// Prepares a single Astyanax MutationBatch for the whole key->column-family
// mutation map, using the transaction's write consistency level (converted to
// its Astyanax equivalent) and a duplicate of the configured retry policy.
// atomicBatch controls whether Cassandra applies the batch atomically.
// NOTE(review): fragment is truncated here — population and execution of `m`
// are outside this view.
@Override public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> batch, StoreTransaction txh) throws BackendException { MutationBatch m = keyspaceContext.getClient().prepareMutationBatch().withAtomicBatch(atomicBatch) .setConsistencyLevel(getTx(txh).getWriteConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate());
// Applies the accumulated row mutations at the transaction's write consistency
// level, converted to the embedded-Cassandra ConsistencyLevel via getDB().
mutate(new ArrayList<org.apache.cassandra.db.Mutation>(rowMutations.values()), getTx(txh).getWriteConsistencyLevel().getDB());
@Test public void testReadConsistencyLevel() { int levelsChecked = 0; // Test whether CassandraTransaction honors the write consistency level option for (CLevel writeLevel : CLevel.values()) { StandardBaseTransactionConfig.Builder b = new StandardBaseTransactionConfig.Builder(); ModifiableConfiguration mc = GraphDatabaseConfiguration.buildGraphConfiguration(); mc.set(CASSANDRA_READ_CONSISTENCY, writeLevel.name()); b.timestampProvider(TimestampProviders.MICRO); b.customOptions(mc); CassandraTransaction ct = new CassandraTransaction(b.build()); assertEquals(writeLevel, ct.getReadConsistencyLevel()); levelsChecked++; } // Sanity check: if CLevel.values was empty, something is wrong with the test Preconditions.checkState(0 < levelsChecked); }
// Resolve the transaction's write consistency level to its Thrift equivalent
// for the mutation call below.
ConsistencyLevel consistency = getTx(txh).getWriteConsistencyLevel().getThrift();
@Test public void testWriteConsistencyLevel() { int levelsChecked = 0; // Test whether CassandraTransaction honors the write consistency level option for (CLevel writeLevel : CLevel.values()) { StandardBaseTransactionConfig.Builder b = new StandardBaseTransactionConfig.Builder(); ModifiableConfiguration mc = GraphDatabaseConfiguration.buildGraphConfiguration(); mc.set(CASSANDRA_WRITE_CONSISTENCY, writeLevel.name()); b.customOptions(mc); b.timestampProvider(TimestampProviders.MICRO); CassandraTransaction ct = new CassandraTransaction(b.build()); assertEquals(writeLevel, ct.getWriteConsistencyLevel()); levelsChecked++; } // Sanity check: if CLevel.values was empty, something is wrong with the test Preconditions.checkState(0 < levelsChecked); }
// Resolve the transaction's read consistency level to its Thrift equivalent,
// then build an empty slice predicate/range to be populated for the read.
ConsistencyLevel consistency = getTx(txh).getReadConsistencyLevel().getThrift(); SlicePredicate predicate = new SlicePredicate(); SliceRange range = new SliceRange();
/**
 * Converts a string to the corresponding consistency level. Accepts the
 * numeric shorthands "1"/"2"/"3", an enum constant name, or a "CL_"-prefixed
 * name, all matched case-insensitively.
 *
 * @param value the consistency level name; must be non-null and non-empty
 * @return the matching CLevel constant
 * @throws IllegalArgumentException if the value cannot be recognized
 */
public static CLevel parse(String value) {
    Preconditions.checkArgument(value != null && !value.isEmpty());
    final String normalized = value.trim();
    switch (normalized) {
        case "1":
            return ONE;
        case "2":
            return TWO;
        case "3":
            return THREE;
        default:
            for (CLevel level : values()) {
                final String name = level.toString();
                if (name.equalsIgnoreCase(normalized) || ("CL_" + name).equalsIgnoreCase(normalized)) {
                    return level;
                }
            }
    }
    throw new IllegalArgumentException("Unrecognized cassandra consistency level: " + normalized);
}
}
// Continue the Astyanax query builder: read at the transaction's consistency
// level, with a duplicated retry policy, fetching the converted key slice.
.setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeySlice(CassandraHelper.convert(keys));
// Build a slice read for the single query key at timestamp nowMillis and
// execute it at the transaction's read consistency level (embedded-Cassandra
// ConsistencyLevel via getDB()).
ReadCommand sliceCmd = new SliceFromReadCommand(keyspace, query.getKey().asByteBuffer(), columnFamily, nowMillis, sqf); List<Row> slice = read(sliceCmd, getTx(txh).getReadConsistencyLevel().getDB());
// Resolve the transaction's write consistency level to its Thrift equivalent
// for the mutation call below.
ConsistencyLevel consistency = getTx(txh).getWriteConsistencyLevel().getThrift();
// Parses a consistency level string: numeric shorthands "1"/"2"/"3" map to
// ONE/TWO/THREE; otherwise the enum name or its "CL_"-prefixed variant is
// matched case-insensitively. Throws IllegalArgumentException otherwise.
// NOTE(review): this method appears duplicated elsewhere in the codebase —
// consider consolidating into a single shared implementation.
public static CLevel parse(String value) { Preconditions.checkArgument(value != null && !value.isEmpty()); value = value.trim(); if (value.equals("1")) return ONE; else if (value.equals("2")) return TWO; else if (value.equals("3")) return THREE; else { for (CLevel c : values()) { if (c.toString().equalsIgnoreCase(value) || ("CL_" + c.toString()).equalsIgnoreCase(value)) return c; } } throw new IllegalArgumentException("Unrecognized cassandra consistency level: " + value); } }
@Override public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException { // this query could only be done when byte-ordering partitioner is used // because Cassandra operates on tokens internally which means that even contiguous // range of keys (e.g. time slice) with random partitioner could produce disjoint set of tokens // returning ambiguous results to the user. Partitioner partitioner = storeManager.getPartitioner(); if (partitioner != Partitioner.BYTEORDER) throw new PermanentBackendException("getKeys(KeyRangeQuery could only be used with byte-ordering partitioner."); ByteBuffer start = query.getKeyStart().asByteBuffer(), end = query.getKeyEnd().asByteBuffer(); RowSliceQuery rowSlice = keyspace.prepareQuery(columnFamily) .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeyRange(start, end, null, null, Integer.MAX_VALUE); // Astyanax is bad at builder pattern :( rowSlice.withColumnRange(query.getSliceStart().asByteBuffer(), query.getSliceEnd().asByteBuffer(), false, query.getLimit()); // Omit final the query's keyend from the result, if present in result final Rows<ByteBuffer, ByteBuffer> r; try { r = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) rowSlice.execute()).getResult(); } catch (ConnectionException e) { throw new TemporaryBackendException(e); } Iterator<Row<ByteBuffer, ByteBuffer>> i = Iterators.filter(r.iterator(), new KeySkipPredicate(query.getKeyEnd().asByteBuffer())); return new RowIterator(i, query); }
// Applies the accumulated row mutations at the transaction's write consistency
// level, converted to the embedded-Cassandra ConsistencyLevel via getDB().
mutate(new ArrayList<org.apache.cassandra.db.Mutation>(rowMutations.values()), getTx(txh).getWriteConsistencyLevel().getDB());