keyspace.prepareQuery( cf ).setConsistencyLevel( consistencyLevel ).getKeySlice( rowKeys ) .withColumnRange( rangeBuilder.build() );
keyspace.prepareQuery( cf ).setConsistencyLevel( consistencyLevel ).getKeySlice( (R) currentShard.getRowKey() ) .withColumnRange( rangeBuilder.build() );
keyspace.prepareQuery( GRAPH_DELETE ).setConsistencyLevel( fig.getReadCL() );
@Override public Optional<Long> getMaxVersion( final ApplicationScope scope, final Id node ) { ValidationUtils.validateApplicationScope( scope ); ValidationUtils.verifyIdentity( node ); ColumnFamilyQuery<ScopedRowKey<Id>, Boolean> query = keyspace.prepareQuery( GRAPH_DELETE ).setConsistencyLevel( fig.getReadCL() ); Column<Boolean> result = null; try { result = query.getKey( ScopedRowKey.fromKey( scope.getApplication(), node ) ).getColumn( COLUMN_NAME ).execute() .getResult(); } catch(NotFoundException nfe){ //swallow, there's just no column return Optional.absent(); } catch ( ConnectionException e ) { throw new RuntimeException( "Unable to connect to casandra", e ); } return Optional.of( result.getLongValue() ); }
/**
 * Probes cluster health with a trivial CQL read against {@code system.local}.
 * Any returned row means the node answered; a connection failure maps to RED.
 */
@Override
public Health getHealth() {
    try {
        final ColumnFamily<String, String> systemLocal = new ColumnFamily<String, String>(
                "system.local", StringSerializer.get(), StringSerializer.get(), StringSerializer.get() );

        final OperationResult<CqlResult<String, String>> probe = keyspace
                .prepareQuery( systemLocal )
                .setConsistencyLevel( ConsistencyLevel.CL_ONE )
                .withCql( "SELECT now() FROM system.local;" )
                .execute();

        if ( probe.getResult().getRows().size() > 0 ) {
            return Health.GREEN;
        }
    }
    catch ( ConnectionException ex ) {
        logger.error( "Error connecting to Cassandra", ex );
    }

    return Health.RED;
}
}
/**
 * Reads the shard metadata columns for the given directed-edge meta data.
 *
 * When {@code start} is present, iteration resumes from that shard's index;
 * otherwise the column range starts at the beginning of the row.
 *
 * @param scope    application scope; must be valid
 * @param start    optional shard to resume seeking from
 * @param metaData directed-edge meta data identifying the shard row; must be non-null and valid
 * @return an iterator over the shards stored for this edge meta data, page size bounded
 *         by {@code graphFig.getScanPageSize()}
 */
@Override
public Iterator<Shard> getShardMetaData( final ApplicationScope scope, final Optional<Shard> start,
                                         final DirectedEdgeMeta metaData ) {

    // Null-check before use: the original validated metaData first, which made
    // this checkNotNull unreachable for a null argument.
    Preconditions.checkNotNull( metaData, "metadata must be present" );

    ValidationUtils.validateApplicationScope( scope );
    GraphValidation.validateDirectedEdgeMeta( metaData );

    /**
     * If the start shard is present, we need to begin seeking from its index
     */
    final RangeBuilder rangeBuilder = new RangeBuilder().setLimit( graphFig.getScanPageSize() );

    if ( start.isPresent() ) {
        final Shard shard = start.get();
        GraphValidation.valiateShard( shard );
        rangeBuilder.setStart( shard.getShardIndex() );
    }

    final ScopedRowKey rowKey = ScopedRowKey.fromKey( scope.getApplication(), metaData );

    final RowQuery<ScopedRowKey<DirectedEdgeMeta>, Long> query =
            keyspace.prepareQuery( EDGE_SHARDS ).setConsistencyLevel( cassandraConfig.getReadCL() )
                    .getKey( rowKey )
                    .autoPaginate( true )
                    .withColumnRange( rangeBuilder.build() );

    return new ColumnNameIterator<>( query, COLUMN_PARSER, false );
}
.setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeySlice(CassandraHelper.convert(keys));
/**
 * Returns an iterator over the keys in the given key range, restricted to the query's
 * column slice. Only valid with a byte-ordered partitioner.
 */
@Override
public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException {
    // this query could only be done when byte-ordering partitioner is used
    // because Cassandra operates on tokens internally which means that even contiguous
    // range of keys (e.g. time slice) with random partitioner could produce disjoint set of tokens
    // returning ambiguous results to the user.
    Partitioner partitioner = storeManager.getPartitioner();
    if (partitioner != Partitioner.BYTEORDER)
        throw new PermanentBackendException("getKeys(KeyRangeQuery could only be used with byte-ordering partitioner.");

    ByteBuffer start = query.getKeyStart().asByteBuffer(), end = query.getKeyEnd().asByteBuffer();

    // NOTE(review): RowSliceQuery is used raw here, which forces the unchecked cast on
    // execute() below. Parameterizing it as RowSliceQuery<ByteBuffer, ByteBuffer> would
    // remove the cast -- confirm columnFamily's declared generic type before changing.
    RowSliceQuery rowSlice = keyspace.prepareQuery(columnFamily)
            .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax())
            .withRetryPolicy(retryPolicy.duplicate())
            .getKeyRange(start, end, null, null, Integer.MAX_VALUE);

    // Astyanax is bad at builder pattern :(
    // (withColumnRange mutates rowSlice in place; its return value is intentionally ignored.)
    rowSlice.withColumnRange(query.getSliceStart().asByteBuffer(), query.getSliceEnd().asByteBuffer(),
            false, query.getLimit());

    // Omit the query's key-end from the result, if present in the result set
    // (KeySkipPredicate below filters it out).
    final Rows<ByteBuffer, ByteBuffer> r;

    try {
        r = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) rowSlice.execute()).getResult();
    } catch (ConnectionException e) {
        // Connection problems are treated as transient and surfaced for retry.
        throw new TemporaryBackendException(e);
    }

    Iterator<Row<ByteBuffer, ByteBuffer>> i =
            Iterators.filter(r.iterator(), new KeySkipPredicate(query.getKeyEnd().asByteBuffer()));

    return new RowIterator(i, query);
}
/**
 * Builds a query for this column family, applying the configured consistency
 * level when one has been set.
 */
private ColumnFamilyQuery<K, C> prepareQuery() {
    final ColumnFamilyQuery<K, C> cfQuery = keyspace.prepareQuery(columnFamily);
    if (consistencyLevel != null) {
        cfQuery.setConsistencyLevel(consistencyLevel);
    }
    return cfQuery;
}
/**
 * Builds a query for this column family, applying the configured consistency
 * level and retry policy when they have been set.
 */
private ColumnFamilyQuery<K, C> prepareQuery() {
    final ColumnFamilyQuery<K, C> cfQuery = keyspace.prepareQuery(columnFamily);
    if (consistencyLevel != null) {
        cfQuery.setConsistencyLevel(consistencyLevel);
    }
    if (retryPolicy != null) {
        cfQuery.withRetryPolicy(retryPolicy);
    }
    return cfQuery;
}
/**
 * Creates the base query for this store's column family. The consistency level
 * and retry policy are optional overrides; each is applied only when non-null.
 */
private ColumnFamilyQuery<K, C> prepareQuery() {
    final ColumnFamilyQuery<K, C> baseQuery = keyspace.prepareQuery(columnFamily);
    if (consistencyLevel != null) {
        baseQuery.setConsistencyLevel(consistencyLevel);
    }
    if (retryPolicy != null) {
        baseQuery.withRetryPolicy(retryPolicy);
    }
    return baseQuery;
}
/**
 * Starting point for constructing a query. From the column family the client can
 * perform all 4 types of queries: get column, get key slice, get key range, and
 * index query. The requested consistency level is clamped before being applied.
 */
public <K, C> ColumnFamilyQuery<K, C> prepareQuery(ColumnFamily<K, C> cf, ConsistencyLevel consistency) {
    ColumnFamilyQuery<K, C> delegateQuery = _astyanaxKeyspace.prepareQuery(cf);
    return delegateQuery.setConsistencyLevel(clamp(consistency));
}
/**
 * Starting point for constructing a query against the given column family: get
 * column, get key slice, get key range, and index query are all reachable from
 * the returned builder. The caller-supplied consistency is clamped first.
 */
public <K, C> ColumnFamilyQuery<K, C> prepareQuery(ColumnFamily<K, C> cf, ConsistencyLevel consistency) {
    final ConsistencyLevel effective = clamp(consistency);
    return _astyanaxKeyspace.prepareQuery(cf).setConsistencyLevel(effective);
}
/**
 * Executes one key-slice read and feeds every returned row to the callback.
 */
@Override
protected void internalRun() {
    try {
        // One bulk read: the configured column slice for every key in this batch.
        OperationResult<Rows<K, C>> result = ks.prepareQuery(cfData).withRetryPolicy(retry)
                .setConsistencyLevel(consistencyLevel).getKeySlice(keys)
                .withColumnSlice(new ColumnSlice<C>(columnSlice)).execute();

        // Deliver rows to the consumer one at a time.
        for (Row<K, C> row : result.getResult()) {
            callback.apply(row);
        }
    } catch (ConnectionException e) {
        // NOTE(review): the failure is swallowed after printing a stack trace, so the
        // callback's consumer never learns the read failed. Consider using the project
        // logger and/or signalling the error to the callback instead.
        e.printStackTrace();
    }
}
};
/**
 * Executes one key-slice read and feeds every returned row to the callback.
 */
@Override
protected void internalRun() {
    try {
        // One bulk read: the configured column slice for every key in this batch.
        OperationResult<Rows<K, C>> result = ks.prepareQuery(cfData).withRetryPolicy(retry)
                .setConsistencyLevel(consistencyLevel).getKeySlice(keys)
                .withColumnSlice(new ColumnSlice<C>(columnSlice)).execute();

        // Deliver rows to the consumer one at a time.
        for (Row<K, C> row : result.getResult()) {
            callback.apply(row);
        }
    } catch (ConnectionException e) {
        // NOTE(review): the failure is swallowed after printing a stack trace, so the
        // callback's consumer never learns the read failed. Consider using the project
        // logger and/or signalling the error to the callback instead.
        e.printStackTrace();
    }
}
};
/**
 * Reads the DATA column for the (objectName, chunkId) row and returns its raw bytes.
 *
 * @param objectName name of the stored object
 * @param chunkId    index of the chunk within the object
 * @return the chunk payload as a ByteBuffer
 * @throws Exception on any failure in the read path (propagated from execute())
 */
@Override
public ByteBuffer readChunk(String objectName, int chunkId) throws Exception {
    return keyspace.prepareQuery(cf).setConsistencyLevel(readConsistencyLevel).withRetryPolicy(retryPolicy)
            .getKey(getRowKey(objectName, chunkId)).getColumn(getColumnName(Columns.DATA)).execute().getResult()
            .getByteBufferValue();
}
/**
 * Reads the DATA column for the (objectName, chunkId) row and returns its raw bytes.
 *
 * @param objectName name of the stored object
 * @param chunkId    index of the chunk within the object
 * @return the chunk payload as a ByteBuffer
 * @throws Exception on any failure in the read path (propagated from execute())
 */
@Override
public ByteBuffer readChunk(String objectName, int chunkId) throws Exception {
    return keyspace.prepareQuery(cf).setConsistencyLevel(readConsistencyLevel).withRetryPolicy(retryPolicy)
            .getKey(getRowKey(objectName, chunkId)).getColumn(getColumnName(Columns.DATA)).execute().getResult()
            .getByteBufferValue();
}
/**
 * Perform a single read operation.
 *
 * @param key the row key to read
 * @return {@code ResultOK} when the row was read with the expected number of columns,
 *         or {@code CacheMiss} when the row is empty
 * @throws Exception when the row holds fewer columns than configured, or when the
 *         underlying Cassandra read fails
 */
@Override
public String readSingle(String key) throws Exception {
    ColumnList<Integer> result = keyspace.prepareQuery(this.CF)
            .setConsistencyLevel(ConsistencyLevel.valueOf(config.getReadConsistencyLevel()))
            .getRow(key)
            .execute().getResult();

    if (result.isEmpty()) {
        // Row not found -- report a miss rather than failing.
        return CacheMiss;
    }

    if (result.size() < config.getColsPerRow()) {
        // A short row means columns were lost; fail with the observed size.
        // IllegalStateException instead of raw Exception -- still satisfies `throws Exception`.
        throw new IllegalStateException("Num Cols returned not ok " + result.size());
    }

    return ResultOK;
}
/**
 * Attempts to claim uniqueness of {@code key} via a two-phase write/read protocol.
 *
 * Phase 1 writes a freshly generated unique column (with TTL) into the key's row.
 * Phase 2 reads the row back: exactly one column means this caller won; any other
 * count means a collision, so our column is rolled back (deleted).
 *
 * @param key the row key to claim
 * @return the unique column that was written when the claim succeeded, or
 *         {@code null} when another writer holds or raced for the key
 * @throws ConnectionException on Cassandra failure during write, read, or rollback
 */
public C isUnique(K key) throws ConnectionException {
    C unique = uniqueColumnSupplier.get();

    // Phase 1: Write a unique column
    MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
    m.withRow(columnFamily, key).putEmptyColumn(unique, ttl);
    m.execute();

    // Phase 2: Read back all columns. There should be only 1
    ColumnList<C> result = keyspace.prepareQuery(columnFamily).setConsistencyLevel(consistencyLevel).getKey(key)
            .execute().getResult();

    if (result.size() == 1) {
        return unique;
    }

    // Collision detected: notify the monitor (if any) before rolling back.
    if (this.monitor != null)
        this.monitor.onViolation(key, unique);

    // Rollback: delete the column we wrote so we don't block future claimants.
    m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
    m.withRow(columnFamily, key).deleteColumn(unique);
    m.execute();

    return null;
}
}
/**
 * Attempts to claim uniqueness of {@code key} via a two-phase write/read protocol.
 *
 * Phase 1 writes a freshly generated unique column (with TTL) into the key's row.
 * Phase 2 reads the row back: exactly one column means this caller won; any other
 * count means a collision, so our column is rolled back (deleted).
 *
 * @param key the row key to claim
 * @return the unique column that was written when the claim succeeded, or
 *         {@code null} when another writer holds or raced for the key
 * @throws ConnectionException on Cassandra failure during write, read, or rollback
 */
public C isUnique(K key) throws ConnectionException {
    C unique = uniqueColumnSupplier.get();

    // Phase 1: Write a unique column
    MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
    m.withRow(columnFamily, key).putEmptyColumn(unique, ttl);
    m.execute();

    // Phase 2: Read back all columns. There should be only 1
    ColumnList<C> result = keyspace.prepareQuery(columnFamily).setConsistencyLevel(consistencyLevel).getKey(key)
            .execute().getResult();

    if (result.size() == 1) {
        return unique;
    }

    // Collision detected: notify the monitor (if any) before rolling back.
    if (this.monitor != null)
        this.monitor.onViolation(key, unique);

    // Rollback: delete the column we wrote so we don't block future claimants.
    m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
    m.withRow(columnFamily, key).deleteColumn(unique);
    m.execute();

    return null;
}
}