/**
 * Execute the query again and set the results.
 *
 * <p>Fetches the next page of columns from Cassandra and replaces
 * {@code sourceIterator} with an iterator over that page. Any transport
 * failure is wrapped in a {@link RuntimeException} carrying the cause.
 */
private void advanceIterator() {
    //run producing the values within a hystrix command. This way we'll time out if the read takes too long
    // NOTE(review): the comment above mentions Hystrix, but no Hystrix wrapper is
    // visible in this method -- confirm whether the timeout protection still exists.
    try {
        sourceIterator = rowQuery.execute().getResult().iterator();
    }
    catch ( ConnectionException e ) {
        // Preserve the cause so callers can see the underlying transport error.
        throw new RuntimeException( "Unable to get next page", e );
    }
}
}
// Resolve pool type and node-discovery strategy from string config values;
// ConnectionPoolType.valueOf / NodeDiscoveryType.valueOf throw IllegalArgumentException
// on unrecognized values.
final ConnectionPoolType poolType = ConnectionPoolType.valueOf(config.get(CONNECTION_POOL_TYPE));
final NodeDiscoveryType discType = NodeDiscoveryType.valueOf(config.get(NODE_DISCOVERY_TYPE));
// NOTE(review): the result of this builder chain is discarded -- presumably it
// should be assigned to `cpool`, which is used below but never assigned in this
// fragment. Confirm against the full method.
new ConnectionPoolConfigurationImpl(usedFor + "TitanConnectionPool")
    .setPort(port)
    .setMaxOperationsPerConnection(maxOperationsPerConnection)
    .setMaxConnsPerHost(maxConnsPerHost)
    .setRetryDelaySlice(retryDelaySlice)
    .setRetryMaxDelaySlice(retryMaxDelaySlice)
    .setRetrySuspendWindow(retrySuspendWindow)
    // NOTE(review): socket and connect timeouts both use `connectionTimeout` --
    // confirm this is intentional.
    .setSocketTimeout(connectionTimeout)
    .setConnectTimeout(connectionTimeout)
    .setSeeds(StringUtils.join(hostnames, ","));
cpool.setRetryBackoffStrategy(retryBackoffStrategy);
log.debug("Custom RetryBackoffStrategy {}", cpool.getRetryBackoffStrategy());
} else {
// No custom strategy configured; keep the driver default.
log.debug("Default RetryBackoffStrategy {}", cpool.getRetryBackoffStrategy());
cpool.setLocalDatacenter(localDatacenter);
log.debug("Set local datacenter: {}", cpool.getLocalDatacenter());
cpool.setMaxConns(maxConnections);
cpool.setAuthenticationCredentials(new SimpleAuthenticationCredentials(username, password));
// Truststore location/password come straight from configuration.
cpool.setSSLConnectionContext(new SSLConnectionContext(config.get(SSL_TRUSTSTORE_LOCATION),
    config.get(SSL_TRUSTSTORE_PASSWORD)));
// Continuation of a configuration builder chain (the receiver is outside this
// fragment): node discovery, target Cassandra version, and the default read
// consistency level all come from injected config objects.
.setDiscoveryType( NodeDiscoveryType.valueOf( cassandraFig.getDiscoveryType() ) )
.setTargetCassandraVersion( cassandraFig.getVersion() )
.setDefaultReadConsistencyLevel( cassandraConfig.getReadCL() )
/**
 * Load up to {@code maxSize} log entries for the given entity, using
 * {@code version} as the starting column of the range read.
 *
 * @param collectionScope the application scope to read from; must not be null
 * @param entityId        the entity whose log entries are loaded; must not be null
 * @param version         the version column to start the range at; must not be null
 * @param maxSize         maximum number of entries to return; must be positive
 * @return the parsed log entries for the entity
 * @throws RuntimeException if the Cassandra read fails
 */
@Override
public List<MvccLogEntry> load( final ApplicationScope collectionScope, final Id entityId, final UUID version,
                                final int maxSize ) {

    Preconditions.checkNotNull( collectionScope, "collectionScope is required" );
    Preconditions.checkNotNull( entityId, "entity id is required" );
    Preconditions.checkNotNull( version, "version is required" );
    Preconditions.checkArgument( maxSize > 0, "max Size must be greater than 0" );

    final ColumnList<UUID> fetched;

    try {
        final Id appId = collectionScope.getApplication();
        final ScopedRowKey<K> key = createKey( appId, entityId );

        // Range read: start at `version`, no end bound, forward order, capped at maxSize.
        fetched = keyspace.prepareQuery( CF_ENTITY_LOG )
                          .getKey( key )
                          .withColumnRange( version, null, false, maxSize )
                          .execute()
                          .getResult();
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to load log entries", e );
    }

    return parseResults( fetched, entityId );
}
/**
 * Load up to {@code maxSize} log entries for the given entity in reversed
 * column order, starting the range at {@code minVersion}.
 *
 * <p>Fix: unlike the sibling {@code load}, this method performed no argument
 * validation, so a null scope or entity id surfaced as an opaque NPE inside
 * the query builder. Validation is now consistent with {@code load}.
 *
 * @param applicationScope the application scope to read from; must not be null
 * @param entityId         the entity whose log entries are loaded; must not be null
 * @param minVersion       the version column to start the reversed range at
 *                         (NOTE(review): null may be a legal "no bound" value for a
 *                         reversed range, so it is deliberately not checked -- confirm)
 * @param maxSize          maximum number of entries to return; must be positive
 * @return the parsed log entries for the entity
 * @throws RuntimeException if the Cassandra read fails
 */
@Override
public List<MvccLogEntry> loadReversed( final ApplicationScope applicationScope, final Id entityId,
                                        final UUID minVersion, final int maxSize ) {

    Preconditions.checkNotNull( applicationScope, "applicationScope is required" );
    Preconditions.checkNotNull( entityId, "entity id is required" );
    Preconditions.checkArgument( maxSize > 0, "max Size must be greater than 0" );

    ColumnList<UUID> columns;
    try {
        final Id applicationId = applicationScope.getApplication();

        final ScopedRowKey<K> rowKey = createKey( applicationId, entityId );

        // Range read: start at minVersion, no end bound, reversed order, capped at maxSize.
        columns = keyspace.prepareQuery( CF_ENTITY_LOG ).getKey( rowKey )
                          .withColumnRange( minVersion, null, true, maxSize ).execute().getResult();
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to load log entries", e );
    }

    return parseResults( columns, entityId );
}
@Override public int getSystemVersion() { try { return keyspace.prepareQuery( CF_MIGRATION_INFO ).getKey( LEGACY_ROW_KEY ).getColumn( COLUMN_VERSION ) .execute().getResult().getIntegerValue(); } //swallow, it doesn't exist catch ( NotFoundException nfe ) { return 0; } catch ( ConnectionException e ) { throw new DataMigrationException( "Unable to retrieve status", e ); } }
// Execute the prepared query and unwrap the payload from the Astyanax
// OperationResult; `result` and `query` are declared outside this fragment.
result = query.execute().getResult();
@Override public String getStatusMessage(final String pluginName) { final ScopedRowKey<String> rowKey = ScopedRowKey.fromKey( STATIC_ID, pluginName); try { return keyspace.prepareQuery( CF_MIGRATION_INFO ).getKey( rowKey ).getColumn( COL_STATUS_MESSAGE ) .execute().getResult().getStringValue(); } //swallow, it doesn't exist catch ( NotFoundException nfe ) { return null; } catch ( ConnectionException e ) { throw new DataMigrationException( "Unable to retrieve status", e ); } }
@Override public int getStatusCode(final String pluginName) { final ScopedRowKey<String> rowKey = ScopedRowKey.fromKey( STATIC_ID, pluginName); try { return keyspace.prepareQuery( CF_MIGRATION_INFO ).getKey( rowKey ).getColumn( COLUMN_STATUS_CODE ) .execute().getResult().getIntegerValue(); } //swallow, it doesn't exist catch ( NotFoundException nfe ) { return 0; } catch ( ConnectionException e ) { throw new DataMigrationException( "Unable to retrieve status", e ); } }
/**
 * Read the migration version stored for a plugin.
 *
 * @param pluginName name of the plugin whose version is read
 * @return the stored version, or {@code 0} when absent or unreachable
 */
@Override
public int getVersion(final String pluginName) {

    final ScopedRowKey<String> rowKey = ScopedRowKey.fromKey( STATIC_ID, pluginName);

    try {
        return keyspace.prepareQuery( CF_MIGRATION_INFO ).getKey( rowKey ).getColumn( COLUMN_VERSION ).execute()
                       .getResult().getIntegerValue();
    }
    //swallow, it doesn't exist
    catch ( NotFoundException nfe ) {
        return 0;
    }
    catch ( ConnectionException e ) {
        // NOTE(review): the boolean result of isSchemaMissing is ignored and ALL
        // connection errors fall through to `return 0`, unlike the sibling
        // getStatusCode/getStatusMessage which throw DataMigrationException.
        // Confirm whether isSchemaMissing rethrows for non-schema failures; if it
        // does not, genuine outages are silently reported as version 0.
        AstyanaxUtils.isSchemaMissing("Unable to connect to cassandra to retrieve status", e);
        return 0;
    }
}
@Override public Optional<Long> getMaxVersion( final ApplicationScope scope, final Id node ) { ValidationUtils.validateApplicationScope( scope ); ValidationUtils.verifyIdentity( node ); ColumnFamilyQuery<ScopedRowKey<Id>, Boolean> query = keyspace.prepareQuery( GRAPH_DELETE ).setConsistencyLevel( fig.getReadCL() ); Column<Boolean> result = null; try { result = query.getKey( ScopedRowKey.fromKey( scope.getApplication(), node ) ).getColumn( COLUMN_NAME ).execute() .getResult(); } catch(NotFoundException nfe){ //swallow, there's just no column return Optional.absent(); } catch ( ConnectionException e ) { throw new RuntimeException( "Unable to connect to casandra", e ); } return Optional.of( result.getLongValue() ); }
/**
 * Probe cluster health by running a trivial CQL read against
 * {@code system.local} at consistency ONE.
 *
 * @return {@link Health#GREEN} when the probe returns at least one row,
 *         {@link Health#RED} otherwise (including on connection failure)
 */
@Override
public Health getHealth() {

    try {
        final ColumnFamily<String, String> systemLocal =
                new ColumnFamily<String, String>( "system.local", StringSerializer.get(), StringSerializer.get(),
                        StringSerializer.get() );

        final OperationResult<CqlResult<String, String>> probe =
                keyspace.prepareQuery( systemLocal )
                        .setConsistencyLevel( ConsistencyLevel.CL_ONE )
                        .withCql( "SELECT now() FROM system.local;" )
                        .execute();

        if ( probe.getResult().getRows().size() > 0 ) {
            return Health.GREEN;
        }
    }
    catch ( ConnectionException ex ) {
        // Failure to reach the cluster is reported as RED below, not rethrown.
        logger.error( "Error connecting to Cassandra", ex );
    }

    return Health.RED;
}
}
// Continuation of a column query (the receiver is outside this fragment):
// range read starting at maxVersion, no end bound, forward order, limited to a
// single column; the single result is exposed as an iterator.
.withColumnRange( maxVersion, null, false, 1 ).execute().getResult()
    .iterator();
// Fetch the given row keys in one multiget, restricted to the single marker
// column. NOTE(review): the matching catch block for this try lies outside
// this fragment.
try {
    results = query.getRowSlice( keys ).withColumnSlice( Collections.singletonList( COLUMN_NAME )).execute()
                   .getResult();
// Continuation of a query (receiver outside this fragment): restrict the read
// to the value column and unwrap the result.
.withColumnSlice( COL_VALUE ).execute().getResult();
// Unchecked cast: `op` was produced by an untyped execute() earlier in the
// enclosing method (outside this fragment); the cast restores the row-result
// type. Connection failures are surfaced as permanent backend errors.
result = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) op).getResult();
}
catch (ConnectionException e) {
    throw new PermanentBackendException(e);
// Unwrap the fetched rows and pre-size the output map to the row count to
// avoid rehashing while it is populated (population occurs outside this fragment).
Rows<ByteBuffer,ByteBuffer> rows = r.getResult();
Map<StaticBuffer, EntryList> result = new HashMap<StaticBuffer, EntryList>(rows.size());
@Override public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException { // this query could only be done when byte-ordering partitioner is used // because Cassandra operates on tokens internally which means that even contiguous // range of keys (e.g. time slice) with random partitioner could produce disjoint set of tokens // returning ambiguous results to the user. Partitioner partitioner = storeManager.getPartitioner(); if (partitioner != Partitioner.BYTEORDER) throw new PermanentBackendException("getKeys(KeyRangeQuery could only be used with byte-ordering partitioner."); ByteBuffer start = query.getKeyStart().asByteBuffer(), end = query.getKeyEnd().asByteBuffer(); RowSliceQuery rowSlice = keyspace.prepareQuery(columnFamily) .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeyRange(start, end, null, null, Integer.MAX_VALUE); // Astyanax is bad at builder pattern :( rowSlice.withColumnRange(query.getSliceStart().asByteBuffer(), query.getSliceEnd().asByteBuffer(), false, query.getLimit()); // Omit final the query's keyend from the result, if present in result final Rows<ByteBuffer, ByteBuffer> r; try { r = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) rowSlice.execute()).getResult(); } catch (ConnectionException e) { throw new TemporaryBackendException(e); } Iterator<Row<ByteBuffer, ByteBuffer>> i = Iterators.filter(r.iterator(), new KeySkipPredicate(query.getKeyEnd().asByteBuffer())); return new RowIterator(i, query); }
// Test fragment: the first read is expected to succeed; the later reads are
// expected to throw (presumably NotFoundException after a delete), so fail()
// fires only if a value comes back. NOTE(review): the surrounding try/catch
// and the mutations between these reads lie outside this fragment.
Column<String> column = keyspace.prepareQuery( cf ).getKey( rowKey ).getColumn( colName ).execute().getResult();
column = keyspace.prepareQuery( cf ).getKey( rowKey ).getColumn( colName ).execute().getResult();
fail( "I shouldn't return a value" );
column = keyspace.prepareQuery( cf ).getKey( rowKey ).getColumn( colName ).execute().getResult();
fail( "I shouldn't return a value" );
// Test fragment: reads interleaved with a batch mutation. Some reads keep the
// column for later assertions; others discard the result and presumably only
// verify that the read throws or succeeds. NOTE(review): the assertions and
// try/catch surrounding these statements lie outside this fragment.
Column<String> col = keyspace.prepareQuery( testCf ).getKey( key ).getColumn( colname ).execute().getResult();
keyspace.prepareQuery( testCf ).getKey( key ).getColumn( colname ).execute().getResult();
// Flush the pending mutations before re-reading.
batch.execute();
col = keyspace.prepareQuery( testCf ).getKey( key ).getColumn( colname ).execute().getResult();
col = keyspace.prepareQuery( testCf ).getKey( key ).getColumn( colname ).execute().getResult();
keyspace.prepareQuery( testCf ).getKey( key ).getColumn( colname ).execute().getResult();