/**
 * Creates an iterator over parsed column names for the given row query.
 *
 * @param rowQuery  the query to page through; auto-pagination is enabled on it here
 * @param parser    converts each raw column into the output type
 * @param skipFirst whether the first column returned should be skipped (used when
 *                  resuming from a cursor value that was already consumed)
 */
public ColumnNameIterator( RowQuery<?, C> rowQuery, final ColumnParser<C, T> parser, final boolean skipFirst ) {
    this.parser = parser;
    this.skipFirst = skipFirst;
    // enable paging so iteration can advance past the first page of columns
    this.rowQuery = rowQuery.autoPaginate( true );
}
@Override public int getSystemVersion() { try { return keyspace.prepareQuery( CF_MIGRATION_INFO ).getKey( LEGACY_ROW_KEY ).getColumn( COLUMN_VERSION ) .execute().getResult().getIntegerValue(); } //swallow, it doesn't exist catch ( NotFoundException nfe ) { return 0; } catch ( ConnectionException e ) { throw new DataMigrationException( "Unable to retrieve status", e ); } }
/**
 * Loads up to {@code maxSize} log entries for the entity, ranging from
 * {@code minVersion} with the column range's reversed flag set.
 *
 * @param applicationScope the scope the entity lives in
 * @param entityId         the entity whose log entries to load
 * @param minVersion       the version the column range starts from (may be null
 *                         for an open-ended range — deliberately not null-checked)
 * @param maxSize          maximum number of entries to return; must be &gt; 0
 * @return the parsed log entries
 * @throws RuntimeException when the entries cannot be read from Cassandra
 */
@Override
public List<MvccLogEntry> loadReversed( final ApplicationScope applicationScope, final Id entityId,
                                        final UUID minVersion, final int maxSize ) {

    // Validate inputs up front, consistent with load(); previously a null
    // argument surfaced as an NPE deep inside query construction.
    Preconditions.checkNotNull( applicationScope, "applicationScope is required" );
    Preconditions.checkNotNull( entityId, "entity id is required" );
    Preconditions.checkArgument( maxSize > 0, "max Size must be greater than 0" );

    ColumnList<UUID> columns;

    try {
        final Id applicationId = applicationScope.getApplication();
        final ScopedRowKey<K> rowKey = createKey( applicationId, entityId );

        columns = keyspace.prepareQuery( CF_ENTITY_LOG )
                          .getKey( rowKey )
                          .withColumnRange( minVersion, null, true, maxSize )
                          .execute()
                          .getResult();
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to load log entries", e );
    }

    return parseResults( columns, entityId );
}
@Override public Optional<Long> getMaxVersion( final ApplicationScope scope, final Id node ) { ValidationUtils.validateApplicationScope( scope ); ValidationUtils.verifyIdentity( node ); ColumnFamilyQuery<ScopedRowKey<Id>, Boolean> query = keyspace.prepareQuery( GRAPH_DELETE ).setConsistencyLevel( fig.getReadCL() ); Column<Boolean> result = null; try { result = query.getKey( ScopedRowKey.fromKey( scope.getApplication(), node ) ).getColumn( COLUMN_NAME ).execute() .getResult(); } catch(NotFoundException nfe){ //swallow, there's just no column return Optional.absent(); } catch ( ConnectionException e ) { throw new RuntimeException( "Unable to connect to casandra", e ); } return Optional.of( result.getLongValue() ); }
/**
 * Returns an iterator over the entity's version history, ranging up to the
 * supplied version with the column range's reversed flag set.
 *
 * @param applicationScope the scope the entity lives in
 * @param entityId         the entity whose history to iterate
 * @param version          the bounding version for the column range
 * @param fetchSize        page size per Cassandra round trip; must be &gt; 0
 */
@Override
public Iterator<MvccEntity> loadAscendingHistory( final ApplicationScope applicationScope, final Id entityId,
                                                  final UUID version, final int fetchSize ) {

    Preconditions.checkNotNull( applicationScope, "applicationScope is required" );
    Preconditions.checkNotNull( entityId, "entity id is required" );
    Preconditions.checkNotNull( version, "version is required" );
    Preconditions.checkArgument( fetchSize > 0, "max Size must be greater than 0" );

    final Id appId = applicationScope.getApplication();
    final String scopeName = LegacyScopeUtils.getCollectionScopeNameFromEntityType( entityId.getType() );

    // the application itself is the owner of the collection-prefixed key
    final CollectionPrefixedKey<Id> prefixedKey = new CollectionPrefixedKey<>( scopeName, appId, entityId );
    final ScopedRowKey<CollectionPrefixedKey<Id>> entityRowKey = ScopedRowKey.fromKey( appId, prefixedKey );

    final RowQuery<ScopedRowKey<CollectionPrefixedKey<Id>>, UUID> historyQuery =
        keyspace.prepareQuery( columnFamily )
                .getKey( entityRowKey )
                .withColumnRange( null, version, true, fetchSize );

    return new ColumnNameIterator( historyQuery, new MvccColumnParser( entityId, getEntitySerializer() ), false );
}
.setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeySlice(CassandraHelper.convert(keys)); rq.withColumnRange(query.getSliceStart().asByteBuffer(), query.getSliceEnd().asByteBuffer(), false, r = (OperationResult<Rows<ByteBuffer, ByteBuffer>>) rq.execute(); } catch (ConnectionException e) { throw new TemporaryBackendException(e);
/** * Get the edge types from the search criteria. * * @param scope The org scope * @param search The edge type search info * @param cf The column family to execute on */ private Iterator<String> getEdgeTypes( final ApplicationScope scope, final SearchEdgeType search, final MultiTenantColumnFamily<ScopedRowKey<Id>, String> cf ) { ValidationUtils.validateApplicationScope( scope ); GraphValidation.validateSearchEdgeType( search ); final ScopedRowKey< Id> sourceKey = new ScopedRowKey<>( scope.getApplication(), search.getNode() ); //resume from the last if specified. Also set the range final RangeBuilder rangeBuilder = createRange( search ); RowQuery<ScopedRowKey<Id>, String> query = keyspace.prepareQuery( cf ).getKey( sourceKey ).autoPaginate( true ) .withColumnRange( rangeBuilder.build() ); return new ColumnNameIterator<>( query, PARSER, search.getLast().isPresent() ); }
/**
 * Returns the persisted shard metadata for the given directed edge meta data,
 * seeking from the supplied start shard when present.
 *
 * @param scope    the application scope to read from
 * @param start    optional shard whose index the scan should start from
 * @param metaData the directed edge meta data identifying the shard row
 * @return an iterator over the stored shards
 */
@Override
public Iterator<Shard> getShardMetaData( final ApplicationScope scope, final Optional<Shard> start,
                                         final DirectedEdgeMeta metaData ) {

    ValidationUtils.validateApplicationScope( scope );
    // null-check moved before validation — it previously ran after the value
    // had already been dereferenced by validateDirectedEdgeMeta
    Preconditions.checkNotNull( metaData, "metadata must be present" );
    GraphValidation.validateDirectedEdgeMeta( metaData );

    /**
     * If a start shard is present, we need to begin seeking from its index.
     */
    final RangeBuilder rangeBuilder = new RangeBuilder().setLimit( graphFig.getScanPageSize() );

    if ( start.isPresent() ) {
        final Shard shard = start.get();
        GraphValidation.valiateShard( shard );
        rangeBuilder.setStart( shard.getShardIndex() );
    }

    // parameterized row key — was a raw ScopedRowKey in the original
    final ScopedRowKey<DirectedEdgeMeta> rowKey = ScopedRowKey.fromKey( scope.getApplication(), metaData );

    final RowQuery<ScopedRowKey<DirectedEdgeMeta>, Long> query =
        keyspace.prepareQuery( EDGE_SHARDS )
                .setConsistencyLevel( cassandraConfig.getReadCL() )
                .getKey( rowKey )
                .autoPaginate( true )
                .withColumnRange( rangeBuilder.build() );

    return new ColumnNameIterator<>( query, COLUMN_PARSER, false );
}
@Override public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException { // this query could only be done when byte-ordering partitioner is used // because Cassandra operates on tokens internally which means that even contiguous // range of keys (e.g. time slice) with random partitioner could produce disjoint set of tokens // returning ambiguous results to the user. Partitioner partitioner = storeManager.getPartitioner(); if (partitioner != Partitioner.BYTEORDER) throw new PermanentBackendException("getKeys(KeyRangeQuery could only be used with byte-ordering partitioner."); ByteBuffer start = query.getKeyStart().asByteBuffer(), end = query.getKeyEnd().asByteBuffer(); RowSliceQuery rowSlice = keyspace.prepareQuery(columnFamily) .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()) .withRetryPolicy(retryPolicy.duplicate()) .getKeyRange(start, end, null, null, Integer.MAX_VALUE); // Astyanax is bad at builder pattern :( rowSlice.withColumnRange(query.getSliceStart().asByteBuffer(), query.getSliceEnd().asByteBuffer(), false, query.getLimit()); // Omit final the query's keyend from the result, if present in result final Rows<ByteBuffer, ByteBuffer> r; try { r = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) rowSlice.execute()).getResult(); } catch (ConnectionException e) { throw new TemporaryBackendException(e); } Iterator<Row<ByteBuffer, ByteBuffer>> i = Iterators.filter(r.iterator(), new KeySkipPredicate(query.getKeyEnd().asByteBuffer())); return new RowIterator(i, query); }
/**
 * Lightweight Cassandra health check: runs a trivial CQL query against
 * {@code system.local} at consistency ONE.
 *
 * @return {@code Health.GREEN} when the query returns at least one row,
 *         {@code Health.RED} on connection failure or an empty result
 */
@Override
public Health getHealth() {
    try {
        // renamed from CF_SYSTEM_LOCAL — it's a local variable, not a constant
        final ColumnFamily<String, String> systemLocal = new ColumnFamily<String, String>(
            "system.local", StringSerializer.get(), StringSerializer.get(), StringSerializer.get() );

        final OperationResult<CqlResult<String, String>> result =
            keyspace.prepareQuery( systemLocal )
                    .setConsistencyLevel( ConsistencyLevel.CL_ONE )
                    .withCql( "SELECT now() FROM system.local;" )
                    .execute();

        if ( result.getResult().getRows().size() > 0 ) {
            return Health.GREEN;
        }
    }
    catch ( ConnectionException ex ) {
        // a connectivity failure is exactly what this check exists to detect —
        // log it and fall through to RED rather than propagating
        logger.error( "Error connecting to Cassandra", ex );
    }

    return Health.RED;
}
}
throw new PermanentBackendException("This operation is only allowed when random partitioner (md5 or murmur3) is used."); AllRowsQuery allRowsQuery = keyspace.prepareQuery(columnFamily).getAllRows(); allRowsQuery.withColumnRange(sliceQuery.getSliceStart().asByteBuffer(), sliceQuery.getSliceEnd().asByteBuffer(), false, try { OperationResult op = allRowsQuery.setRowLimit(storeManager.getPageSize()) // pre-fetch that many rows at a time .setConcurrencyLevel(1) // one execution thread for fetching portion of rows .setExceptionCallback(new ExceptionCallback() { private int retries = 0; }).execute();
/**
 * Execute the query again and set the results (refreshes sourceIterator with
 * the next page of the paginated row query).
 *
 * @throws RuntimeException when the next page cannot be fetched
 */
private void advanceIterator() {

    //run producing the values within a hystrix command. This way we'll time out if the read takes too long
    // NOTE(review): no Hystrix wrapper is visible in this method — the comment
    // above may be stale; confirm whether the command wrapping happens in a caller.

    try {
        sourceIterator = rowQuery.execute().getResult().iterator();
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to get next page", e );
    }
}
}
@Override public String getStatusMessage(final String pluginName) { final ScopedRowKey<String> rowKey = ScopedRowKey.fromKey( STATIC_ID, pluginName); try { return keyspace.prepareQuery( CF_MIGRATION_INFO ).getKey( rowKey ).getColumn( COL_STATUS_MESSAGE ) .execute().getResult().getStringValue(); } //swallow, it doesn't exist catch ( NotFoundException nfe ) { return null; } catch ( ConnectionException e ) { throw new DataMigrationException( "Unable to retrieve status", e ); } }
/**
 * Loads up to {@code maxSize} log entries for the entity, with the column
 * range beginning at the given version.
 *
 * @param collectionScope the scope the entity lives in
 * @param entityId        the entity whose log entries to load
 * @param version         the version the column range starts from
 * @param maxSize         maximum number of entries to return; must be &gt; 0
 * @return the parsed log entries
 * @throws RuntimeException when the entries cannot be read from Cassandra
 */
@Override
public List<MvccLogEntry> load( final ApplicationScope collectionScope, final Id entityId, final UUID version,
                                final int maxSize ) {

    Preconditions.checkNotNull( collectionScope, "collectionScope is required" );
    Preconditions.checkNotNull( entityId, "entity id is required" );
    Preconditions.checkNotNull( version, "version is required" );
    Preconditions.checkArgument( maxSize > 0, "max Size must be greater than 0" );

    final ColumnList<UUID> logColumns;

    try {
        final Id applicationId = collectionScope.getApplication();
        final ScopedRowKey<K> entityRowKey = createKey( applicationId, entityId );

        logColumns = keyspace.prepareQuery( CF_ENTITY_LOG )
                             .getKey( entityRowKey )
                             .withColumnRange( version, null, false, maxSize )
                             .execute()
                             .getResult();
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to load log entries", e );
    }

    return parseResults( logColumns, entityId );
}
/**
 * Returns an iterator over the entity's version history, with the column
 * range beginning at the supplied version.
 *
 * @param applicationScope the scope the entity lives in
 * @param entityId         the entity whose history to iterate
 * @param version          the version the column range starts from
 * @param fetchSize        page size per Cassandra round trip; must be &gt; 0
 */
@Override
public Iterator<MvccEntity> loadDescendingHistory( final ApplicationScope applicationScope, final Id entityId,
                                                   final UUID version, final int fetchSize ) {

    Preconditions.checkNotNull( applicationScope, "applicationScope is required" );
    Preconditions.checkNotNull( entityId, "entity id is required" );
    Preconditions.checkNotNull( version, "version is required" );
    Preconditions.checkArgument( fetchSize > 0, "max Size must be greater than 0" );

    final Id applicationId = applicationScope.getApplication();
    final String collectionName = LegacyScopeUtils.getCollectionScopeNameFromEntityType( entityId.getType() );

    // the application id doubles as the owner id for collection-prefixed keys
    final CollectionPrefixedKey<Id> prefixKey =
        new CollectionPrefixedKey<>( collectionName, applicationId, entityId );

    final ScopedRowKey<CollectionPrefixedKey<Id>> key = ScopedRowKey.fromKey( applicationId, prefixKey );

    final RowQuery<ScopedRowKey<CollectionPrefixedKey<Id>>, UUID> versionQuery =
        keyspace.prepareQuery( columnFamily )
                .getKey( key )
                .withColumnRange( version, null, false, fetchSize );

    return new ColumnNameIterator( versionQuery, new MvccColumnParser( entityId, getEntitySerializer() ), false );
}
/**
 * Get the id type names for the node/edge-type pair in the search criteria.
 *
 * @param scope  The application scope to use
 * @param search The search criteria
 * @param cf     The column family to search
 */
public Iterator<String> getIdTypes( final ApplicationScope scope, final SearchIdType search,
                                    final MultiTenantColumnFamily<ScopedRowKey<EdgeIdTypeKey>, String> cf ) {
    ValidationUtils.validateApplicationScope( scope );
    GraphValidation.validateSearchEdgeIdType( search );

    final EdgeIdTypeKey typeKey = new EdgeIdTypeKey( search.getNode(), search.getEdgeType() );
    final ScopedRowKey<EdgeIdTypeKey> typeRowKey = new ScopedRowKey<>( scope.getApplication(), typeKey );

    final RangeBuilder range = createRange( search );

    final RowQuery<ScopedRowKey<EdgeIdTypeKey>, String> idTypeQuery =
        keyspace.prepareQuery( cf )
                .getKey( typeRowKey )
                .autoPaginate( true )
                .withColumnRange( range.build() );

    // skip the first column when resuming so the cursor value isn't repeated
    return new ColumnNameIterator<>( idTypeQuery, PARSER, search.getLast().isPresent() );
}
@Override public int getStatusCode(final String pluginName) { final ScopedRowKey<String> rowKey = ScopedRowKey.fromKey( STATIC_ID, pluginName); try { return keyspace.prepareQuery( CF_MIGRATION_INFO ).getKey( rowKey ).getColumn( COLUMN_STATUS_CODE ) .execute().getResult().getIntegerValue(); } //swallow, it doesn't exist catch ( NotFoundException nfe ) { return 0; } catch ( ConnectionException e ) { throw new DataMigrationException( "Unable to retrieve status", e ); } }
/**
 * Creates an iterator over the long column names of the given row.
 *
 * @param rowKey   the row to read
 * @param reversed when true, set the reversed flag on the column range
 * @return an iterator yielding each column's name
 */
private static ColumnNameIterator<Long, Long> createIterator( final String rowKey, final boolean reversed ) {

    // identity parser: the value of interest is the column name itself
    final ColumnParser<Long, Long> longParser = new ColumnParser<Long, Long>() {
        @Override
        public Long parseColumn( final Column<Long> column ) {
            return column.getName();
        }
    };

    // renamed from "forwardRange"/"forwardQuery": direction is controlled by
    // the reversed flag, so the old names were misleading.
    // NOTE(review): 720 is the page limit used here; confirm its intent before changing.
    final RangeBuilder range = new RangeBuilder().setLimit( 720 ).setReversed( reversed );

    final RowQuery<String, Long> query =
        keyspace.prepareQuery( COLUMN_FAMILY ).getKey( rowKey ).withColumnRange( range.build() );

    return new ColumnNameIterator<>( query, longParser, false );
}
}
/**
 * Gets the stored migration version for the given plugin.
 *
 * @param pluginName the plugin whose version column to read
 * @return the stored version; 0 when no column exists, and also 0 when
 *         Cassandra is unreachable (see review note below)
 */
@Override
public int getVersion(final String pluginName) {
    final ScopedRowKey<String> rowKey = ScopedRowKey.fromKey( STATIC_ID, pluginName);

    try {
        return keyspace.prepareQuery( CF_MIGRATION_INFO ).getKey( rowKey ).getColumn( COLUMN_VERSION ).execute()
            .getResult().getIntegerValue();
    }
    //swallow, it doesn't exist
    catch ( NotFoundException nfe ) {
        return 0;
    }
    catch ( ConnectionException e ) {
        // NOTE(review): unlike the sibling accessors (which throw
        // DataMigrationException), this swallows connection failures and
        // returns 0, and the return value of isSchemaMissing is ignored.
        // Confirm this best-effort behavior is intentional.
        AstyanaxUtils.isSchemaMissing("Unable to connect to cassandra to retrieve status", e);
        return 0;
    }
}
// NOTE(review): fragment — the enclosing test method, and whatever deletes the
// column between these reads, is outside this view.

// First read: presumably the column exists at this point and a value is returned.
Column<String> column = keyspace.prepareQuery( cf ).getKey( rowKey ).getColumn( colName ).execute().getResult();

// Second read: the column is expected to be gone, so getResult() should throw
// (NotFoundException, presumably) before fail() is reached.
column = keyspace.prepareQuery( cf ).getKey( rowKey ).getColumn( colName ).execute().getResult();

fail( "I shouldn't return a value" );

// Third read: again expected to throw rather than return a value.
column = keyspace.prepareQuery( cf ).getKey( rowKey ).getColumn( colName ).execute().getResult();

fail( "I shouldn't return a value" );