private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, long timestamp, SlicePredicate predicate, ConsistencyLevel consistency_level, ClientState cState) throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException { CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family); ThriftValidation.validateColumnParent(metadata, column_parent); ThriftValidation.validatePredicate(metadata, column_parent, predicate); org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level); consistencyLevel.validateForRead(keyspace); List<ReadCommand> commands = new ArrayList<ReadCommand>(keys.size()); IDiskAtomFilter filter = toInternalFilter(metadata, column_parent, predicate); for (ByteBuffer key: keys) { ThriftValidation.validateKey(metadata, key); // Note that we should not share a slice filter amongst the command, due to SliceQueryFilter not being immutable // due to its columnCounter used by the lastCounted() method (also see SelectStatement.getSliceCommands) commands.add(ReadCommand.create(keyspace, key, column_parent.getColumn_family(), timestamp, filter.cloneShallow())); } return getSlice(commands, column_parent.isSetSuper_column(), consistencyLevel, cState); }
cl.validateForRead(keyspace());
cl.validateForRead(keyspace());
cl.validateForRead(keyspace());
/**
 * Thrift multiget_slice implementation (3.x read path): validates the request, converts the
 * Thrift predicate into internal column/clustering filters, builds one
 * SinglePartitionReadCommand per key and delegates to getSlice.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, int nowInSec, SlicePredicate predicate, ConsistencyLevel consistency_level, ClientState cState, long queryStartNanoTime)
throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException
{
    // Up-front validation of the column family, parent and predicate.
    CFMetaData cfm = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
    ThriftValidation.validateColumnParent(cfm, column_parent);
    ThriftValidation.validatePredicate(cfm, column_parent, predicate);

    org.apache.cassandra.db.ConsistencyLevel cl = ThriftConversion.fromThrift(consistency_level);
    cl.validateForRead(keyspace);

    // Translate the Thrift predicate into the internal filters and limits shared by all keys.
    ColumnFilter columns = makeColumnFilter(cfm, column_parent, predicate);
    ClusteringIndexFilter clusterings = toInternalFilter(cfm, column_parent, predicate);
    DataLimits limits = getLimits(1, cfm.isSuper() && !column_parent.isSetSuper_column(), predicate);

    List<SinglePartitionReadCommand> reads = new ArrayList<>(keys.size());
    for (ByteBuffer rawKey : keys)
    {
        ThriftValidation.validateKey(cfm, rawKey);
        DecoratedKey dk = cfm.decorateKey(rawKey);
        reads.add(SinglePartitionReadCommand.create(true, cfm, nowInSec, columns, RowFilter.NONE, limits, dk, clusterings));
    }

    return getSlice(reads, column_parent.isSetSuper_column(), limits.perPartitionCount(), cl, cState, queryStartNanoTime);
}
/**
 * Thrift multiget_slice: validates arguments, then issues one internal single-partition
 * read per key and collects the results via getSlice.
 */
private Map<ByteBuffer, List<ColumnOrSuperColumn>> multigetSliceInternal(String keyspace, List<ByteBuffer> keys, ColumnParent column_parent, int nowInSec, SlicePredicate predicate, ConsistencyLevel consistency_level, ClientState cState, long queryStartNanoTime)
throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException
{
    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_parent.column_family);
    ThriftValidation.validateColumnParent(metadata, column_parent);
    ThriftValidation.validatePredicate(metadata, column_parent, predicate);

    // Convert and check the requested consistency before doing any work.
    org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level);
    consistencyLevel.validateForRead(keyspace);

    // Filters and limits are key-independent, so build them once outside the loop.
    ColumnFilter columnFilter = makeColumnFilter(metadata, column_parent, predicate);
    ClusteringIndexFilter clusteringFilter = toInternalFilter(metadata, column_parent, predicate);
    boolean countSuperColumns = metadata.isSuper() && !column_parent.isSetSuper_column();
    DataLimits limits = getLimits(1, countSuperColumns, predicate);

    List<SinglePartitionReadCommand> commands = new ArrayList<>(keys.size());
    for (ByteBuffer key : keys)
    {
        ThriftValidation.validateKey(metadata, key);
        commands.add(SinglePartitionReadCommand.create(true,
                                                       metadata,
                                                       nowInSec,
                                                       columnFilter,
                                                       RowFilter.NONE,
                                                       limits,
                                                       metadata.decorateKey(key),
                                                       clusteringFilter));
    }

    return getSlice(commands, column_parent.isSetSuper_column(), limits.perPartitionCount(), consistencyLevel, cState, queryStartNanoTime);
}
ThriftValidation.validateColumnParent(metadata, request.getColumn_parent()); org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(request.getConsistency_level()); consistencyLevel.validateForRead(keyspace); List<ReadCommand> commands = new ArrayList<>(1); ColumnSlice[] slices = new ColumnSlice[request.getColumn_slices().size()];
ThriftValidation.validateColumnParent(metadata, request.getColumn_parent()); org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(request.getConsistency_level()); consistencyLevel.validateForRead(keyspace);
ThriftValidation.validateColumnPath(metadata, column_path); org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level); consistencyLevel.validateForRead(keyspace);
ThriftValidation.validateColumnParent(metadata, request.getColumn_parent()); org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(request.getConsistency_level()); consistencyLevel.validateForRead(keyspace);
private static void validateSelect(String keyspace, SelectStatement select, List<ByteBuffer> variables) throws InvalidRequestException { select.getConsistencyLevel().validateForRead(keyspace); // Finish key w/o start key (KEY < foo) if (!select.isKeyRange() && (select.getKeyFinish() != null)) throw new InvalidRequestException("Key range clauses must include a start key (i.e. KEY > term)"); // Key range and by-key(s) combined (KEY > foo AND KEY = bar) if (select.isKeyRange() && select.getKeys().size() > 0) throw new InvalidRequestException("You cannot combine key range and by-key clauses in a SELECT"); // Start and finish keys, *and* column relations (KEY > foo AND KEY < bar and name1 = value1). if (select.isKeyRange() && (select.getKeyFinish() != null) && (select.getColumnRelations().size() > 0)) throw new InvalidRequestException("You cannot combine key range and by-column clauses in a SELECT"); // Can't use more than one KEY = if (!select.isMultiKey() && select.getKeys().size() > 1) throw new InvalidRequestException("You cannot use more than one KEY = in a SELECT"); if (select.getColumnRelations().size() > 0) { ColumnFamilyStore cfstore = Keyspace.open(keyspace).getColumnFamilyStore(select.getColumnFamily()); CellNameType comparator = cfstore.metadata.comparator; AbstractType<?> at = comparator.asAbstractType(); SecondaryIndexManager idxManager = cfstore.indexManager; for (Relation relation : select.getColumnRelations()) { ByteBuffer name = relation.getEntity().getByteBuffer(at, variables); if ((relation.operator() == RelationType.EQ) && idxManager.indexes(comparator.cellFromByteBuffer(name))) return; } throw new InvalidRequestException("No indexed columns present in by-columns clause with \"equals\" operator"); } }
/**
 * Executes this SELECT: validates the consistency level, builds the read query,
 * and either runs it directly (no aggregation, result fits in one page) or
 * drives it through a distributed query pager.
 */
public ResultMessage.Rows execute(QueryState state, QueryOptions options, long queryStartNanoTime) throws RequestExecutionException, RequestValidationException
{
    ConsistencyLevel consistency = options.getConsistency();
    checkNotNull(consistency, "Invalid empty consistency level");
    consistency.validateForRead(keyspace());

    int nowInSec = FBUtilities.nowInSeconds();
    int userLimit = getLimit(options);
    int userPerPartitionLimit = getPerPartitionLimit(options);
    int pageSize = options.getPageSize();
    ReadQuery query = getQuery(options, nowInSec, userLimit, userPerPartitionLimit, pageSize);

    // A direct (unpaged) execution is possible when there is no aggregation and either
    // paging is disabled or the whole result fits within a single page.
    boolean fitsInSinglePage = pageSize <= 0 || query.limits().count() <= pageSize;
    if (aggregationSpec == null && fitsInSinglePage)
        return execute(query, options, state, nowInSec, userLimit, queryStartNanoTime);

    QueryPager pager = getPager(query, options);
    return execute(Pager.forDistributedQuery(pager, consistency, state.getClientState()), options, pageSize, nowInSec, userLimit, queryStartNanoTime);
}
/**
 * Executes this SELECT against the cluster. Paged execution is used whenever an
 * aggregation is requested or the result may exceed a single page; otherwise the
 * query runs in one shot.
 */
public ResultMessage.Rows execute(QueryState state, QueryOptions options, long queryStartNanoTime) throws RequestExecutionException, RequestValidationException
{
    ConsistencyLevel cl = options.getConsistency();
    checkNotNull(cl, "Invalid empty consistency level");
    cl.validateForRead(keyspace());

    int nowInSec = FBUtilities.nowInSeconds();
    int userLimit = getLimit(options);
    int perPartitionLimit = getPerPartitionLimit(options);
    int pageSize = options.getPageSize();
    ReadQuery query = getQuery(options, nowInSec, userLimit, perPartitionLimit, pageSize);

    // Paging is required if we aggregate, or if paging is on and the query may return
    // more rows than fit in one page (De Morgan of the single-shot condition).
    if (aggregationSpec != null || (pageSize > 0 && query.limits().count() > pageSize))
    {
        QueryPager pager = getPager(query, options);
        return execute(Pager.forDistributedQuery(pager, cl, state.getClientState()), options, pageSize, nowInSec, userLimit, queryStartNanoTime);
    }

    return execute(query, options, state, nowInSec, userLimit, queryStartNanoTime);
}
/**
 * Runs this SELECT statement, choosing between an unpaged one-shot execution and a
 * pager-driven execution depending on aggregation and page-size settings.
 */
public ResultMessage.Rows execute(QueryState state, QueryOptions options, long queryStartNanoTime) throws RequestExecutionException, RequestValidationException
{
    ConsistencyLevel readConsistency = options.getConsistency();
    checkNotNull(readConsistency, "Invalid empty consistency level");
    readConsistency.validateForRead(keyspace());

    int nowInSec = FBUtilities.nowInSeconds();
    int userLimit = getLimit(options);
    int userPerPartitionLimit = getPerPartitionLimit(options);
    int pageSize = options.getPageSize();

    ReadQuery query = getQuery(options, nowInSec, userLimit, userPerPartitionLimit, pageSize);

    boolean pagingDisabled = pageSize <= 0;
    boolean withinOnePage = query.limits().count() <= pageSize;
    if (aggregationSpec == null && (pagingDisabled || withinOnePage))
    {
        // No aggregation and everything fits in one page: execute directly.
        return execute(query, options, state, nowInSec, userLimit, queryStartNanoTime);
    }

    // Otherwise page through the result via a distributed query pager.
    QueryPager pager = getPager(query, options);
    Pager distributedPager = Pager.forDistributedQuery(pager, readConsistency, state.getClientState());
    return execute(distributedPager, options, pageSize, nowInSec, userLimit, queryStartNanoTime);
}
throw new InvalidRequestException("Invalid empty consistency level"); cl.validateForRead(keyspace());
ThriftValidation.validateIndexClauses(metadata, index_clause); org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level); consistencyLevel.validateForRead(keyspace);
ThriftValidation.validateIndexClauses(metadata, index_clause); org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level); consistencyLevel.validateForRead(keyspace);
ThriftValidation.validateIndexClauses(metadata, index_clause); org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level); consistencyLevel.validateForRead(keyspace);
consistencyLevel.validateForRead(keyspace);