// Builds the clustering CellName from the last component of the compound primary key.
public CellName makeClusteringKey(ByteBuffer primaryKey)
{
    ByteBuffer clusteringKeyBuf = primaryKeyType.extractLastComponent(primaryKey);
    return clusteringCType.cellFromByteBuffer(clusteringKeyBuf);
}
/** * Insert a new "regular" column to the current row (and super column if applicable). * @param name the column name * @param value the column value * @param timestamp the column timestamp */ public void addColumn(ByteBuffer name, ByteBuffer value, long timestamp) throws IOException { addColumn(new BufferCell(metadata.comparator.cellFromByteBuffer(name), value, timestamp)); }
/**
 * Insert a new expiring column to the current row (and super column if applicable).
 *
 * @param name the column name
 * @param value the column value
 * @param timestamp the column timestamp
 * @param ttl the column time to live in seconds
 * @param expirationTimestampMS the local expiration timestamp in milliseconds. This is the server-side timestamp
 * used for actually expiring the column, and as a consequence it should be synchronized with the Cassandra servers'
 * time. If {@code timestamp} represents the insertion time in microseconds (which is not required), this should be
 * {@code (timestamp / 1000) + (ttl * 1000)}.
 */
public void addExpiringColumn(ByteBuffer name, ByteBuffer value, long timestamp, int ttl, long expirationTimestampMS) throws IOException
{
    addColumn(new BufferExpiringCell(metadata.comparator.cellFromByteBuffer(name), value, timestamp, ttl, (int) (expirationTimestampMS / 1000)));
}
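// Worked example of the Javadoc formula above (a sketch; "writer", the column name/value and the TTL are
// assumptions): with an insertion timestamp in microseconds and a TTL in seconds, the local expiration
// timestamp in milliseconds is (timestamp / 1000) + (ttl * 1000).
long timestamp = FBUtilities.timestampMicros();   // insertion time, in microseconds
int ttl = 3600;                                   // expire one hour after insertion
long expirationTimestampMS = (timestamp / 1000) + (ttl * 1000L);
writer.addExpiringColumn(ByteBufferUtil.bytes("session"), ByteBufferUtil.bytes("abc123"), timestamp, ttl, expirationTimestampMS);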
/**
 * Insert a new counter column to the current row (and super column if applicable).
 *
 * @param name the column name
 * @param value the value of the counter
 */
public void addCounterColumn(ByteBuffer name, long value) throws IOException
{
    addColumn(new BufferCounterCell(metadata.comparator.cellFromByteBuffer(name),
                                    CounterContext.instance().createGlobal(counterid, 1L, value),
                                    System.currentTimeMillis()));
}
private static SortedSet<CellName> getColumnNames(SelectStatement select, CFMetaData metadata, List<ByteBuffer> variables)
throws InvalidRequestException
{
    String keyString = metadata.getCQL2KeyName();
    List<Term> selectColumnNames = select.getColumnNames();
    SortedSet<CellName> columnNames = new TreeSet<>(metadata.comparator);
    for (Term column : selectColumnNames)
    {
        // skip the key for the slice op; we'll add it to the resultset in extractThriftColumns
        if (!column.getText().equalsIgnoreCase(keyString))
            columnNames.add(metadata.comparator.cellFromByteBuffer(column.getByteBuffer(metadata.comparator.asAbstractType(), variables)));
    }
    return columnNames;
}
RangeSliceQueryPager(RangeSliceCommand command, ConsistencyLevel consistencyLevel, boolean localQuery, PagingState state)
{
    this(command, consistencyLevel, localQuery);

    if (state != null)
    {
        lastReturnedKey = StorageService.getPartitioner().decorateKey(state.partitionKey);
        lastReturnedName = cfm.comparator.cellFromByteBuffer(state.cellName);
        restoreState(state.remaining, true);
    }
}
private boolean needsExtraQuery(ByteBuffer rowKey, ColumnFamily data)
{
    IDiskAtomFilter filter = columnFilter(rowKey);
    if (filter instanceof SliceQueryFilter && DataRange.isFullRowSlice((SliceQueryFilter) filter))
        return false;

    for (IndexExpression expr : clause)
    {
        if (data.getColumn(data.getComparator().cellFromByteBuffer(expr.column)) == null)
        {
            logger.debug("adding extraFilter to cover additional expressions");
            return true;
        }
    }
    return false;
}
private IDiskAtomFilter toInternalFilter(CFMetaData metadata, ColumnParent parent, SlicePredicate predicate)
{
    IDiskAtomFilter filter;
    if (predicate.column_names != null)
    {
        if (metadata.isSuper())
        {
            CellNameType columnType = new SimpleDenseCellNameType(metadata.comparator.subtype(parent.isSetSuper_column() ? 1 : 0));
            SortedSet<CellName> s = new TreeSet<>(columnType);
            for (ByteBuffer bb : predicate.column_names)
                s.add(columnType.cellFromByteBuffer(bb));
            filter = SuperColumns.fromSCNamesFilter(metadata.comparator, parent.bufferForSuper_column(), new NamesQueryFilter(s));
        }
        else
        {
            SortedSet<CellName> s = new TreeSet<CellName>(metadata.comparator);
            for (ByteBuffer bb : predicate.column_names)
                s.add(metadata.comparator.cellFromByteBuffer(bb));
            filter = new NamesQueryFilter(s);
        }
    }
    else
    {
        filter = toInternalFilter(metadata, parent, predicate.slice_range);
    }
    return filter;
}
private void addColumnOrSuperColumn(org.apache.cassandra.db.Mutation mutation, CFMetaData cfm, ColumnOrSuperColumn cosc)
{
    if (cosc.super_column != null)
    {
        for (Column column : cosc.super_column.columns)
        {
            mutation.add(cfm.cfName, cfm.comparator.makeCellName(cosc.super_column.name, column.name), column.value, column.timestamp, column.ttl);
        }
    }
    else if (cosc.column != null)
    {
        mutation.add(cfm.cfName, cfm.comparator.cellFromByteBuffer(cosc.column.name), cosc.column.value, cosc.column.timestamp, cosc.column.ttl);
    }
    else if (cosc.counter_super_column != null)
    {
        for (CounterColumn column : cosc.counter_super_column.columns)
        {
            mutation.addCounter(cfm.cfName, cfm.comparator.makeCellName(cosc.counter_super_column.name, column.name), column.value);
        }
    }
    else // cosc.counter_column != null
    {
        mutation.addCounter(cfm.cfName, cfm.comparator.cellFromByteBuffer(cosc.counter_column.name), cosc.counter_column.value);
    }
}
columns.add(cfs.getComparator().cellFromByteBuffer(expr.column));
columns.addAll(((NamesQueryFilter) filter).columns);
return ((NamesQueryFilter) filter).withUpdatedColumns(columns);
public IDiskAtomFilter getExtraFilter(DecoratedKey rowKey, ColumnFamily data)
{
    /*
     * This method assumes the IndexExpression names are valid column names, which is not the
     * case with composites. This is ok for now however since:
     * 1) CompositeSearcher doesn't use it.
     * 2) We don't yet allow non-indexed range slice with filters in CQL3 (i.e. this will never be
     *    called by CFS.filter() for composites).
     */
    assert !(cfs.getComparator().isCompound()) : "Sequential scan with filters is not supported (if you just created an index, you "
                                               + "need to wait for the creation to be propagated to all nodes before querying it)";

    if (!needsExtraQuery(rowKey.getKey(), data))
        return null;

    // Note: for counters we must be careful to not add a column that was already there (to avoid overcount). That is
    // why we do the dance of avoiding to query any column we already have (it's also more efficient anyway)
    SortedSet<CellName> columns = new TreeSet<CellName>(cfs.getComparator());
    for (IndexExpression expr : clause)
    {
        CellName name = data.getComparator().cellFromByteBuffer(expr.column);
        if (data.getColumn(name) == null)
            columns.add(name);
    }

    assert !columns.isEmpty();
    return new NamesQueryFilter(columns);
}
public static IDiskAtomFilter asIFilter(SlicePredicate sp, CFMetaData metadata, ByteBuffer superColumn)
{
    SliceRange sr = sp.slice_range;
    IDiskAtomFilter filter;

    CellNameType comparator = metadata.isSuper()
                            ? new SimpleDenseCellNameType(metadata.comparator.subtype(superColumn == null ? 0 : 1))
                            : metadata.comparator;
    if (sr == null)
    {
        SortedSet<CellName> ss = new TreeSet<CellName>(comparator);
        for (ByteBuffer bb : sp.column_names)
            ss.add(comparator.cellFromByteBuffer(bb));
        filter = new NamesQueryFilter(ss);
    }
    else
    {
        filter = new SliceQueryFilter(comparator.fromByteBuffer(sr.start), comparator.fromByteBuffer(sr.finish), sr.reversed, sr.count);
    }

    if (metadata.isSuper())
        filter = SuperColumns.fromSCFilter(metadata.comparator, superColumn, filter);
    return filter;
}
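// A minimal sketch of the two paths through asIFilter (the predicate contents are assumptions; SlicePredicate
// and SliceRange are the Thrift-generated classes, so only their generated setters/constructors are used):
SlicePredicate byNames = new SlicePredicate();
byNames.setColumn_names(Arrays.asList(ByteBufferUtil.bytes("age"), ByteBufferUtil.bytes("name")));
IDiskAtomFilter namesFilter = asIFilter(byNames, metadata, null);      // no slice_range -> NamesQueryFilter

SlicePredicate byRange = new SlicePredicate();
byRange.setSlice_range(new SliceRange(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 100));
IDiskAtomFilter sliceFilter = asIFilter(byRange, metadata, null);      // slice_range set -> SliceQueryFilter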
public Mutation mutationForKey(ByteBuffer key, String keyspace, Long timestamp, ThriftClientState clientState, List<ByteBuffer> variables, CFMetaData metadata)
throws InvalidRequestException
{
    Mutation mutation = new Mutation(keyspace, key);

    QueryProcessor.validateKeyAlias(metadata, keyName);

    if (columns.size() < 1)
    {
        // No columns, delete the partition
        mutation.delete(columnFamily, (timestamp == null) ? getTimestamp(clientState) : timestamp);
    }
    else
    {
        // Delete specific columns
        AbstractType<?> at = metadata.comparator.asAbstractType();
        for (Term column : columns)
        {
            CellName columnName = metadata.comparator.cellFromByteBuffer(column.getByteBuffer(at, variables));
            validateColumnName(columnName);
            mutation.delete(columnFamily, columnName, (timestamp == null) ? getTimestamp(clientState) : timestamp);
        }
    }

    return mutation;
}
private ByteBuffer extractDataValue(ColumnDefinition def, ByteBuffer rowKey, ColumnFamily data, Composite prefix)
{
    switch (def.kind)
    {
        case PARTITION_KEY:
            return def.isOnAllComponents()
                 ? rowKey
                 : ((CompositeType) data.metadata().getKeyValidator()).split(rowKey)[def.position()];
        case CLUSTERING_COLUMN:
            return prefix.get(def.position());
        case REGULAR:
            CellName cname = prefix == null
                           ? data.getComparator().cellFromByteBuffer(def.name.bytes)
                           : data.getComparator().create(prefix, def);
            Cell cell = data.getColumn(cname);
            return cell == null ? null : cell.value();
        case COMPACT_VALUE:
            assert data.getColumnCount() == 1;
            return data.getSortedColumns().iterator().next().value();
    }
    throw new AssertionError();
}
? metadata.comparator.cellFromByteBuffer(column.name)
: metadata.comparator.makeCellName(scName, column.name);
try
    return null;

assert(cfs.metadata.isCounter());

final CellName cellName = cfs.metadata.comparator.cellFromByteBuffer(cellNameBuffer);
return StageManager.getStage(Stage.READ).submit(new Callable<Pair<CounterCacheKey, ClockAndCount>>()
private static void validateSelect(String keyspace, SelectStatement select, List<ByteBuffer> variables) throws InvalidRequestException
{
    select.getConsistencyLevel().validateForRead(keyspace);

    // Finish key w/o start key (KEY < foo)
    if (!select.isKeyRange() && (select.getKeyFinish() != null))
        throw new InvalidRequestException("Key range clauses must include a start key (i.e. KEY > term)");

    // Key range and by-key(s) combined (KEY > foo AND KEY = bar)
    if (select.isKeyRange() && select.getKeys().size() > 0)
        throw new InvalidRequestException("You cannot combine key range and by-key clauses in a SELECT");

    // Start and finish keys, *and* column relations (KEY > foo AND KEY < bar and name1 = value1).
    if (select.isKeyRange() && (select.getKeyFinish() != null) && (select.getColumnRelations().size() > 0))
        throw new InvalidRequestException("You cannot combine key range and by-column clauses in a SELECT");

    // Can't use more than one KEY =
    if (!select.isMultiKey() && select.getKeys().size() > 1)
        throw new InvalidRequestException("You cannot use more than one KEY = in a SELECT");

    if (select.getColumnRelations().size() > 0)
    {
        ColumnFamilyStore cfstore = Keyspace.open(keyspace).getColumnFamilyStore(select.getColumnFamily());
        CellNameType comparator = cfstore.metadata.comparator;
        AbstractType<?> at = comparator.asAbstractType();
        SecondaryIndexManager idxManager = cfstore.indexManager;
        for (Relation relation : select.getColumnRelations())
        {
            ByteBuffer name = relation.getEntity().getByteBuffer(at, variables);
            if ((relation.operator() == RelationType.EQ) && idxManager.indexes(comparator.cellFromByteBuffer(name)))
                return;
        }
        throw new InvalidRequestException("No indexed columns present in by-columns clause with \"equals\" operator");
    }
}
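// Illustrative CQL2 statements (hypothetical, not from the source) that the checks above reject:
//   SELECT * FROM cf WHERE KEY < 'z'                                  -- finish key without a start key
//   SELECT * FROM cf WHERE KEY > 'a' AND KEY = 'b'                    -- key range combined with a by-key clause
//   SELECT * FROM cf WHERE KEY > 'a' AND KEY < 'z' AND name1 = 'v'    -- key range combined with a by-column clause
//   SELECT * FROM cf WHERE KEY = 'a' AND KEY = 'b'                    -- more than one KEY = without multi-key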
private void internal_remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level, boolean isCommutativeOp)
throws RequestValidationException, UnavailableException, TimedOutException
{
    ThriftClientState cState = state();
    String keyspace = cState.getKeyspace();
    cState.hasColumnFamilyAccess(keyspace, column_path.column_family, Permission.MODIFY);

    CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, column_path.column_family, isCommutativeOp);
    ThriftValidation.validateKey(metadata, key);
    ThriftValidation.validateColumnPathOrParent(metadata, column_path);

    if (isCommutativeOp)
        ThriftConversion.fromThrift(consistency_level).validateCounterForWrite(metadata);

    org.apache.cassandra.db.Mutation mutation = new org.apache.cassandra.db.Mutation(keyspace, key);
    if (column_path.super_column == null && column_path.column == null)
        mutation.delete(column_path.column_family, timestamp);
    else if (column_path.super_column == null)
        mutation.delete(column_path.column_family, metadata.comparator.cellFromByteBuffer(column_path.column), timestamp);
    else if (column_path.column == null)
        mutation.deleteRange(column_path.column_family, SuperColumns.startOf(column_path.super_column), SuperColumns.endOf(column_path.super_column), timestamp);
    else
        mutation.delete(column_path.column_family, metadata.comparator.makeCellName(column_path.super_column, column_path.column), timestamp);

    if (isCommutativeOp)
        doInsert(consistency_level, Arrays.asList(new CounterMutation(mutation, ThriftConversion.fromThrift(consistency_level))));
    else
        doInsert(consistency_level, Arrays.asList(mutation));
}
: metadata.comparator.cellFromByteBuffer(column.name);
    mutation.delete(cfm.cfName, cfm.comparator.makeCellName(del.super_column, c), del.timestamp);
else
    mutation.delete(cfm.cfName, cfm.comparator.cellFromByteBuffer(c), del.timestamp);