// Applies when the row has no live cell under the prefix (the "not exists" case).
public boolean appliesTo(ColumnFamily current)
{
    if (current == null)
        return true;

    Iterator<Cell> iter = current.iterator(new ColumnSlice[]{ rowPrefix.slice() });
    while (iter.hasNext())
        if (iter.next().isLive(now))
            return false;
    return true;
}
// Applies when the row has at least one live cell under the prefix (the "exists" case).
public boolean appliesTo(ColumnFamily current)
{
    if (current == null)
        return false;

    Iterator<Cell> iter = current.iterator(new ColumnSlice[]{ rowPrefix.slice() });
    while (iter.hasNext())
        if (iter.next().isLive(now))
            return true;
    return false;
}
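// A minimal sketch of the shared scan the two conditions above invert. The helper name
// 'hasLiveCell' is hypothetical (not part of the original class): the "not exists"
// condition reduces to !hasLiveCell(...) and the "exists" condition to hasLiveCell(...).
private static boolean hasLiveCell(ColumnFamily current, Composite rowPrefix, long now)
{
    if (current == null)
        return false;
    Iterator<Cell> iter = current.iterator(new ColumnSlice[]{ rowPrefix.slice() });
    while (iter.hasNext())
        if (iter.next().isLive(now))
            return true;
    return false;
}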
public IDiskAtomFilter readFilter()
{
    assert !conditions.isEmpty();
    ColumnSlice[] slices = new ColumnSlice[conditions.size()];
    int i = 0;
    // We always read CQL rows entirely as on CAS failure we want to be able to distinguish between "row exists
    // but all values for which there were conditions are null" and "row doesn't exist", and we can't rely on the
    // row marker for that (see #6623)
    for (Composite prefix : conditions.keySet())
        slices[i++] = prefix.slice();

    int toGroup = cfm.comparator.isDense() ? -1 : cfm.clusteringColumns().size();
    slices = ColumnSlice.deoverlapSlices(slices, cfm.comparator);
    assert ColumnSlice.validateSlices(slices, cfm.comparator, false);
    return new SliceQueryFilter(slices, false, slices.length, toGroup);
}
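// Why the deoverlap step matters, as a sketch: two conditioned prefixes can select
// overlapping cell ranges (e.g. a prefix and one of its extensions). 'p1' and 'p2'
// below are hypothetical Composite prefixes, for illustration only.
ColumnSlice[] raw = new ColumnSlice[]{ p1.slice(), p2.slice() };
ColumnSlice[] merged = ColumnSlice.deoverlapSlices(raw, cfm.comparator);
// 'merged' is sorted and overlap-free, which is what validateSlices() asserts above
// and what SliceQueryFilter expects of its slices argument.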
public void applyUpdates(ColumnFamily current, ColumnFamily updates) throws InvalidRequestException
{
    Map<ByteBuffer, CQL3Row> map = null;
    if (stmt.requiresRead())
    {
        // Uses the "current" values read by Paxos for list operations that require a read
        Iterator<CQL3Row> iter = cfm.comparator.CQL3RowBuilder(cfm, now).group(current.iterator(new ColumnSlice[]{ rowPrefix.slice() }));
        if (iter.hasNext())
        {
            map = Collections.singletonMap(key, iter.next());
            assert !iter.hasNext() : "We shouldn't be updating more than one CQL row per ModificationStatement";
        }
    }

    UpdateParameters params = new UpdateParameters(cfm, options, timestamp, stmt.getTimeToLive(options), map);
    stmt.addUpdateForKey(updates, key, rowPrefix, params);
}
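// Context note (descriptive, not from the original source): requiresRead() is true only
// for operations that cannot be applied blindly; the canonical example is setting a list
// element by index (SET l[i] = ...), which needs the list's current cells to locate the
// cell to overwrite. Those current values, read under the Paxos guard, are what the
// singleton map above hands to UpdateParameters.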
public static IDiskAtomFilter fromSCNamesFilter(CellNameType type, ByteBuffer scName, NamesQueryFilter filter)
{
    if (scName == null)
    {
        ColumnSlice[] slices = new ColumnSlice[filter.columns.size()];
        int i = 0;
        for (CellName name : filter.columns)
        {
            // Note that, because the filter in argument is the one from thrift, each 'name' is a SimpleDenseCellName.
            // So calling name.slice() would be incorrect, as simple cell names don't handle the EOC properly.
            // This is why we call toByteBuffer() and rebuild a Composite of the right type before calling slice().
            slices[i++] = type.make(name.toByteBuffer()).slice();
        }
        return new SliceQueryFilter(slices, false, slices.length, 1);
    }
    else
    {
        SortedSet<CellName> newColumns = new TreeSet<>(type);
        for (CellName c : filter.columns)
            newColumns.add(type.makeCellName(scName, c.toByteBuffer()));
        return filter.withUpdatedColumns(newColumns);
    }
}
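// Usage sketch ('type', 'scName' and 'namesFilter' are hypothetical variables): the null
// branch turns each named super column into a slice covering all of its sub-columns,
// while the non-null branch narrows an existing names filter to the exact
// (scName, subColumn) composite cells.
IDiskAtomFilter allSubColumns = fromSCNamesFilter(type, null, namesFilter);
IDiskAtomFilter exactSubColumns = fromSCNamesFilter(type, scName, namesFilter);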
ColumnSlice[] slices = new ColumnSlice[]{ clusteringPrefix.slice() };
List<ReadCommand> commands = new ArrayList<ReadCommand>(partitionKeys.size());
long now = System.currentTimeMillis();
public void addUpdateForKey(ColumnFamily cf, ByteBuffer key, Composite prefix, UpdateParameters params) throws InvalidRequestException
{
    List<Operation> deletions = getOperations();
    if (deletions.isEmpty())
    {
        // We delete the slice selected by the prefix.
        // However, for performance reasons, we distinguish 2 cases:
        //   - It's a full internal row delete
        //   - It's a full cell name (i.e. it's a dense layout and the prefix is full)
        if (prefix.isEmpty())
        {
            // No columns specified, delete the row
            cf.delete(new DeletionInfo(params.timestamp, params.localDeletionTime));
        }
        else if (cfm.comparator.isDense() && prefix.size() == cfm.clusteringColumns().size())
        {
            cf.addAtom(params.makeTombstone(cfm.comparator.create(prefix, null)));
        }
        else
        {
            cf.addAtom(params.makeRangeTombstone(prefix.slice()));
        }
    }
    else
    {
        for (Operation op : deletions)
            op.execute(key, cf, prefix, params);
    }
}
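// The branches above map onto CQL roughly as follows (illustrative statements, assuming
// a table 't' with partition key 'pk' and clustering column 'c'):
//   DELETE FROM t WHERE pk = ?            -> empty prefix, row-level DeletionInfo
//   DELETE FROM t WHERE pk = ? AND c = ?  -> dense layout + full prefix, single cell tombstone
//   same statement on a non-dense table   -> range tombstone over the CQL row's slice
//   DELETE col FROM t WHERE ...           -> non-empty operations list, per-column execution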
// Fragment completed under the assumption (hedged) that the elided condition checks
// whether the base table has static columns; if it does, the static row must be fetched
// in addition to the data slice.
ColumnSlice[] slices = baseCfs.metadata.hasStaticColumns()
                     ? new ColumnSlice[]{ baseCfs.metadata.comparator.staticPrefix().slice(), dataSlice }
                     : new ColumnSlice[]{ dataSlice };
SliceQueryFilter dataFilter = new SliceQueryFilter(slices, false, Integer.MAX_VALUE, baseCfs.metadata.clusteringColumns().size());
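// Note on the static slice: static cells sort under a dedicated prefix ahead of all
// clustering rows, so a single slice around the indexed entry cannot cover them; a
// second slice over the static prefix is prepended so static column values can be
// returned alongside the matched rows.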