// Issue a dummy by-names quorum read: slice a single column out of the row keyed by a
// one-byte zero buffer. Used purely to probe readability of the CF, not for its data.
// NOTE(review): `names`, `ks`, and `cf` are defined by enclosing code not visible in this
// excerpt -- presumably `names` holds the same single zero-byte column name; confirm
// against the enclosing method.
NamesQueryFilter nqf = new NamesQueryFilter(names); SliceByNamesReadCommand cmd = new SliceByNamesReadCommand(ks, ByteBufferUtil.zeroByteBuffer(1), cf, 1L, nqf); StorageProxy.read(ImmutableList.<ReadCommand> of(cmd), ConsistencyLevel.QUORUM);
/**
 * Builds a copy of this filter selecting {@code newColumns} instead of the current
 * column set, carrying over the CQL3 row-counting flag unchanged.
 *
 * @param newColumns replacement column names, sorted in comparator order
 * @return a new NamesQueryFilter over {@code newColumns}
 */
public NamesQueryFilter withUpdatedColumns(SortedSet<CellName> newColumns)
{
    boolean keepCounting = countCQL3Rows;
    return new NamesQueryFilter(newColumns, keepCounting);
}
/**
 * Creates a QueryFilter that returns only the columns matching the given names.
 *
 * @param key       the row to slice
 * @param cfName    column family to query
 * @param columns   the column names to restrict the results to, sorted in comparator order
 * @param timestamp query timestamp to attach to the filter
 * @return a QueryFilter restricted to {@code columns} on the given row
 */
public static QueryFilter getNamesFilter(DecoratedKey key, String cfName, SortedSet<CellName> columns, long timestamp)
{
    NamesQueryFilter byNames = new NamesQueryFilter(columns);
    return new QueryFilter(key, cfName, byNames, timestamp);
}
/**
 * Reads a NamesQueryFilter from the stream: an int count, that many serialized cell
 * names, then the countCQL3Rows boolean flag.
 *
 * @param in      source stream
 * @param version serialization version (unused by this format)
 * @return the deserialized filter
 * @throws IOException on any read failure
 */
public NamesQueryFilter deserialize(DataInput in, int version) throws IOException
{
    int count = in.readInt();
    ISerializer<CellName> cellSerializer = type.cellSerializer();
    SortedSet<CellName> names = new TreeSet<CellName>(type);
    for (int remaining = count; remaining > 0; remaining--)
        names.add(cellSerializer.deserialize(in));
    boolean countCQL3Rows = in.readBoolean();
    return new NamesQueryFilter(names, countCQL3Rows);
}
/**
 * Repeatedly issues a dummy quorum read against the given column family until it succeeds
 * or a 60-second deadline expires. Used to wait for a freshly created CF to become
 * readable before proceeding.
 *
 * @param ks keyspace containing the column family
 * @param cf column family to probe
 * @throws PermanentBackendException if the thread is interrupted while waiting, or the
 *         deadline passes without a single successful read
 */
private void retryDummyRead(String ks, String cf) throws PermanentBackendException
{
    final long limit = System.currentTimeMillis() + (60L * 1000L);
    while (System.currentTimeMillis() < limit)
    {
        try
        {
            SortedSet<ByteBuffer> ss = new TreeSet<ByteBuffer>();
            ss.add(ByteBufferUtil.zeroByteBuffer(1));
            NamesQueryFilter nqf = new NamesQueryFilter(ss);
            SliceByNamesReadCommand cmd = new SliceByNamesReadCommand(ks, ByteBufferUtil.zeroByteBuffer(1), cf, 1L, nqf);
            StorageProxy.read(ImmutableList.<ReadCommand> of(cmd), ConsistencyLevel.QUORUM);
            log.info("Read on CF {} in KS {} succeeded", cf, ks);
            return;
        }
        catch (Throwable t)
        {
            // Deliberate best-effort probe: any failure just means the CF is not readable
            // yet, so log and retry until the deadline.
            log.warn("Failed to read CF {} in KS {} following creation", cf, ks, t);
        }
        try
        {
            Thread.sleep(1000L);
        }
        catch (InterruptedException e)
        {
            // Restore the interrupt status so callers further up the stack can observe it
            // (the original code swallowed the flag by only rethrowing).
            Thread.currentThread().interrupt();
            throw new PermanentBackendException(e);
        }
    }
    throw new PermanentBackendException("Timed out while attempting to read CF " + cf + " in KS " + ks + " following creation");
}
}
// Issue a dummy by-names quorum read against the row keyed by a one-byte zero buffer.
// NOTE(review): this fragment duplicates the probe sequence seen elsewhere in this file;
// `names`, `ks`, and `cf` come from enclosing code not visible in this excerpt --
// confirm against the enclosing method.
NamesQueryFilter nqf = new NamesQueryFilter(names); SliceByNamesReadCommand cmd = new SliceByNamesReadCommand(ks, ByteBufferUtil.zeroByteBuffer(1), cf, 1L, nqf); StorageProxy.read(ImmutableList.<ReadCommand> of(cmd), ConsistencyLevel.QUORUM);
/**
 * Converts a Thrift SlicePredicate into the internal IDiskAtomFilter representation.
 * An explicit column-name list becomes a NamesQueryFilter (wrapped via SuperColumns for
 * super column families); otherwise the predicate's slice range is delegated to the
 * slice-based overload.
 */
private IDiskAtomFilter toInternalFilter(CFMetaData metadata, ColumnParent parent, SlicePredicate predicate)
{
    if (predicate.column_names == null)
        return toInternalFilter(metadata, parent, predicate.slice_range);

    if (metadata.isSuper())
    {
        // For super CFs, names are compared with the subcomparator (or the comparator
        // itself when a super column is set on the parent).
        CellNameType columnType = new SimpleDenseCellNameType(metadata.comparator.subtype(parent.isSetSuper_column() ? 1 : 0));
        SortedSet<CellName> names = new TreeSet<>(columnType);
        for (ByteBuffer bb : predicate.column_names)
            names.add(columnType.cellFromByteBuffer(bb));
        return SuperColumns.fromSCNamesFilter(metadata.comparator, parent.bufferForSuper_column(), new NamesQueryFilter(names));
    }

    SortedSet<CellName> names = new TreeSet<CellName>(metadata.comparator);
    for (ByteBuffer bb : predicate.column_names)
        names.add(metadata.comparator.cellFromByteBuffer(bb));
    return new NamesQueryFilter(names);
}
private void getCurrentValuesFromCFS(List<CounterUpdateCell> counterUpdateCells, ColumnFamilyStore cfs, ClockAndCount[] currentValues) { SortedSet<CellName> names = new TreeSet<>(cfs.metadata.comparator); for (int i = 0; i < currentValues.length; i++) if (currentValues[i] == null) names.add(counterUpdateCells.get(i).name()); ReadCommand cmd = new SliceByNamesReadCommand(getKeyspaceName(), key(), cfs.metadata.cfName, Long.MIN_VALUE, new NamesQueryFilter(names)); Row row = cmd.getRow(cfs.keyspace); ColumnFamily cf = row == null ? null : row.cf; for (int i = 0; i < currentValues.length; i++) { if (currentValues[i] != null) continue; Cell cell = cf == null ? null : cf.getColumn(counterUpdateCells.get(i).name()); if (cell == null || !cell.isLive()) // absent or a tombstone. currentValues[i] = ClockAndCount.BLANK; else currentValues[i] = CounterContext.instance().getLocalClockAndCount(cell.value()); } }
/**
 * Returns the filter used to read back the relevant columns: the expected column names
 * when some are specified, otherwise a single-column slice over the whole row.
 */
public IDiskAtomFilter readFilter()
{
    if (expected == null || expected.isEmpty())
        return new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY, false, 1);
    return new NamesQueryFilter(ImmutableSortedSet.copyOf(expected.getComparator(), expected.getColumnNames()));
}
public IDiskAtomFilter getExtraFilter(DecoratedKey rowKey, ColumnFamily data) { /* * This method assumes the IndexExpression names are valid column names, which is not the * case with composites. This is ok for now however since: * 1) CompositeSearcher doesn't use it. * 2) We don't yet allow non-indexed range slice with filters in CQL3 (i.e. this will never be * called by CFS.filter() for composites). */ assert !(cfs.getComparator().isCompound()) : "Sequential scan with filters is not supported (if you just created an index, you " + "need to wait for the creation to be propagated to all nodes before querying it)"; if (!needsExtraQuery(rowKey.getKey(), data)) return null; // Note: for counters we must be careful to not add a column that was already there (to avoid overcount). That is // why we do the dance of avoiding to query any column we already have (it's also more efficient anyway) SortedSet<CellName> columns = new TreeSet<CellName>(cfs.getComparator()); for (IndexExpression expr : clause) { CellName name = data.getComparator().cellFromByteBuffer(expr.column); if (data.getColumn(name) == null) columns.add(name); } assert !columns.isEmpty(); return new NamesQueryFilter(columns); }
return null;
// NOTE(review): the statements below follow a `return` and so must belong to a different
// branch of the enclosing method, which is not visible in this excerpt; control flow
// cannot fall through from the line above. Confirm against the full source.
QueryProcessor.validateCellNames(cellNames, cfm.comparator);
// Second constructor argument is the countCQL3Rows flag (cf. the
// NamesQueryFilter(columns, countCQL3Rows) constructor used elsewhere in this codebase).
return new NamesQueryFilter(cellNames, true);
/**
 * Translates a Thrift SlicePredicate (either an explicit column-name list or a slice
 * range) into an IDiskAtomFilter, wrapping the result for super column families.
 *
 * @param sp          the Thrift predicate to translate
 * @param metadata    metadata of the target column family
 * @param superColumn the targeted super column, or null when not slicing inside one
 * @return the equivalent internal filter
 */
public static IDiskAtomFilter asIFilter(SlicePredicate sp, CFMetaData metadata, ByteBuffer superColumn)
{
    CellNameType comparator;
    if (metadata.isSuper())
        comparator = new SimpleDenseCellNameType(metadata.comparator.subtype(superColumn == null ? 0 : 1));
    else
        comparator = metadata.comparator;

    SliceRange sr = sp.slice_range;
    IDiskAtomFilter filter;
    if (sr != null)
    {
        filter = new SliceQueryFilter(comparator.fromByteBuffer(sr.start), comparator.fromByteBuffer(sr.finish), sr.reversed, sr.count);
    }
    else
    {
        SortedSet<CellName> names = new TreeSet<CellName>(comparator);
        for (ByteBuffer bb : sp.column_names)
            names.add(comparator.cellFromByteBuffer(bb));
        filter = new NamesQueryFilter(names);
    }

    return metadata.isSuper() ? SuperColumns.fromSCFilter(metadata.comparator, superColumn, filter) : filter;
}
}
/** * Attempt delivery to any node for which we have hints. Necessary since we can generate hints even for * nodes which are never officially down/failed. */ private void scheduleAllDeliveries() { logger.debug("Started scheduleAllDeliveries"); // Force a major compaction to get rid of the tombstones and expired hints. Do it once, before we schedule any // individual replay, to avoid N - 1 redundant individual compactions (when N is the number of nodes with hints // to deliver to). compact(); IPartitioner p = StorageService.getPartitioner(); RowPosition minPos = p.getMinimumToken().minKeyBound(); Range<RowPosition> range = new Range<>(minPos, minPos, p); IDiskAtomFilter filter = new NamesQueryFilter(ImmutableSortedSet.<CellName>of()); List<Row> rows = hintStore.getRangeSlice(range, null, filter, Integer.MAX_VALUE, System.currentTimeMillis()); for (Row row : rows) { UUID hostId = UUIDGen.getUUID(row.key.getKey()); InetAddress target = StorageService.instance.getTokenMetadata().getEndpointForHostId(hostId); // token may have since been removed (in which case we have just read back a tombstone) if (target != null) scheduleHintDelivery(target, false); } logger.debug("Finished scheduleAllDeliveries"); }
// Build a single-name filter for the requested column (when column_path.column is null,
// the super column name is used as the cell name instead).
SortedSet<CellName> names = new TreeSet<CellName>(columnType);
names.add(columnType.cellFromByteBuffer(column_path.column == null ? column_path.super_column : column_path.column));
// NOTE(review): the two assignments to `filter` below are presumably the bodies of an
// if/else (likely on metadata.isSuper(), cf. toInternalFilter elsewhere in this file)
// in the enclosing method; as excerpted here the second would unconditionally overwrite
// the first. Confirm against the full source.
filter = SuperColumns.fromSCNamesFilter(metadata.comparator, column_path.column == null ? null : column_path.bufferForSuper_column(), new NamesQueryFilter(names));
filter = new NamesQueryFilter(names);
/**
 * Builds the read filter for a CQL SELECT: a NamesQueryFilter when explicit column names
 * are given, otherwise a SliceQueryFilter over the statement's column range.
 *
 * @throws InvalidRequestException if the select's bounds cannot be bound to the variables
 */
private static IDiskAtomFilter filterFromSelect(SelectStatement select, CFMetaData metadata, List<ByteBuffer> variables) throws InvalidRequestException
{
    boolean byNames = !select.isColumnRange() && select.getColumnNames().size() != 0;
    if (byNames)
        return new NamesQueryFilter(getColumnNames(select, metadata, variables));

    AbstractType<?> comparator = metadata.comparator.asAbstractType();
    return new SliceQueryFilter(metadata.comparator.fromByteBuffer(select.getColumnStart().getByteBuffer(comparator, variables)),
                                metadata.comparator.fromByteBuffer(select.getColumnFinish().getByteBuffer(comparator, variables)),
                                select.isColumnsReversed(),
                                select.getColumnsLimit());
}
// Queue a by-names read for the selected columns at timestamp `now`.
// NOTE(review): `commands`, `key`, `now`, and `columnNames` are defined by enclosing
// code not visible in this excerpt -- confirm against the enclosing method.
commands.add(new SliceByNamesReadCommand(metadata.ksName, key, select.getColumnFamily(), now, new NamesQueryFilter(columnNames)));