/**
 * Deserializes a PagedRangeCommand from the wire.
 *
 * Field order must mirror serialize(): keyspace, table, timestamp, key range,
 * slice predicate, page start/stop composites, row filter, limit, and (2.1+)
 * the countCQL3Rows flag.
 *
 * @throws UnknownColumnFamilyException if the table is not (yet) known locally,
 *         e.g. because a schema change has not fully propagated.
 */
public PagedRangeCommand deserialize(DataInput in, int version) throws IOException
{
    String keyspace = in.readUTF();
    String columnFamily = in.readUTF();
    long timestamp = in.readLong();
    AbstractBounds<RowPosition> keyRange = AbstractBounds.serializer.deserialize(in, version).toRowBounds();
    // The table's comparator is needed to decode the predicate and page bounds below,
    // so we cannot proceed without metadata.
    CFMetaData metadata = Schema.instance.getCFMetaData(keyspace, columnFamily);
    if (metadata == null)
    {
        String message = String.format("Got paged range command for nonexistent table %s.%s. If the table was just " +
                "created, this is likely due to the schema not being fully propagated. Please wait for schema " +
                "agreement on table creation." , keyspace, columnFamily);
        throw new UnknownColumnFamilyException(message, null);
    }
    SliceQueryFilter predicate = metadata.comparator.sliceQueryFilterSerializer().deserialize(in, version);
    // The start and stop of the page being requested.
    Composite start = metadata.comparator.serializer().deserialize(in);
    Composite stop = metadata.comparator.serializer().deserialize(in);
    int filterCount = in.readInt();
    List<IndexExpression> rowFilter = new ArrayList<IndexExpression>(filterCount);
    for (int i = 0; i < filterCount; i++)
    {
        rowFilter.add(IndexExpression.readFrom(in));
    }
    int limit = in.readInt();
    // Pre-2.1 peers do not send countCQL3Rows; infer it from the predicate instead.
    boolean countCQL3Rows = version >= MessagingService.VERSION_21
                          ? in.readBoolean()
                          : predicate.compositesToGroup >= 0 || predicate.count != 1; // See #6857
    return new PagedRangeCommand(keyspace, columnFamily, timestamp, keyRange, predicate, start, stop, rowFilter, limit, countCQL3Rows);
}
/**
 * Rejects a composite (clustering prefix) whose serialized encoding would exceed
 * the maximum permitted cell name length.
 *
 * @throws InvalidRequestException if the encoded size is over Cell.MAX_NAME_LENGTH
 */
public static void validateComposite(Composite name, CType type) throws InvalidRequestException
{
    long encodedLength = type.serializer().serializedSize(name, TypeSizes.NATIVE);
    if (encodedLength <= Cell.MAX_NAME_LENGTH)
        return;

    throw new InvalidRequestException(String.format("The sum of all clustering columns is too long (%s > %s)",
                                                    encodedLength,
                                                    Cell.MAX_NAME_LENGTH));
}
/**
 * Deserializes a RangeTombstoneList; returns null when the stream encodes an
 * empty list (count of 0).
 *
 * For 2.0+ peers the tombstones arrive already normalized, so they can be
 * written directly into their slots; older peers may send overlapping ranges,
 * which must go through the normalizing add() path.
 */
public RangeTombstoneList deserialize(DataInput in, int version) throws IOException
{
    int size = in.readInt();
    if (size == 0)
        return null;

    RangeTombstoneList tombstones = new RangeTombstoneList(type, size);

    for (int i = 0; i < size; i++)
    {
        Composite start = type.serializer().deserialize(in);
        Composite end = type.serializer().deserialize(in);
        int delTime =  in.readInt();
        long markedAt = in.readLong();

        if (version >= MessagingService.VERSION_20)
        {
            // Already sorted and non-overlapping: write straight into slot i.
            tombstones.setInternal(i, start, end, markedAt, delTime);
        }
        else
        {
            /*
             * The old implementation used to have range sorted by left value, but with potentially
             * overlapping range. So we need to use the "slow" path.
             */
            tombstones.add(start, end, markedAt, delTime);
        }
    }

    // The "slow" path take care of updating the size, but not the fast one
    if (version >= MessagingService.VERSION_20)
        tombstones.size = size;

    return tombstones;
}
/**
 * Computes the exact number of bytes serialize() will write for this command
 * at the given messaging version. The field order below mirrors serialize().
 */
public long serializedSize(PagedRangeCommand cmd, int version)
{
    TypeSizes sizes = TypeSizes.NATIVE;

    long total = sizes.sizeof(cmd.keyspace)
               + sizes.sizeof(cmd.columnFamily)
               + sizes.sizeof(cmd.timestamp)
               + AbstractBounds.serializer.serializedSize(cmd.keyRange, version);

    // The table's comparator dictates how the predicate and page bounds are encoded.
    CFMetaData metadata = Schema.instance.getCFMetaData(cmd.keyspace, cmd.columnFamily);
    total += metadata.comparator.sliceQueryFilterSerializer().serializedSize((SliceQueryFilter)cmd.predicate, version);
    total += metadata.comparator.serializer().serializedSize(cmd.start, sizes);
    total += metadata.comparator.serializer().serializedSize(cmd.stop, sizes);

    total += sizes.sizeof(cmd.rowFilter.size());
    for (IndexExpression expr : cmd.rowFilter)
    {
        // Mirrors IndexExpression.writeTo: short-length column, operator ordinal, short-length value.
        total += sizes.sizeofWithShortLength(expr.column);
        total += sizes.sizeof(expr.operator.ordinal());
        total += sizes.sizeofWithShortLength(expr.value);
    }

    total += sizes.sizeof(cmd.limit);
    // countCQL3Rows is only on the wire for 2.1+ peers.
    if (version >= MessagingService.VERSION_21)
        total += sizes.sizeof(cmd.countCQL3Rows);
    return total;
}
}
public void serialize(PagedRangeCommand cmd, DataOutputPlus out, int version) throws IOException { out.writeUTF(cmd.keyspace); out.writeUTF(cmd.columnFamily); out.writeLong(cmd.timestamp); AbstractBounds.serializer.serialize(cmd.keyRange, out, version); CFMetaData metadata = Schema.instance.getCFMetaData(cmd.keyspace, cmd.columnFamily); // SliceQueryFilter (the count is not used) SliceQueryFilter filter = (SliceQueryFilter)cmd.predicate; metadata.comparator.sliceQueryFilterSerializer().serialize(filter, out, version); // The start and stop of the page metadata.comparator.serializer().serialize(cmd.start, out); metadata.comparator.serializer().serialize(cmd.stop, out); out.writeInt(cmd.rowFilter.size()); for (IndexExpression expr : cmd.rowFilter) { expr.writeTo(out);; } out.writeInt(cmd.limit); if (version >= MessagingService.VERSION_21) out.writeBoolean(cmd.countCQL3Rows); }
public OnDiskAtom deserializeFromSSTable(DataInput in, ColumnSerializer.Flag flag, int expireBefore, Descriptor.Version version) throws IOException { Composite name = type.serializer().deserialize(in); if (name.isEmpty()) { // SSTableWriter.END_OF_ROW return null; } int b = in.readUnsignedByte(); if ((b & ColumnSerializer.RANGE_TOMBSTONE_MASK) != 0) return type.rangeTombstoneSerializer().deserializeBody(in, name, version); else return type.columnSerializer().deserializeColumnBody(in, (CellName)name, b, flag, expireBefore); }
/**
 * Computes the number of bytes serialize() will write for this list (a null
 * list costs just the 0 count). The per-entry layout mirrors serialize():
 * start, end, deletion time (int), markedAt (long).
 */
public long serializedSize(RangeTombstoneList tombstones, TypeSizes typeSizes, int version)
{
    if (tombstones == null)
        return typeSizes.sizeof(0);

    long total = typeSizes.sizeof(tombstones.size);
    for (int idx = 0; idx < tombstones.size; idx++)
    {
        total += type.serializer().serializedSize(tombstones.starts[idx], typeSizes)
               + type.serializer().serializedSize(tombstones.ends[idx], typeSizes)
               + typeSizes.sizeof(tombstones.delTimes[idx])
               + typeSizes.sizeof(tombstones.markedAts[idx]);
    }
    return total;
}
/**
 * Serializes a RangeTombstoneList; a null list is encoded as a count of 0.
 * Per-entry layout: start, end, deletion time (int), markedAt (long).
 */
public void serialize(RangeTombstoneList tombstones, DataOutputPlus out, int version) throws IOException
{
    int count = (tombstones == null) ? 0 : tombstones.size;
    out.writeInt(count);
    for (int idx = 0; idx < count; idx++)
    {
        type.serializer().serialize(tombstones.starts[idx], out);
        type.serializer().serialize(tombstones.ends[idx], out);
        out.writeInt(tombstones.delTimes[idx]);
        out.writeLong(tombstones.markedAts[idx]);
    }
}
// Serializes a cell name by delegating to the composite serializer.
public void serialize(CellName c, DataOutputPlus out) throws IOException
{
    serializer().serialize(c, out);
}
/**
 * Reads a full range tombstone (start composite, flag byte, then body) from an
 * sstable stream. The flag byte must have the range-tombstone bit set.
 */
public RangeTombstone deserializeFromSSTable(DataInput in, Descriptor.Version version) throws IOException
{
    Composite start = type.serializer().deserialize(in);
    int serializationFlags = in.readUnsignedByte();
    assert (serializationFlags & ColumnSerializer.RANGE_TOMBSTONE_MASK) != 0;
    return deserializeBody(in, start, version);
}
// Size in bytes of the serialized form of the cell name, delegated to the
// composite serializer with the given size accounting.
public long serializedSize(CellName c, TypeSizes type)
{
    return serializer().serializedSize(c, type);
}
};
// Skips a range tombstone body (the end composite followed by its DeletionTime)
// without materializing either.
public void skipBody(DataInput in, Descriptor.Version version) throws IOException
{
    type.serializer().skip(in);
    DeletionTime.serializer.skip(in);
}
/**
 * Size in bytes of a serialized IndexInfo entry: first and last composite
 * names followed by the offset and width longs.
 */
public long serializedSize(IndexInfo info, TypeSizes typeSizes)
{
    long total = type.serializer().serializedSize(info.firstName, typeSizes);
    total += type.serializer().serializedSize(info.lastName, typeSizes);
    total += typeSizes.sizeof(info.offset);
    total += typeSizes.sizeof(info.width);
    return total;
}
}
public RangeTombstone deserializeBody(DataInput in, Composite min, Descriptor.Version version) throws IOException { Composite max = type.serializer().deserialize(in); DeletionTime dt = DeletionTime.serializer.deserialize(in); // If the max equals the min.end(), we can avoid keeping an extra ByteBuffer in memory by using // min.end() instead of max Composite minEnd = min.end(); max = minEnd.equals(max) ? minEnd : max; return new RangeTombstone(min, max, dt); }
public long serializedSizeForSSTable(RangeTombstone t) { TypeSizes typeSizes = TypeSizes.NATIVE; return type.serializer().serializedSize(t.min, typeSizes) + 1 // serialization flag + type.serializer().serializedSize(t.max, typeSizes) + DeletionTime.serializer.serializedSize(t.data, typeSizes); } }
/**
 * Reads a cell name from the stream. An empty composite is never a valid cell
 * name, so it is reported as column corruption.
 */
public CellName deserialize(DataInput in) throws IOException
{
    Composite composite = serializer().deserialize(in);
    if (composite.isEmpty())
        throw ColumnSerializer.CorruptColumnException.create(in, ByteBufferUtil.EMPTY_BYTE_BUFFER);

    assert composite instanceof CellName : composite;
    return (CellName)composite;
}
/**
 * Reads an IndexInfo entry: first and last composite names followed by the
 * offset and width longs (the order serializedSize() accounts for).
 */
public IndexInfo deserialize(DataInput in) throws IOException
{
    Composite firstName = type.serializer().deserialize(in);
    Composite lastName = type.serializer().deserialize(in);
    long offset = in.readLong();
    long width = in.readLong();
    return new IndexInfo(firstName, lastName, offset, width);
}