/**
 * Deserializes the body of a cell (everything after the name and flag mask).
 *
 * The mask selects the on-disk layout: counter cells carry an extra
 * "timestamp of last delete", expiring cells carry TTL and expiration time,
 * and all remaining kinds share a plain timestamp + value layout.
 *
 * @param in           input positioned just past the cell's name and mask
 * @param name         the already-deserialized cell name
 * @param mask         flag bits identifying the cell kind
 * @param flag         serialization context (local / from-remote / pre-3.0)
 * @param expireBefore local expiry threshold forwarded to expiring cells
 * @return the reconstructed Cell
 * @throws IOException on a read error
 */
Cell deserializeColumnBody(DataInput in, CellName name, int mask, ColumnSerializer.Flag flag, int expireBefore) throws IOException
{
    if ((mask & COUNTER_MASK) != 0)
    {
        // Counter layout: timestampOfLastDelete, timestamp, value.
        long lastDeleteTimestamp = in.readLong();
        long timestamp = in.readLong();
        ByteBuffer contents = ByteBufferUtil.readWithLength(in);
        return BufferCounterCell.create(name, contents, timestamp, lastDeleteTimestamp, flag);
    }

    if ((mask & EXPIRATION_MASK) != 0)
    {
        // Expiring layout: ttl, expiration, timestamp, value.
        int ttl = in.readInt();
        int expiration = in.readInt();
        long timestamp = in.readLong();
        ByteBuffer contents = ByteBufferUtil.readWithLength(in);
        return BufferExpiringCell.create(name, contents, timestamp, ttl, expiration, expireBefore, flag);
    }

    // Counter-update, tombstone and regular cells all serialize as timestamp + value;
    // only the wrapper type differs.
    long timestamp = in.readLong();
    ByteBuffer contents = ByteBufferUtil.readWithLength(in);
    if ((mask & COUNTER_UPDATE_MASK) != 0)
        return new BufferCounterUpdateCell(name, contents, timestamp);
    if ((mask & DELETION_MASK) != 0)
        return new BufferDeletedCell(name, contents, timestamp);
    return new BufferCell(name, contents, timestamp);
}
/** * Deserializes the first and last key stored in the summary * * Only for use by offline tools like SSTableMetadataViewer, otherwise SSTable.first/last should be used. */ public Pair<DecoratedKey, DecoratedKey> deserializeFirstLastKey(DataInputStream in, IPartitioner partitioner, boolean haveSamplingLevel) throws IOException { in.skipBytes(4); // minIndexInterval int offsetCount = in.readInt(); long offheapSize = in.readLong(); if (haveSamplingLevel) in.skipBytes(8); // samplingLevel, fullSamplingSummarySize in.skip(offsetCount * 4); in.skip(offheapSize - offsetCount * 4); DecoratedKey first = partitioner.decorateKey(ByteBufferUtil.readWithLength(in)); DecoratedKey last = partitioner.decorateKey(ByteBufferUtil.readWithLength(in)); return Pair.create(first, last); } }
/** * Deserializes the first and last key stored in the summary * * Only for use by offline tools like SSTableMetadataViewer, otherwise SSTable.first/last should be used. */ public Pair<DecoratedKey, DecoratedKey> deserializeFirstLastKey(DataInputStream in, IPartitioner partitioner, boolean haveSamplingLevel) throws IOException { in.skipBytes(4); // minIndexInterval int offsetCount = in.readInt(); long offheapSize = in.readLong(); if (haveSamplingLevel) in.skipBytes(8); // samplingLevel, fullSamplingSummarySize in.skip(offsetCount * 4); in.skip(offheapSize - offsetCount * 4); DecoratedKey first = partitioner.decorateKey(ByteBufferUtil.readWithLength(in)); DecoratedKey last = partitioner.decorateKey(ByteBufferUtil.readWithLength(in)); return Pair.create(first, last); } }
/** * Deserializes the first and last key stored in the summary * * Only for use by offline tools like SSTableMetadataViewer, otherwise SSTable.first/last should be used. */ public Pair<DecoratedKey, DecoratedKey> deserializeFirstLastKey(DataInputStream in, IPartitioner partitioner, boolean haveSamplingLevel) throws IOException { in.skipBytes(4); // minIndexInterval int offsetCount = in.readInt(); long offheapSize = in.readLong(); if (haveSamplingLevel) in.skipBytes(8); // samplingLevel, fullSamplingSummarySize in.skip(offsetCount * 4); in.skip(offheapSize - offsetCount * 4); DecoratedKey first = partitioner.decorateKey(ByteBufferUtil.readWithLength(in)); DecoratedKey last = partitioner.decorateKey(ByteBufferUtil.readWithLength(in)); return Pair.create(first, last); } }
/** * Deserializes the first and last key stored in the summary * * Only for use by offline tools like SSTableMetadataViewer, otherwise SSTable.first/last should be used. */ public Pair<DecoratedKey, DecoratedKey> deserializeFirstLastKey(DataInputStream in, IPartitioner partitioner, boolean haveSamplingLevel) throws IOException { in.skipBytes(4); // minIndexInterval int offsetCount = in.readInt(); long offheapSize = in.readLong(); if (haveSamplingLevel) in.skipBytes(8); // samplingLevel, fullSamplingSummarySize in.skip(offsetCount * 4); in.skip(offheapSize - offsetCount * 4); DecoratedKey first = partitioner.decorateKey(ByteBufferUtil.readWithLength(in)); DecoratedKey last = partitioner.decorateKey(ByteBufferUtil.readWithLength(in)); return Pair.create(first, last); } }
iStream, getPartitioner(), descriptor.version.hasSamplingLevel(), metadata.params.minIndexInterval, metadata.params.maxIndexInterval); first = decorateKey(ByteBufferUtil.readWithLength(iStream)); last = decorateKey(ByteBufferUtil.readWithLength(iStream));
iStream, getPartitioner(), descriptor.version.hasSamplingLevel(), metadata.params.minIndexInterval, metadata.params.maxIndexInterval); first = decorateKey(ByteBufferUtil.readWithLength(iStream)); last = decorateKey(ByteBufferUtil.readWithLength(iStream));
iStream, getPartitioner(), descriptor.version.hasSamplingLevel(), metadata.params.minIndexInterval, metadata.params.maxIndexInterval); first = decorateKey(ByteBufferUtil.readWithLength(iStream)); last = decorateKey(ByteBufferUtil.readWithLength(iStream));
final ByteBuffer partitionKey = ByteBufferUtil.readWithLength(in); final ByteBuffer cellName = ByteBufferUtil.readWithLength(in); if (cfs == null || !cfs.metadata.isCounter() || !cfs.isCounterCacheEnabled()) return null;
final ByteBuffer partitionKey = ByteBufferUtil.readWithLength(in); final ByteBuffer cellName = ByteBufferUtil.readWithLength(in); if (cfs == null || !cfs.metadata.isCounter() || !cfs.isCounterCacheEnabled()) return null;
final ByteBuffer partitionKey = ByteBufferUtil.readWithLength(in); ByteBuffer cellNameBuffer = ByteBufferUtil.readWithLength(in); if (cfs == null || !cfs.metadata.isCounter() || !cfs.isCounterCacheEnabled()) return null;
ByteBuffer value = ByteBufferUtil.readWithLength(in); if (flag == SerializationHelper.Flag.FROM_REMOTE || (flag == SerializationHelper.Flag.LOCAL && CounterContext.instance().shouldClearLocal(value))) value = CounterContext.instance().clearAllLocal(value); int expiration = in.readInt(); long ts = in.readLong(); ByteBuffer value = ByteBufferUtil.readWithLength(in); return new LegacyCell(LegacyCell.Kind.EXPIRING, decodeCellName(metadata, cellname, readAllAsDynamic), value, ts, expiration, ttl); ByteBuffer value = ByteBufferUtil.readWithLength(in); LegacyCellName name = decodeCellName(metadata, cellname, readAllAsDynamic); return (mask & COUNTER_UPDATE_MASK) != 0
ByteBuffer value = ByteBufferUtil.readWithLength(in); if (flag == SerializationHelper.Flag.FROM_REMOTE || (flag == SerializationHelper.Flag.LOCAL && CounterContext.instance().shouldClearLocal(value))) value = CounterContext.instance().clearAllLocal(value); int expiration = in.readInt(); long ts = in.readLong(); ByteBuffer value = ByteBufferUtil.readWithLength(in); return new LegacyCell(LegacyCell.Kind.EXPIRING, decodeCellName(metadata, cellname, readAllAsDynamic), value, ts, expiration, ttl); ByteBuffer value = ByteBufferUtil.readWithLength(in); LegacyCellName name = decodeCellName(metadata, cellname, readAllAsDynamic); return (mask & COUNTER_UPDATE_MASK) != 0
ByteBuffer value = ByteBufferUtil.readWithLength(in); if (flag == SerializationHelper.Flag.FROM_REMOTE || (flag == SerializationHelper.Flag.LOCAL && CounterContext.instance().shouldClearLocal(value))) value = CounterContext.instance().clearAllLocal(value); int expiration = in.readInt(); long ts = in.readLong(); ByteBuffer value = ByteBufferUtil.readWithLength(in); return new LegacyCell(LegacyCell.Kind.EXPIRING, decodeCellName(metadata, cellname, readAllAsDynamic), value, ts, expiration, ttl); ByteBuffer value = ByteBufferUtil.readWithLength(in); LegacyCellName name = decodeCellName(metadata, cellname, readAllAsDynamic); return (mask & COUNTER_UPDATE_MASK) != 0
ByteBuffer value = ByteBufferUtil.readWithLength(in); if (flag == SerializationHelper.Flag.FROM_REMOTE || (flag == SerializationHelper.Flag.LOCAL && CounterContext.instance().shouldClearLocal(value))) value = CounterContext.instance().clearAllLocal(value); int expiration = in.readInt(); long ts = in.readLong(); ByteBuffer value = ByteBufferUtil.readWithLength(in); return new LegacyCell(LegacyCell.Kind.EXPIRING, decodeCellName(metadata, cellname, readAllAsDynamic), value, ts, expiration, ttl); ByteBuffer value = ByteBufferUtil.readWithLength(in); LegacyCellName name = decodeCellName(metadata, cellname, readAllAsDynamic); return (mask & COUNTER_UPDATE_MASK) != 0
public Future<Pair<RowCacheKey, IRowCacheEntry>> deserialize(DataInputStream in, final ColumnFamilyStore cfs) throws IOException { //Keyspace and CF name are deserialized by AutoSaving cache and used to fetch the CFS provided as a //parameter so they aren't deserialized here, even though they are serialized by this serializer final ByteBuffer buffer = ByteBufferUtil.readWithLength(in); if (cfs == null || !cfs.isRowCacheEnabled()) return null; assert(!cfs.isIndex()); return StageManager.getStage(Stage.READ).submit(new Callable<Pair<RowCacheKey, IRowCacheEntry>>() { public Pair<RowCacheKey, IRowCacheEntry> call() throws Exception { DecoratedKey key = cfs.partitioner.decorateKey(buffer); QueryFilter cacheFilter = new QueryFilter(key, cfs.getColumnFamilyName(), cfs.readFilterForCache(), Integer.MIN_VALUE); ColumnFamily data = cfs.getTopLevelColumns(cacheFilter, Integer.MIN_VALUE); return Pair.create(new RowCacheKey(cfs.metadata.ksAndCFName, key), (IRowCacheEntry) data); } }); } }
public Future<Pair<RowCacheKey, IRowCacheEntry>> deserialize(DataInputPlus in, final ColumnFamilyStore cfs) throws IOException { //Keyspace and CF name are deserialized by AutoSaving cache and used to fetch the CFS provided as a //parameter so they aren't deserialized here, even though they are serialized by this serializer final ByteBuffer buffer = ByteBufferUtil.readWithLength(in); if (cfs == null || !cfs.isRowCacheEnabled()) return null; final int rowsToCache = cfs.metadata.params.caching.rowsPerPartitionToCache(); assert(!cfs.isIndex());//Shouldn't have row cache entries for indexes return StageManager.getStage(Stage.READ).submit(new Callable<Pair<RowCacheKey, IRowCacheEntry>>() { public Pair<RowCacheKey, IRowCacheEntry> call() throws Exception { DecoratedKey key = cfs.decorateKey(buffer); int nowInSec = FBUtilities.nowInSeconds(); SinglePartitionReadCommand cmd = SinglePartitionReadCommand.fullPartitionRead(cfs.metadata, nowInSec, key); try (ReadExecutionController controller = cmd.executionController(); UnfilteredRowIterator iter = cmd.queryMemtableAndDisk(cfs, controller)) { CachedPartition toCache = CachedBTreePartition.create(DataLimits.cqlLimits(rowsToCache).filter(iter, nowInSec, true), nowInSec); return Pair.create(new RowCacheKey(cfs.metadata.ksAndCFName, key), (IRowCacheEntry)toCache); } } }); } }
public Future<Pair<RowCacheKey, IRowCacheEntry>> deserialize(DataInputPlus in, final ColumnFamilyStore cfs) throws IOException { //Keyspace and CF name are deserialized by AutoSaving cache and used to fetch the CFS provided as a //parameter so they aren't deserialized here, even though they are serialized by this serializer final ByteBuffer buffer = ByteBufferUtil.readWithLength(in); if (cfs == null || !cfs.isRowCacheEnabled()) return null; final int rowsToCache = cfs.metadata.params.caching.rowsPerPartitionToCache(); assert(!cfs.isIndex());//Shouldn't have row cache entries for indexes return StageManager.getStage(Stage.READ).submit(new Callable<Pair<RowCacheKey, IRowCacheEntry>>() { public Pair<RowCacheKey, IRowCacheEntry> call() throws Exception { DecoratedKey key = cfs.decorateKey(buffer); int nowInSec = FBUtilities.nowInSeconds(); SinglePartitionReadCommand cmd = SinglePartitionReadCommand.fullPartitionRead(cfs.metadata, nowInSec, key); try (ReadExecutionController controller = cmd.executionController(); UnfilteredRowIterator iter = cmd.queryMemtableAndDisk(cfs, controller)) { CachedPartition toCache = CachedBTreePartition.create(DataLimits.cqlLimits(rowsToCache).filter(iter, nowInSec, true), nowInSec); return Pair.create(new RowCacheKey(cfs.metadata.ksAndCFName, key), (IRowCacheEntry)toCache); } } }); } }
public Future<Pair<RowCacheKey, IRowCacheEntry>> deserialize(DataInputPlus in, final ColumnFamilyStore cfs) throws IOException { //Keyspace and CF name are deserialized by AutoSaving cache and used to fetch the CFS provided as a //parameter so they aren't deserialized here, even though they are serialized by this serializer final ByteBuffer buffer = ByteBufferUtil.readWithLength(in); if (cfs == null || !cfs.isRowCacheEnabled()) return null; final int rowsToCache = cfs.metadata.params.caching.rowsPerPartitionToCache(); assert(!cfs.isIndex());//Shouldn't have row cache entries for indexes return StageManager.getStage(Stage.READ).submit(new Callable<Pair<RowCacheKey, IRowCacheEntry>>() { public Pair<RowCacheKey, IRowCacheEntry> call() throws Exception { DecoratedKey key = cfs.decorateKey(buffer); int nowInSec = FBUtilities.nowInSeconds(); SinglePartitionReadCommand cmd = SinglePartitionReadCommand.fullPartitionRead(cfs.metadata, nowInSec, key); try (ReadExecutionController controller = cmd.executionController(); UnfilteredRowIterator iter = cmd.queryMemtableAndDisk(cfs, controller)) { CachedPartition toCache = CachedBTreePartition.create(DataLimits.cqlLimits(rowsToCache).filter(iter, nowInSec, true), nowInSec); return Pair.create(new RowCacheKey(cfs.metadata.ksAndCFName, key), (IRowCacheEntry)toCache); } } }); } }
iStream, getPartitioner(), descriptor.version.hasSamplingLevel(), metadata.params.minIndexInterval, metadata.params.maxIndexInterval); first = decorateKey(ByteBufferUtil.readWithLength(iStream)); last = decorateKey(ByteBufferUtil.readWithLength(iStream));