DeletionTime$Serializer

How to use DeletionTime$Serializer in org.apache.cassandra.db

Best Java code snippets using org.apache.cassandra.db.DeletionTime$Serializer (Showing top 20 results out of 315)
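
The snippets below read and write DeletionTime values as fields of larger index and SSTable structures. As a self-contained starting point, here is a minimal round-trip sketch, assuming the 3.x-style DataOutputPlus/DataInputPlus API and the single-argument serializedSize seen in several snippets below (older 2.x versions pass a TypeSizes argument instead); the DataOutputBuffer/DataInputBuffer usage and the field values are illustrative only.

import org.apache.cassandra.db.DeletionTime;
import org.apache.cassandra.io.util.DataInputBuffer;
import org.apache.cassandra.io.util.DataOutputBuffer;

public class DeletionTimeRoundTrip
{
  public static void main(String[] args) throws Exception
  {
    // markedForDeleteAt is a microsecond timestamp, localDeletionTime is in seconds;
    // the values here are purely illustrative.
    long markedForDeleteAt = System.currentTimeMillis() * 1000;
    int localDeletionTime = (int) (System.currentTimeMillis() / 1000);
    DeletionTime dt = new DeletionTime(markedForDeleteAt, localDeletionTime);

    try (DataOutputBuffer out = new DataOutputBuffer())
    {
      DeletionTime.serializer.serialize(dt, out);

      // serializedSize reports how many bytes serialize just wrote.
      long size = DeletionTime.serializer.serializedSize(dt);
      System.out.println("serialized " + size + " bytes");

      // Read the value back from the written bytes.
      DataInputBuffer in = new DataInputBuffer(out.buffer(), false);
      DeletionTime roundTrip = DeletionTime.serializer.deserialize(in);
      System.out.println(dt.equals(roundTrip)); // expected: true
    }
  }
}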

origin: com.facebook.presto.cassandra/cassandra-server

public RowIndexEntry deserialize(DataInput in, Descriptor.Version version) throws IOException
{
  long position = in.readLong();
  int size = in.readInt();
  if (size > 0)
  {
    DeletionTime deletionTime = DeletionTime.serializer.deserialize(in);
    int entries = in.readInt();
    ISerializer<IndexHelper.IndexInfo> idxSerializer = type.indexSerializer();
    List<IndexHelper.IndexInfo> columnsIndex = new ArrayList<IndexHelper.IndexInfo>(entries);
    for (int i = 0; i < entries; i++)
      columnsIndex.add(idxSerializer.deserialize(in));
    return new IndexedEntry(position, deletionTime, columnsIndex);
  }
  else
  {
    return new RowIndexEntry(position);
  }
}
origin: com.facebook.presto.cassandra/cassandra-server

DeletionTime.serializer.serialize(emptyColumnFamily.deletionInfo().getTopLevelDeletion(), out);
origin: com.facebook.presto.cassandra/cassandra-server

  public int serializedSize(RowIndexEntry rie)
  {
    int size = TypeSizes.NATIVE.sizeof(rie.position) + TypeSizes.NATIVE.sizeof(rie.promotedSize(type));
    if (rie.isIndexed())
    {
      List<IndexHelper.IndexInfo> index = rie.columnsIndex();
      size += DeletionTime.serializer.serializedSize(rie.deletionTime(), TypeSizes.NATIVE);
      size += TypeSizes.NATIVE.sizeof(index.size());
      ISerializer<IndexHelper.IndexInfo> idxSerializer = type.indexSerializer();
      for (IndexHelper.IndexInfo info : index)
        size += idxSerializer.serializedSize(info, TypeSizes.NATIVE);
    }
    return size;
  }
}
origin: com.strapdata.cassandra/cassandra-all

size += DeletionTime.serializer.serializedSize(legacyPartition.partitionDeletion);
size += legacyPartition.rangeTombstones.serializedSize(partition.metadata());
origin: org.apache.cassandra/cassandra-all

size += DeletionTime.serializer.serializedSize(legacyPartition.partitionDeletion);
size += legacyPartition.rangeTombstones.serializedSize(partition.metadata());
origin: com.strapdata.cassandra/cassandra-all

DeletionTime.serializer.serialize(legacyPartition.partitionDeletion, out);
origin: jsevellec/cassandra-unit

DeletionTime.serializer.serialize(legacyPartition.partitionDeletion, out);
origin: jsevellec/cassandra-unit

size += DeletionTime.serializer.serializedSize(legacyPartition.partitionDeletion);
size += legacyPartition.rangeTombstones.serializedSize(partition.metadata());
origin: org.apache.cassandra/cassandra-all

DeletionTime.serializer.serialize(legacyPartition.partitionDeletion, out);
origin: jsevellec/cassandra-unit

/**
 * Constructor called from {@link LegacyShallowIndexedEntry#deserialize(org.apache.cassandra.io.util.DataInputPlus, long, org.apache.cassandra.io.sstable.IndexInfo.Serializer)}.
 * Only for legacy sstables.
 */
private IndexedEntry(long dataFilePosition, DataInputPlus in, IndexInfo.Serializer idxInfoSerializer) throws IOException
{
  super(dataFilePosition);
  long headerLength = 0;
  this.deletionTime = DeletionTime.serializer.deserialize(in);
  int columnsIndexCount = in.readInt();
  TrackedDataInputPlus trackedIn = new TrackedDataInputPlus(in);
  this.columnsIndex = new IndexInfo[columnsIndexCount];
  for (int i = 0; i < columnsIndexCount; i++)
  {
    this.columnsIndex[i] = idxInfoSerializer.deserialize(trackedIn);
    if (i == 0)
      headerLength = this.columnsIndex[i].offset;
  }
  this.headerLength = headerLength;
  this.offsets = null;
  this.indexedPartSize = (int) trackedIn.getBytesRead();
  this.idxInfoSerializer = idxInfoSerializer;
}
origin: com.strapdata.cassandra/cassandra-all

public IndexInfo deserialize(DataInputPlus in) throws IOException
{
  ClusteringPrefix firstName = clusteringSerializer.deserialize(in);
  ClusteringPrefix lastName = clusteringSerializer.deserialize(in);
  long offset;
  long width;
  DeletionTime endOpenMarker = null;
  if (version.storeRows())
  {
    offset = in.readUnsignedVInt();
    width = in.readVInt() + WIDTH_BASE;
    if (in.readBoolean())
      endOpenMarker = DeletionTime.serializer.deserialize(in);
  }
  else
  {
    offset = in.readLong();
    width = in.readLong();
  }
  return new IndexInfo(firstName, lastName, offset, width, endOpenMarker);
}
origin: org.apache.cassandra/cassandra-all

public IndexInfo deserialize(DataInputPlus in) throws IOException
{
  ClusteringPrefix firstName = clusteringSerializer.deserialize(in);
  ClusteringPrefix lastName = clusteringSerializer.deserialize(in);
  long offset;
  long width;
  DeletionTime endOpenMarker = null;
  if (version.storeRows())
  {
    offset = in.readUnsignedVInt();
    width = in.readVInt() + WIDTH_BASE;
    if (in.readBoolean())
      endOpenMarker = DeletionTime.serializer.deserialize(in);
  }
  else
  {
    offset = in.readLong();
    width = in.readLong();
  }
  return new IndexInfo(firstName, lastName, offset, width, endOpenMarker);
}
origin: jsevellec/cassandra-unit

public IndexInfo deserialize(DataInputPlus in) throws IOException
{
  ClusteringPrefix firstName = clusteringSerializer.deserialize(in);
  ClusteringPrefix lastName = clusteringSerializer.deserialize(in);
  long offset;
  long width;
  DeletionTime endOpenMarker = null;
  if (version.storeRows())
  {
    offset = in.readUnsignedVInt();
    width = in.readVInt() + WIDTH_BASE;
    if (in.readBoolean())
      endOpenMarker = DeletionTime.serializer.deserialize(in);
  }
  else
  {
    offset = in.readLong();
    width = in.readLong();
  }
  return new IndexInfo(firstName, lastName, offset, width, endOpenMarker);
}
origin: com.strapdata.cassandra/cassandra-all

public static SSTableIdentityIterator create(SSTableReader sstable, FileDataInput dfile, RowIndexEntry<?> indexEntry, DecoratedKey key, boolean tombstoneOnly)
{
  try
  {
    dfile.seek(indexEntry.position);
    ByteBufferUtil.skipShortLength(dfile); // Skip partition key
    DeletionTime partitionLevelDeletion = DeletionTime.serializer.deserialize(dfile);
    SerializationHelper helper = new SerializationHelper(sstable.metadata, sstable.descriptor.version.correspondingMessagingVersion(), SerializationHelper.Flag.LOCAL);
    SSTableSimpleIterator iterator = tombstoneOnly
        ? SSTableSimpleIterator.createTombstoneOnly(sstable.metadata, dfile, sstable.header, helper, partitionLevelDeletion)
        : SSTableSimpleIterator.create(sstable.metadata, dfile, sstable.header, helper, partitionLevelDeletion);
    return new SSTableIdentityIterator(sstable, key, partitionLevelDeletion, dfile.getPath(), iterator);
  }
  catch (IOException e)
  {
    sstable.markSuspect();
    throw new CorruptSSTableException(e, dfile.getPath());
  }
}
origin: com.netflix.sstableadaptor/sstable-adaptor-cassandra

public static SSTableIdentityIterator create(SSTableReader sstable, FileDataInput dfile, RowIndexEntry<?> indexEntry, DecoratedKey key, boolean tombstoneOnly)
{
  try
  {
    dfile.seek(indexEntry.position);
    ByteBufferUtil.skipShortLength(dfile); // Skip partition key
    DeletionTime partitionLevelDeletion = DeletionTime.serializer.deserialize(dfile);
    SerializationHelper helper = new SerializationHelper(sstable.metadata, sstable.descriptor.version.correspondingMessagingVersion(), SerializationHelper.Flag.LOCAL);
    SSTableSimpleIterator iterator = tombstoneOnly
        ? SSTableSimpleIterator.createTombstoneOnly(sstable.metadata, dfile, sstable.header, helper, partitionLevelDeletion)
        : SSTableSimpleIterator.create(sstable.metadata, dfile, sstable.header, helper, partitionLevelDeletion);
    return new SSTableIdentityIterator(sstable, key, partitionLevelDeletion, dfile.getPath(), iterator);
  }
  catch (IOException e)
  {
    sstable.markSuspect();
    throw new CorruptSSTableException(e, dfile.getPath());
  }
}
origin: com.strapdata.cassandra/cassandra-all

  public static LegacyDeletionInfo deserialize(CFMetaData metadata, DataInputPlus in) throws IOException
  {
    DeletionTime topLevel = DeletionTime.serializer.deserialize(in);
    int rangeCount = in.readInt();
    if (rangeCount == 0)
      return new LegacyDeletionInfo(new MutableDeletionInfo(topLevel));
    LegacyDeletionInfo delInfo = new LegacyDeletionInfo(new MutableDeletionInfo(topLevel));
    for (int i = 0; i < rangeCount; i++)
    {
      LegacyBound start = decodeBound(metadata, ByteBufferUtil.readWithShortLength(in), true);
      LegacyBound end = decodeBound(metadata, ByteBufferUtil.readWithShortLength(in), false);
      int delTime = in.readInt();
      long markedAt = in.readLong();
      delInfo.add(metadata, new LegacyRangeTombstone(start, end, new DeletionTime(markedAt, delTime)));
    }
    return delInfo;
  }
}
origin: com.netflix.sstableadaptor/sstable-adaptor-cassandra

  public static LegacyDeletionInfo deserialize(CFMetaData metadata, DataInputPlus in) throws IOException
  {
    DeletionTime topLevel = DeletionTime.serializer.deserialize(in);
    int rangeCount = in.readInt();
    if (rangeCount == 0)
      return new LegacyDeletionInfo(new MutableDeletionInfo(topLevel));
    LegacyDeletionInfo delInfo = new LegacyDeletionInfo(new MutableDeletionInfo(topLevel));
    for (int i = 0; i < rangeCount; i++)
    {
      LegacyBound start = decodeBound(metadata, ByteBufferUtil.readWithShortLength(in), true);
      LegacyBound end = decodeBound(metadata, ByteBufferUtil.readWithShortLength(in), false);
      int delTime = in.readInt();
      long markedAt = in.readLong();
      delInfo.add(metadata, new LegacyRangeTombstone(start, end, new DeletionTime(markedAt, delTime)));
    }
    return delInfo;
  }
}
origin: jsevellec/cassandra-unit

public static SSTableIdentityIterator create(SSTableReader sstable, FileDataInput dfile, RowIndexEntry<?> indexEntry, DecoratedKey key, boolean tombstoneOnly)
{
  try
  {
    dfile.seek(indexEntry.position);
    ByteBufferUtil.skipShortLength(dfile); // Skip partition key
    DeletionTime partitionLevelDeletion = DeletionTime.serializer.deserialize(dfile);
    SerializationHelper helper = new SerializationHelper(sstable.metadata, sstable.descriptor.version.correspondingMessagingVersion(), SerializationHelper.Flag.LOCAL);
    SSTableSimpleIterator iterator = tombstoneOnly
        ? SSTableSimpleIterator.createTombstoneOnly(sstable.metadata, dfile, sstable.header, helper, partitionLevelDeletion)
        : SSTableSimpleIterator.create(sstable.metadata, dfile, sstable.header, helper, partitionLevelDeletion);
    return new SSTableIdentityIterator(sstable, key, partitionLevelDeletion, dfile.getPath(), iterator);
  }
  catch (IOException e)
  {
    sstable.markSuspect();
    throw new CorruptSSTableException(e, dfile.getPath());
  }
}
origin: org.apache.cassandra/cassandra-all

public static SSTableIdentityIterator create(SSTableReader sstable, FileDataInput dfile, RowIndexEntry<?> indexEntry, DecoratedKey key, boolean tombstoneOnly)
{
  try
  {
    dfile.seek(indexEntry.position);
    ByteBufferUtil.skipShortLength(dfile); // Skip partition key
    DeletionTime partitionLevelDeletion = DeletionTime.serializer.deserialize(dfile);
    SerializationHelper helper = new SerializationHelper(sstable.metadata, sstable.descriptor.version.correspondingMessagingVersion(), SerializationHelper.Flag.LOCAL);
    SSTableSimpleIterator iterator = tombstoneOnly
        ? SSTableSimpleIterator.createTombstoneOnly(sstable.metadata, dfile, sstable.header, helper, partitionLevelDeletion)
        : SSTableSimpleIterator.create(sstable.metadata, dfile, sstable.header, helper, partitionLevelDeletion);
    return new SSTableIdentityIterator(sstable, key, partitionLevelDeletion, dfile.getPath(), iterator);
  }
  catch (IOException e)
  {
    sstable.markSuspect();
    throw new CorruptSSTableException(e, dfile.getPath());
  }
}
origin: com.strapdata.cassandra/cassandra-all

/**
 * Constructor called from {@link Serializer#deserializeForCache(org.apache.cassandra.io.util.DataInputPlus)}.
 */
private IndexedEntry(long dataFilePosition, DataInputPlus in, IndexInfo.Serializer idxInfoSerializer, Version version) throws IOException
{
  super(dataFilePosition);
  this.headerLength = in.readUnsignedVInt();
  this.deletionTime = DeletionTime.serializer.deserialize(in);
  int columnsIndexCount = (int) in.readUnsignedVInt();
  TrackedDataInputPlus trackedIn = new TrackedDataInputPlus(in);
  this.columnsIndex = new IndexInfo[columnsIndexCount];
  for (int i = 0; i < columnsIndexCount; i++)
    this.columnsIndex[i] = idxInfoSerializer.deserialize(trackedIn);
  this.offsets = null;
  this.indexedPartSize = (int) trackedIn.getBytesRead();
  this.idxInfoSerializer = idxInfoSerializer;
}
org.apache.cassandra.db.DeletionTime$Serializer

Most used methods

  • deserialize
  • serialize
  • serializedSize
  • skip
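
Of these, skip is the only one not exercised by the snippets above; it is handy when a reader only needs to move past a serialized DeletionTime without materializing it. A minimal sketch, assuming the 3.x DataInputPlus-based signature and the legacy index-entry layout from the IndexedEntry constructor above (deletion time followed by an int count); the helper and its name are hypothetical:

import java.io.IOException;

import org.apache.cassandra.db.DeletionTime;
import org.apache.cassandra.io.util.DataInputPlus;

final class DeletionTimeSkipSketch
{
  // Hypothetical helper: advance past the partition-level deletion and return the
  // columns-index count that follows it in the legacy index-entry layout.
  static int readColumnsIndexCount(DataInputPlus in) throws IOException
  {
    DeletionTime.serializer.skip(in); // consume the serialized DeletionTime without building one
    return in.readInt();              // the count field that follows in the legacy layout
  }
}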
