DeletionTime$Serializer.serializedSize

How to use the serializedSize method in org.apache.cassandra.db.DeletionTime$Serializer

Best Java code snippets using org.apache.cassandra.db.DeletionTime$Serializer.serializedSize (Showing top 20 results out of 315)
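Before the indexed snippets, a minimal sketch of calling the method directly. This is hedged: it assumes the Cassandra 3.x-style API, where serializedSize takes only the DeletionTime (several 2.x snippets below also pass a TypeSizes argument) and where DeletionTime exposes a public (markedForDeleteAt, localDeletionTime) constructor.

import org.apache.cassandra.db.DeletionTime;

// Sketch only: markedForDeleteAt is in microseconds, localDeletionTime in seconds.
static long deletionTimeOnDiskSize(long markedForDeleteAtMicros, int localDeletionTimeSeconds)
{
  DeletionTime deletion = new DeletionTime(markedForDeleteAtMicros, localDeletionTimeSeconds);

  // Number of bytes DeletionTime.serializer.serialize(...) would write for this marker.
  return DeletionTime.serializer.serializedSize(deletion);
}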

origin: jsevellec/cassandra-unit

@Override
public IndexInfoRetriever openWithIndex(FileHandle indexFile)
{
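  // fieldsSize (deletion time + columnIndexCount) plus the position and indexInfoSize
  // fields below form the entry header the retriever skips to reach the serialized IndexInfo list.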
  int fieldsSize = (int) DeletionTime.serializer.serializedSize(deletionTime)
           + TypeSizes.sizeof(0); // columnIndexCount
  indexEntrySizeHistogram.update(serializedSize);
  indexInfoCountHistogram.update(offsets.length);
  return new LegacyIndexInfoRetriever(indexFilePosition +
                    TypeSizes.sizeof(0L) + // position
                    TypeSizes.sizeof(0) + // indexInfoSize
                    fieldsSize,
                    offsets, indexFile.createReader(), idxInfoSerializer);
}
origin: com.facebook.presto.cassandra/cassandra-server

/**
 * Returns the number of bytes between the beginning of the row and the
 * first serialized column.
 */
private static long rowHeaderSize(ByteBuffer key, DeletionInfo delInfo)
{
  TypeSizes typeSizes = TypeSizes.NATIVE;
  // TODO fix constantSize when changing the nativeconststs.
  int keysize = key.remaining();
  return typeSizes.sizeof((short) keysize) + keysize          // Row key
     + DeletionTime.serializer.serializedSize(delInfo.getTopLevelDeletion(), typeSizes);
}
origin: com.strapdata.cassandra/cassandra-all

@Override
public IndexInfoRetriever openWithIndex(FileHandle indexFile)
{
  int fieldsSize = (int) DeletionTime.serializer.serializedSize(deletionTime)
           + TypeSizes.sizeof(0); // columnIndexCount
  indexEntrySizeHistogram.update(serializedSize);
  indexInfoCountHistogram.update(offsets.length);
  return new LegacyIndexInfoRetriever(indexFilePosition +
                    TypeSizes.sizeof(0L) + // position
                    TypeSizes.sizeof(0) + // indexInfoSize
                    fieldsSize,
                    offsets, indexFile.createReader(), idxInfoSerializer);
}
origin: org.apache.cassandra/cassandra-all

size += DeletionTime.serializer.serializedSize(legacyPartition.partitionDeletion);
size += legacyPartition.rangeTombstones.serializedSize(partition.metadata());
origin: jsevellec/cassandra-unit

size += DeletionTime.serializer.serializedSize(legacyPartition.partitionDeletion);
size += legacyPartition.rangeTombstones.serializedSize(partition.metadata());
origin: com.strapdata.cassandra/cassandra-all

size += DeletionTime.serializer.serializedSize(legacyPartition.partitionDeletion);
size += legacyPartition.rangeTombstones.serializedSize(partition.metadata());
origin: com.facebook.presto.cassandra/cassandra-server

  public int serializedSize(RowIndexEntry rie)
  {
    int size = TypeSizes.NATIVE.sizeof(rie.position) + TypeSizes.NATIVE.sizeof(rie.promotedSize(type));
    if (rie.isIndexed())
    {
      List<IndexHelper.IndexInfo> index = rie.columnsIndex();
      size += DeletionTime.serializer.serializedSize(rie.deletionTime(), TypeSizes.NATIVE);
      size += TypeSizes.NATIVE.sizeof(index.size());
      ISerializer<IndexHelper.IndexInfo> idxSerializer = type.indexSerializer();
      for (IndexHelper.IndexInfo info : index)
        size += idxSerializer.serializedSize(info, TypeSizes.NATIVE);
    }
    return size;
  }
}
origin: com.strapdata.cassandra/cassandra-all

  public long serializedSize(IndexInfo info)
  {
    assert version.storeRows() : "We read old index files but we should never write them";
    long size = clusteringSerializer.serializedSize(info.firstName)
          + clusteringSerializer.serializedSize(info.lastName)
          + TypeSizes.sizeofUnsignedVInt(info.offset)
          + TypeSizes.sizeofVInt(info.width - WIDTH_BASE)
          + TypeSizes.sizeof(info.endOpenMarker != null);
    if (info.endOpenMarker != null)
      size += DeletionTime.serializer.serializedSize(info.endOpenMarker);
    return size;
  }
}
origin: com.netflix.sstableadaptor/sstable-adaptor-cassandra

@Override
public IndexInfoRetriever openWithIndex(FileHandle indexFile)
{
  int fieldsSize = (int) DeletionTime.serializer.serializedSize(deletionTime)
           + TypeSizes.sizeof(0); // columnIndexCount
  indexEntrySizeHistogram.update(serializedSize);
  indexInfoCountHistogram.update(offsets.length);
  return new LegacyIndexInfoRetriever(indexFilePosition +
                    TypeSizes.sizeof(0L) + // position
                    TypeSizes.sizeof(0) + // indexInfoSize
                    fieldsSize,
                    offsets, indexFile.createReader(), idxInfoSerializer);
}
origin: jsevellec/cassandra-unit

  public long serializedSize(IndexInfo info)
  {
    assert version.storeRows() : "We read old index files but we should never write them";
    long size = clusteringSerializer.serializedSize(info.firstName)
          + clusteringSerializer.serializedSize(info.lastName)
          + TypeSizes.sizeofUnsignedVInt(info.offset)
          + TypeSizes.sizeofVInt(info.width - WIDTH_BASE)
          + TypeSizes.sizeof(info.endOpenMarker != null);
    if (info.endOpenMarker != null)
      size += DeletionTime.serializer.serializedSize(info.endOpenMarker);
    return size;
  }
}
origin: org.apache.cassandra/cassandra-all

@Override
public IndexInfoRetriever openWithIndex(FileHandle indexFile)
{
  int fieldsSize = (int) DeletionTime.serializer.serializedSize(deletionTime)
           + TypeSizes.sizeof(0); // columnIndexCount
  indexEntrySizeHistogram.update(serializedSize);
  indexInfoCountHistogram.update(offsets.length);
  return new LegacyIndexInfoRetriever(indexFilePosition +
                    TypeSizes.sizeof(0L) + // position
                    TypeSizes.sizeof(0) + // indexInfoSize
                    fieldsSize,
                    offsets, indexFile.createReader(), idxInfoSerializer);
}
origin: com.netflix.sstableadaptor/sstable-adaptor-cassandra

  public long serializedSize(IndexInfo info)
  {
    assert version.storeRows() : "We read old index files but we should never write them";
    long size = clusteringSerializer.serializedSize(info.firstName)
          + clusteringSerializer.serializedSize(info.lastName)
          + TypeSizes.sizeofUnsignedVInt(info.offset)
          + TypeSizes.sizeofVInt(info.width - WIDTH_BASE)
          + TypeSizes.sizeof(info.endOpenMarker != null);
    if (info.endOpenMarker != null)
      size += DeletionTime.serializer.serializedSize(info.endOpenMarker);
    return size;
  }
}
origin: com.strapdata.cassandra/cassandra-all

private static int serializedSize(DeletionTime deletionTime, long headerLength, int columnIndexCount)
{
  return TypeSizes.sizeofUnsignedVInt(headerLength)
      + (int) DeletionTime.serializer.serializedSize(deletionTime)
      + TypeSizes.sizeofUnsignedVInt(columnIndexCount);
}
origin: com.facebook.presto.cassandra/cassandra-server

@Override
public int promotedSize(CType type)
{
  TypeSizes typeSizes = TypeSizes.NATIVE;
  long size = DeletionTime.serializer.serializedSize(deletionTime, typeSizes);
  size += typeSizes.sizeof(columnsIndex.size()); // number of entries
  ISerializer<IndexHelper.IndexInfo> idxSerializer = type.indexSerializer();
  for (IndexHelper.IndexInfo info : columnsIndex)
    size += idxSerializer.serializedSize(info, typeSizes);
  return Ints.checkedCast(size);
}
origin: org.apache.cassandra/cassandra-all

  public long serializedSize(IndexInfo info)
  {
    assert version.storeRows() : "We read old index files but we should never write them";
    long size = clusteringSerializer.serializedSize(info.firstName)
          + clusteringSerializer.serializedSize(info.lastName)
          + TypeSizes.sizeofUnsignedVInt(info.offset)
          + TypeSizes.sizeofVInt(info.width - WIDTH_BASE)
          + TypeSizes.sizeof(info.endOpenMarker != null);
    if (info.endOpenMarker != null)
      size += DeletionTime.serializer.serializedSize(info.endOpenMarker);
    return size;
  }
}
origin: com.facebook.presto.cassandra/cassandra-server

public long serializedSize(DeletionInfo info, TypeSizes typeSizes, int version)
{
  long size = DeletionTime.serializer.serializedSize(info.topLevel, typeSizes);
  return size + rtlSerializer.serializedSize(info.ranges, typeSizes, version);
}
origin: com.netflix.sstableadaptor/sstable-adaptor-cassandra

private static int serializedSize(DeletionTime deletionTime, long headerLength, int columnIndexCount)
{
  return TypeSizes.sizeofUnsignedVInt(headerLength)
      + (int) DeletionTime.serializer.serializedSize(deletionTime)
      + TypeSizes.sizeofUnsignedVInt(columnIndexCount);
}
origin: com.facebook.presto.cassandra/cassandra-server

  public long serializedSizeForSSTable(RangeTombstone t)
  {
    TypeSizes typeSizes = TypeSizes.NATIVE;
    return type.serializer().serializedSize(t.min, typeSizes)
       + 1 // serialization flag
       + type.serializer().serializedSize(t.max, typeSizes)
       + DeletionTime.serializer.serializedSize(t.data, typeSizes);
  }
}
origin: org.apache.cassandra/cassandra-all

private static int serializedSize(DeletionTime deletionTime, long headerLength, int columnIndexCount)
{
  return TypeSizes.sizeofUnsignedVInt(headerLength)
      + (int) DeletionTime.serializer.serializedSize(deletionTime)
      + TypeSizes.sizeofUnsignedVInt(columnIndexCount);
}
origin: jsevellec/cassandra-unit

private static int serializedSize(DeletionTime deletionTime, long headerLength, int columnIndexCount)
{
  return TypeSizes.sizeofUnsignedVInt(headerLength)
      + (int) DeletionTime.serializer.serializedSize(deletionTime)
      + TypeSizes.sizeofUnsignedVInt(columnIndexCount);
}

Popular methods of DeletionTime$Serializer (a round-trip sketch using them follows this list)

  • deserialize
  • serialize
  • skip
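A minimal round-trip sketch tying these methods together. Assumptions, not confirmed by the snippets above: the 3.x-style signatures serialize(DeletionTime, DataOutputPlus) and deserialize(DataInputPlus), and the internal DataOutputBuffer/DataInputBuffer helpers from org.apache.cassandra.io.util.

import java.io.IOException;
import org.apache.cassandra.db.DeletionTime;
import org.apache.cassandra.io.util.DataInputBuffer;
import org.apache.cassandra.io.util.DataOutputBuffer;

static void roundTrip(DeletionTime deletion) throws IOException
{
  try (DataOutputBuffer out = new DataOutputBuffer())
  {
    DeletionTime.serializer.serialize(deletion, out);

    // serializedSize should agree with what serialize actually wrote.
    assert out.getLength() == DeletionTime.serializer.serializedSize(deletion);

    try (DataInputBuffer in = new DataInputBuffer(out.getData()))
    {
      // skip(in) would advance past the marker instead of materialising it.
      DeletionTime read = DeletionTime.serializer.deserialize(in);
      assert read.equals(deletion);
    }
  }
}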
