Tabnine Logo
FastDiffDeltaEncoder
Code IndexAdd Tabnine to your IDE (free)

How to use
FastDiffDeltaEncoder
in
org.apache.hadoop.hbase.io.encoding

Best Java code snippets using org.apache.hadoop.hbase.io.encoding.FastDiffDeltaEncoder (Showing top 20 results out of 315)

origin: apache/hbase

/**
 * Decodes every fast-diff-encoded KeyValue from {@code source} into a freshly
 * allocated buffer, leaving {@code allocateHeaderLength} bytes of headroom at
 * the front for the caller to fill in.
 *
 * @param source               compressed stream; its first int is the decompressed payload size
 * @param allocateHeaderLength bytes reserved at the start of the returned buffer
 * @param skipLastBytes        trailing bytes of {@code source} that must be left unread
 * @param decodingCtx          decoding context passed through to per-KV post-processing
 * @return buffer positioned after the last decoded KeyValue
 * @throws IOException if the stream cannot be read
 * @throws IllegalStateException if decoding consumed bytes past the expected end
 */
@Override
protected ByteBuffer internalDecodeKeyValues(DataInputStream source, int allocateHeaderLength,
    int skipLastBytes, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  // The encoder prefixes the stream with the total decompressed size.
  int decompressedSize = source.readInt();
  ByteBuffer buffer = ByteBuffer.allocate(decompressedSize + allocateHeaderLength);
  // Reserve the header region; decoding starts right after it.
  buffer.position(allocateHeaderLength);
  FastDiffCompressionState state = new FastDiffCompressionState();
  // Decode KeyValues until only the trailing bytes remain unread.
  while (source.available() > skipLastBytes) {
    uncompressSingleKeyValue(source, buffer, state);
    afterDecodingKeyValue(source, buffer, decodingCtx);
  }
  // Fixed message grammar ("too much bytes" -> "too many bytes").
  if (source.available() != skipLastBytes) {
    throw new IllegalStateException("Read too many bytes.");
  }
  return buffer;
}
origin: apache/hbase

/**
 * Encodes one cell as a diff against the previously encoded cell and records
 * it as the new "previous" cell for the next call.
 *
 * @return total number of bytes written for this cell
 */
@Override
public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext,
    DataOutputStream out) throws IOException {
  EncodingState encodingState = encodingContext.getEncodingState();
  // Diff-encode against the previous cell, then add any trailing per-cell data.
  int bytesWritten = compressSingleKeyValue(out, cell, encodingState.prevCell);
  bytesWritten += afterEncodingKeyValue(cell, out, encodingContext);
  // The next cell will be diffed against this one.
  encodingState.prevCell = cell;
  return bytesWritten;
}
origin: apache/hbase

ensureSpace(out, state.keyLength + state.valueLength + KeyValue.ROW_OFFSET);
origin: apache/hbase

int commonTimestampPrefix = findCommonTimestampPrefix(curTsBuf,
  Bytes.toBytes(prevCell.getTimestamp()));
origin: apache/hbase

/**
 * Extracts the first key of an encoded block without disturbing the buffer:
 * the position is marked on entry and restored before returning.
 */
@Override
public Cell getFirstKeyCellInBlock(ByteBuff block) {
  block.mark();
  // Advance past the leading int and byte that precede the first entry.
  block.position(Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE);
  int firstKeyLength = ByteBuff.readCompressedInt(block);
  // The next two varints (value length, common length) are not used here,
  // but must be consumed to reach the key bytes.
  ByteBuff.readCompressedInt(block);
  ByteBuff.readCompressedInt(block);
  ByteBuffer firstKey = block.asSubByteBuffer(firstKeyLength).duplicate();
  block.reset();
  return createFirstKeyCell(firstKey, firstKeyLength);
}
origin: co.cask.hbase/hbase

int commonTimestampPrefix = findCommonTimestampPrefix(
  currentState, previousState);
flag |= commonTimestampPrefix << SHIFT_TIMESTAMP_LENGTH;
origin: org.apache.hbase/hbase-common

/**
 * Reads the first key out of an encoded block, restoring the buffer's
 * position afterwards via mark()/reset().
 */
@Override
public Cell getFirstKeyCellInBlock(ByteBuff block) {
 block.mark();
 // Skip the leading int and byte that precede the first entry's key length.
 block.position(Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE);
 int keyLength = ByteBuff.readCompressedInt(block);
 // TODO : See if we can avoid these reads as the read values are not getting used
 ByteBuff.readCompressedInt(block); // valueLength
 ByteBuff.readCompressedInt(block); // commonLength
 // Take a view over the key bytes without copying them.
 ByteBuffer key = block.asSubByteBuffer(keyLength).duplicate();
 block.reset();
 return createFirstKeyCell(key, keyLength);
}
origin: org.apache.hbase/hbase-common

/**
 * Diff-encodes one cell against the previously written cell and returns the
 * number of bytes written, updating the encoding state for the next call.
 */
@Override
public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext,
  DataOutputStream out) throws IOException {
 EncodingState state = encodingContext.getEncodingState();
 // Write the cell as a diff against the previous cell.
 int size = compressSingleKeyValue(out, cell, state.prevCell);
 // Account for any trailing per-cell data written after the key/value.
 size += afterEncodingKeyValue(cell, out, encodingContext);
 // Remember this cell so the next one can be diffed against it.
 state.prevCell = cell;
 return size;
}
origin: com.aliyun.hbase/alihbase-common

/**
 * Decodes all KeyValues from the compressed stream into a new buffer,
 * reserving allocateHeaderLength bytes of headroom at the front.
 */
@Override
protected ByteBuffer internalDecodeKeyValues(DataInputStream source, int allocateHeaderLength,
  int skipLastBytes, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
 // The stream is prefixed with the total decompressed size.
 int decompressedSize = source.readInt();
 ByteBuffer buffer = ByteBuffer.allocate(decompressedSize +
   allocateHeaderLength);
 // Leave room for the header region the caller fills in.
 buffer.position(allocateHeaderLength);
 FastDiffCompressionState state = new FastDiffCompressionState();
 // Decode until only the trailing skipLastBytes remain unread.
 while (source.available() > skipLastBytes) {
  uncompressSingleKeyValue(source, buffer, state);
  afterDecodingKeyValue(source, buffer, decodingCtx);
 }
 if (source.available() != skipLastBytes) {
  throw new IllegalStateException("Read too much bytes.");
 }
 return buffer;
}
origin: harbby/presto-connectors

int commonTimestampPrefix = findCommonTimestampPrefix(curTsBuf,
  Bytes.toBytes(prevCell.getTimestamp()));
origin: org.apache.hbase/hbase-common

ensureSpace(out, state.keyLength + state.valueLength + KeyValue.ROW_OFFSET);
origin: com.aliyun.hbase/alihbase-common

/**
 * Returns the first key of an encoded block; the buffer position is saved
 * with mark() and restored with reset() before returning.
 */
@Override
public Cell getFirstKeyCellInBlock(ByteBuff block) {
 block.mark();
 // Skip the leading int and byte that precede the first entry.
 block.position(Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE);
 int keyLength = ByteBuff.readCompressedInt(block);
 // TODO : See if we can avoid these reads as the read values are not getting used
 ByteBuff.readCompressedInt(block); // valueLength
 ByteBuff.readCompressedInt(block); // commonLength
 // View over the key bytes; duplicate() avoids disturbing block's state.
 ByteBuffer key = block.asSubByteBuffer(keyLength).duplicate();
 block.reset();
 return createFirstKeyCell(key, keyLength);
}
origin: com.aliyun.hbase/alihbase-common

/**
 * Writes one cell as a diff against the previous cell, then updates the
 * encoding state so subsequent cells diff against this one.
 *
 * @return bytes written for this cell, including trailing per-cell data
 */
@Override
public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext,
    DataOutputStream out) throws IOException {
  EncodingState st = encodingContext.getEncodingState();
  // Left-to-right evaluation keeps the write order: key/value first,
  // then the trailing per-cell data.
  int written = compressSingleKeyValue(out, cell, st.prevCell)
      + afterEncodingKeyValue(cell, out, encodingContext);
  st.prevCell = cell; // the next cell is diffed against this one
  return written;
}
origin: org.apache.hbase/hbase-common

/**
 * Inflates every fast-diff-encoded KeyValue from {@code source} into a newly
 * allocated buffer, reserving {@code allocateHeaderLength} bytes up front.
 *
 * @throws IllegalStateException if more than the expected bytes were consumed
 */
@Override
protected ByteBuffer internalDecodeKeyValues(DataInputStream source, int allocateHeaderLength,
    int skipLastBytes, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  // Total decoded payload size is stored at the head of the stream.
  final int payloadSize = source.readInt();
  final ByteBuffer dest = ByteBuffer.allocate(payloadSize + allocateHeaderLength);
  dest.position(allocateHeaderLength); // reserve the header region
  final FastDiffCompressionState decodeState = new FastDiffCompressionState();
  // Keep decoding entries while more than the trailing bytes remain.
  while (source.available() > skipLastBytes) {
    uncompressSingleKeyValue(source, dest, decodeState);
    afterDecodingKeyValue(source, dest, decodingCtx);
  }
  if (source.available() != skipLastBytes) {
    throw new IllegalStateException("Read too much bytes.");
  }
  return dest;
}
origin: com.aliyun.hbase/alihbase-common

int commonTimestampPrefix = findCommonTimestampPrefix(curTsBuf,
  Bytes.toBytes(prevCell.getTimestamp()));
origin: com.aliyun.hbase/alihbase-common

ensureSpace(out, state.keyLength + state.valueLength + KeyValue.ROW_OFFSET);
origin: harbby/presto-connectors

/**
 * Encodes one cell as a diff against the previous cell and returns the total
 * bytes written; records this cell as the new "previous" for the next call.
 */
@Override
public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext,
  DataOutputStream out) throws IOException {
 EncodingState state = encodingContext.getEncodingState();
 // Diff-encode against the previously written cell.
 int size = compressSingleKeyValue(out, cell, state.prevCell);
 // Add any trailing per-cell bytes written after the key/value.
 size += afterEncodingKeyValue(cell, out, encodingContext);
 state.prevCell = cell;
 return size;
}
origin: co.cask.hbase/hbase

/**
 * Older-API variant (co.cask.hbase): decodes all compressed KeyValues from
 * the stream into a newly allocated buffer with allocHeaderLength bytes of
 * headroom. The includesMemstoreTS flag is forwarded to per-KV
 * post-processing.
 */
@Override
public ByteBuffer uncompressKeyValues(DataInputStream source,
  int allocHeaderLength, int skipLastBytes, boolean includesMemstoreTS)
    throws IOException {
 // The stream is prefixed with the total decompressed size.
 int decompressedSize = source.readInt();
 ByteBuffer buffer = ByteBuffer.allocate(decompressedSize +
   allocHeaderLength);
 // Reserve space for the block header the caller fills in.
 buffer.position(allocHeaderLength);
 FastDiffCompressionState state = new FastDiffCompressionState();
 // Decode until only the trailing skipLastBytes remain unread.
 while (source.available() > skipLastBytes) {
  uncompressSingleKeyValue(source, buffer, state);
  afterDecodingKeyValue(source, buffer, includesMemstoreTS);
 }
 if (source.available() != skipLastBytes) {
  throw new IllegalStateException("Read too much bytes.");
 }
 return buffer;
}
origin: org.apache.hbase/hbase-common

int commonTimestampPrefix = findCommonTimestampPrefix(curTsBuf,
  Bytes.toBytes(prevCell.getTimestamp()));
origin: harbby/presto-connectors

ensureSpace(out, state.keyLength + state.valueLength + KeyValue.ROW_OFFSET);
org.apache.hadoop.hbase.io.encoding.FastDiffDeltaEncoder

Javadoc

Encoder similar to DiffKeyDeltaEncoder but supposedly faster. Compress using: - store size of common prefix - save column family once in the first KeyValue - use integer compression for key, value and prefix (7-bit encoding) - use bits to avoid duplicating key length, value length and type if they are the same as in the previous KeyValue - store in 3 bits the length of the timestamp prefix shared with the previous KeyValue's timestamp - one bit which allows omitting the value if it is the same Format: - 1 byte: flag - 1-5 bytes: key length (only if FLAG_SAME_KEY_LENGTH is not set in flag) - 1-5 bytes: value length (only if FLAG_SAME_VALUE_LENGTH is not set in flag) - 1-5 bytes: prefix length - ... bytes: rest of the row (if prefix length is small enough) - ... bytes: qualifier (or suffix depending on prefix length) - 1-8 bytes: timestamp suffix - 1 byte: type (only if FLAG_SAME_TYPE is not set in the flag) - ... bytes: value (only if FLAG_SAME_VALUE is not set in the flag)

Most used methods

  • afterDecodingKeyValue
  • afterEncodingKeyValue
  • compressSingleKeyValue
  • findCommonTimestampPrefix
  • uncompressSingleKeyValue
  • ensureSpace
  • createFirstKeyCell

Popular in Java

  • Making http requests using okhttp
  • startActivity (Activity)
  • scheduleAtFixedRate (Timer)
  • getSystemService (Context)
  • SimpleDateFormat (java.text)
    Formats and parses dates in a locale-sensitive manner. Formatting turns a Date into a String, and pa
  • Random (java.util)
    This class provides methods that return pseudo-random values.It is dangerous to seed Random with the
  • TreeMap (java.util)
    Walk the nodes of the tree left-to-right or right-to-left. Note that in descending iterations, next
  • HttpServlet (javax.servlet.http)
    Provides an abstract class to be subclassed to create an HTTP servlet suitable for a Web site. A sub
  • Logger (org.apache.log4j)
    This is the central class in the log4j package. Most logging operations, except configuration, are d
  • Scheduler (org.quartz)
    This is the main interface of a Quartz Scheduler. A Scheduler maintains a registry of org.quartz.Job
  • From CI to AI: The AI layer in your organization
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now