/** An additional sanity-check in case no compression or encryption is being used. */
@VisibleForTesting
void sanityCheckUncompressed() throws IOException {
  if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + totalChecksumBytes()) {
    throw new IOException("Using no compression but "
        + "onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader + ", "
        + "uncompressedSizeWithoutHeader=" + uncompressedSizeWithoutHeader + ", "
        + "numChecksumBytes=" + totalChecksumBytes());
  }
}
/** An additional sanity-check in case no compression or encryption is being used. */
@VisibleForTesting
void sanityCheckUncompressedSize() throws IOException {
  if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + totalChecksumBytes()) {
    throw new IOException("Using no compression but "
        + "onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader + ", "
        + "uncompressedSizeWithoutHeader=" + uncompressedSizeWithoutHeader + ", "
        + "numChecksumBytes=" + totalChecksumBytes());
  }
}
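// A runnable sketch (not HBase code) of the invariant the sanity checks above
// enforce: with no compression or encryption, the on-disk payload is exactly the
// uncompressed payload plus the checksum bytes. The chunk math assumes HBase's
// layout of one 4-byte checksum per bytesPerChecksum-sized chunk covering the
// header and data together; all concrete sizes below are hypothetical.
public final class UncompressedBlockInvariantSketch {
  static final int CHECKSUM_SIZE = 4; // bytes per checksum value (assumption)

  /** Checksum bytes needed to cover onDiskDataSizeWithHeader bytes. */
  static int totalChecksumBytes(int onDiskDataSizeWithHeader, int bytesPerChecksum) {
    int chunks = (onDiskDataSizeWithHeader + bytesPerChecksum - 1) / bytesPerChecksum;
    return chunks * CHECKSUM_SIZE;
  }

  public static void main(String[] args) {
    int headerSize = 33;                       // hypothetical header size
    int uncompressedSizeWithoutHeader = 4936;  // hypothetical payload size
    int bytesPerChecksum = 16 * 1024;          // hypothetical chunk size
    int cksumBytes =
        totalChecksumBytes(headerSize + uncompressedSizeWithoutHeader, bytesPerChecksum);
    // With no compression, on-disk payload == uncompressed payload + checksums,
    // so the check in sanityCheckUncompressed() passes for these values.
    int onDiskSizeWithoutHeader = uncompressedSizeWithoutHeader + cksumBytes;
    System.out.println("onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader
        + " == " + uncompressedSizeWithoutHeader + " + " + cksumBytes);
  }
}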
/**
 * Read the root-level metadata of a multi-level block index. Based on
 * {@link #readRootIndex(DataInput, int)}, but also reads metadata
 * necessary to compute the mid-key in a multi-level index.
 *
 * @param blk the HFile block
 * @param numEntries the number of root-level index entries
 * @throws IOException
 */
public void readMultiLevelIndexRoot(HFileBlock blk, final int numEntries) throws IOException {
  DataInputStream in = readRootIndex(blk, numEntries);
  // After reading the root index, the checksum bytes have to be
  // subtracted to know whether the mid-key metadata exists.
  int checkSumBytes = blk.totalChecksumBytes();
  if ((in.available() - checkSumBytes) < MID_KEY_METADATA_SIZE) {
    // No mid-key metadata available.
    return;
  }
  midLeafBlockOffset = in.readLong();
  midLeafBlockOnDiskSize = in.readInt();
  midKeyEntry = in.readInt();
}
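// Illustrative only: the mid-key trailer read above is one long plus two ints,
// so the MID_KEY_METADATA_SIZE guard works out to 8 + 4 + 4 = 16 bytes. Below is
// a minimal round-trip of that trailer using plain java.io streams; the field
// values are hypothetical.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public final class MidKeyMetadataSketch {
  static final int MID_KEY_METADATA_SIZE = Long.BYTES + 2 * Integer.BYTES; // 16

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    out.writeLong(1024L); // midLeafBlockOffset
    out.writeInt(512);    // midLeafBlockOnDiskSize
    out.writeInt(7);      // midKeyEntry

    DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    // Mirrors the availability check in readMultiLevelIndexRoot().
    if (in.available() >= MID_KEY_METADATA_SIZE) {
      System.out.println(in.readLong() + " " + in.readInt() + " " + in.readInt());
    }
  }
}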
/**
 * Return true when this block's buffer has been unpacked, false otherwise. Note this is a
 * calculated heuristic, not a tracked attribute of the block.
 */
public boolean isUnpacked() {
  final int cksumBytes = totalChecksumBytes();
  final int headerSize = headerSize();
  final int expectedCapacity = headerSize + uncompressedSizeWithoutHeader + cksumBytes;
  final int bufCapacity = buf.capacity();
  return bufCapacity == expectedCapacity || bufCapacity == expectedCapacity + headerSize;
}
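// Why isUnpacked() accepts two capacities: the unpack path may have reserved
// extra room at the tail of the buffer for the next block's header (see
// allocateBuffer(boolean extraBytes) at the end of this section), so both of
// these layouts count as unpacked. Hypothetical layout sketch:
//
//   [header | uncompressed data | checksums]                -> expectedCapacity
//   [header | uncompressed data | checksums | next header]  -> expectedCapacity + headerSize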
/**
 * Always allocates a new buffer of the correct size. Copies header bytes
 * from the existing buffer. Does not change header fields.
 * Reserves room to keep checksum bytes too.
 */
private void allocateBuffer() {
  int cksumBytes = totalChecksumBytes();
  int headerSize = headerSize();
  int capacityNeeded = headerSize + uncompressedSizeWithoutHeader + cksumBytes;

  // TODO: do we need to consider allocating this off-heap?
  ByteBuffer newBuf = ByteBuffer.allocate(capacityNeeded);

  // Copy header bytes into newBuf. newBuf is a heap ByteBuffer (HBB),
  // so there is no issue in calling array().
  buf.position(0);
  buf.get(newBuf.array(), newBuf.arrayOffset(), headerSize);

  buf = new SingleByteBuff(newBuf);
  // Set limit to exclude the next block's header.
  buf.limit(headerSize + uncompressedSizeWithoutHeader + cksumBytes);
}
.append(", totalChecksumBytes=").append(totalChecksumBytes()) .append(", isUnpacked=").append(isUnpacked()) .append(", buf=[").append(buf).append("]")
/**
 * Returns a buffer that does not include the header or checksum.
 *
 * @return the buffer with header skipped and checksum omitted.
 */
public ByteBuff getBufferWithoutHeader() {
  ByteBuff dup = getBufferReadOnly();
  // Set it up so the buffer spans the content only -- no header and no checksums.
  return dup.position(headerSize()).limit(buf.limit() - totalChecksumBytes()).slice();
}
assertEquals(4936, b.getUncompressedSizeWithoutHeader());
assertEquals(algo == GZ ? 2173 : 4936,
    b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
HFileBlock expected = b;

hbr = new HFileBlock.FSReaderImpl(is, totalSize, meta);
b = hbr.readBlockData(0,
    2173 + HConstants.HFILEBLOCK_HEADER_SIZE + b.totalChecksumBytes(), pread, false);
assertEquals(expected, b);
int wrongCompressedSize = 2172;
int cksumBytes = totalChecksumBytes();
int expectedBufLimit = onDiskDataSizeWithHeader + cksumBytes;
if (dup.limit() != expectedBufLimit) {
boolean bytesAreCorrect = Bytes.compareTo(bufRead.array(), bufRead.arrayOffset(),
    bufRead.limit() - b.totalChecksumBytes(),
    bufExpected.array(), bufExpected.arrayOffset(), bufExpected.limit()) == 0;
assertEquals(4936, b.getUncompressedSizeWithoutHeader());
assertEquals(algo == GZ ? 2173 : 4936,
    b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
/** An additional sanity-check in case no compression is being used. */
public void assumeUncompressed() throws IOException {
  if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + totalChecksumBytes()) {
    throw new IOException("Using no compression but "
        + "onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader + ", "
        + "uncompressedSizeWithoutHeader=" + uncompressedSizeWithoutHeader + ", "
        + "numChecksumBytes=" + totalChecksumBytes());
  }
}
/** An additional sanity-check in case no compression or encryption is being used. */
public void assumeUncompressed() throws IOException {
  if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + totalChecksumBytes()) {
    throw new IOException("Using no compression but "
        + "onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader + ", "
        + "uncompressedSizeWithoutHeader=" + uncompressedSizeWithoutHeader + ", "
        + "numChecksumBytes=" + totalChecksumBytes());
  }
}
/**
 * Returns the buffer this block stores internally. Clients must not
 * modify the buffer object. This method has to be public because it is
 * used in {@link CompoundBloomFilter} to avoid object creation on every
 * Bloom filter lookup, but it has to be used with caution. Checksum data
 * is not included in the returned buffer.
 *
 * @return the buffer of this block for read-only operations
 */
public ByteBuffer getBufferReadOnly() {
  return ByteBuffer.wrap(buf.array(), buf.arrayOffset(),
      buf.limit() - totalChecksumBytes()).slice();
}
/**
 * Returns the buffer this block stores internally. Clients must not
 * modify the buffer object. This method has to be public because it is
 * used in {@link org.apache.hadoop.hbase.util.CompoundBloomFilter}
 * to avoid object creation on every Bloom filter lookup, but it has to
 * be used with caution. Checksum data is not included in the returned
 * buffer, but header data is.
 *
 * @return the buffer of this block for read-only operations
 */
public ByteBuffer getBufferReadOnly() {
  ByteBuffer dup = this.buf.duplicate();
  dup.limit(buf.limit() - totalChecksumBytes());
  return dup.slice();
}
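// Design note on the two getBufferReadOnly() variants: the earlier
// ByteBuffer.wrap(buf.array(), ...) form only works when the buffer is
// heap-backed, while the duplicate()-based form above also works for direct
// (off-heap) buffers. A self-contained demonstration of the difference:
import java.nio.ByteBuffer;

public final class BackingArraySketch {
  public static void main(String[] args) {
    ByteBuffer heap = ByteBuffer.allocate(64);
    ByteBuffer direct = ByteBuffer.allocateDirect(64);
    System.out.println(heap.hasArray());   // true  -> array() is safe to call
    System.out.println(direct.hasArray()); // false -> array() throws UnsupportedOperationException
  }
}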
private void setNextBlockHeader(long offset, HFileBlock b) {
  PrefetchedHeader prefetchedHeader = prefetchedHeaderForThread.get();
  prefetchedHeader.offset = offset + b.getOnDiskSizeWithHeader();
  int nextHeaderOffset = b.buf.arrayOffset() + hdrSize
      + b.uncompressedSizeWithoutHeader + b.totalChecksumBytes();
  System.arraycopy(b.buf.array(), nextHeaderOffset, prefetchedHeader.header, 0, hdrSize);
}
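// Where the prefetched header comes from: when the block buffer was allocated
// with extra room (see allocateBuffer(boolean extraBytes) below), the next
// block's header sits right after this block's checksums, so
//
//   nextHeaderOffset = arrayOffset + hdrSize + uncompressedSizeWithoutHeader
//                      + totalChecksumBytes()
//
// points at the start of the trailing hdrSize bytes beyond this block's own data.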
/**
 * Returns a buffer that does not include the header or checksum.
 *
 * @return the buffer with header skipped and checksum omitted.
 */
public ByteBuffer getBufferWithoutHeader() {
  ByteBuffer dup = this.buf.duplicate();
  dup.position(headerSize());
  dup.limit(buf.limit() - totalChecksumBytes());
  return dup.slice();
}
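// A self-contained java.nio analogue of the duplicate/position/limit/slice
// pattern used by the getBufferWithoutHeader() variants above, with
// hypothetical sizes. duplicate() shares the underlying bytes but has
// independent position and limit, so the original buffer is untouched.
import java.nio.ByteBuffer;

public final class SliceSketch {
  public static void main(String[] args) {
    int headerSize = 33, dataSize = 100, cksumBytes = 8; // hypothetical sizes
    ByteBuffer buf = ByteBuffer.allocate(headerSize + dataSize + cksumBytes);

    ByteBuffer dup = buf.duplicate();
    dup.position(headerSize);                // skip the header
    dup.limit(buf.limit() - cksumBytes);     // drop the trailing checksums
    ByteBuffer content = dup.slice();        // zero-copy view of the data section

    System.out.println(content.remaining()); // 100
  }
}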
/**
 * Returns a buffer that does not include the header. The array offset points
 * to the start of the block data right after the header. The underlying data
 * array is not copied. Checksum data is not included in the returned buffer.
 *
 * @return the buffer with header skipped
 */
ByteBuffer getBufferWithoutHeader() {
  return ByteBuffer.wrap(buf.array(), buf.arrayOffset() + headerSize(),
      buf.limit() - headerSize() - totalChecksumBytes()).slice();
}
/**
 * Always allocates a new buffer of the correct size. Copies header bytes
 * from the existing buffer. Does not change header fields.
 * Reserves room to keep checksum bytes too.
 *
 * @param extraBytes whether to reserve room in the buffer to read the next
 *          block's header
 */
private void allocateBuffer(boolean extraBytes) {
  int cksumBytes = totalChecksumBytes();
  int capacityNeeded = headerSize() + uncompressedSizeWithoutHeader + cksumBytes
      + (extraBytes ? headerSize() : 0);

  ByteBuffer newBuf = ByteBuffer.allocate(capacityNeeded);

  // Copy header bytes.
  System.arraycopy(buf.array(), buf.arrayOffset(), newBuf.array(), newBuf.arrayOffset(),
      headerSize());

  buf = newBuf;
  // Set limit to exclude the next block's header, if any.
  buf.limit(headerSize() + uncompressedSizeWithoutHeader + cksumBytes);
}