@Override
public HFileBlock nextBlockWithBlockType(BlockType blockType) throws IOException {
  HFileBlock blk = nextBlock();
  if (blk.getBlockType() != blockType) {
    throw new IOException("Expected block of type " + blockType + " but found "
        + blk.getBlockType());
  }
  return blk;
}
};
private DataInput getBloomFilterMetadata(BlockType blockType) throws IOException {
  if (blockType != BlockType.GENERAL_BLOOM_META
      && blockType != BlockType.DELETE_FAMILY_BLOOM_META) {
    throw new RuntimeException("Block Type: " + blockType.toString() + " is not supported");
  }
  for (HFileBlock b : loadOnOpenBlocks) {
    if (b.getBlockType() == blockType) {
      return b.getByteStream();
    }
  }
  return null;
}
/**
 * Compares the actual type of a block retrieved from cache or disk with its
 * expected type and throws an exception in case of a mismatch. An expected
 * block type of {@link BlockType#DATA} is considered to match the actual
 * block type {@link BlockType#ENCODED_DATA} as well.
 * @param block a block retrieved from cache or disk
 * @param expectedBlockType the expected block type, or null to skip the check
 */
private void validateBlockType(HFileBlock block, BlockType expectedBlockType)
    throws IOException {
  if (expectedBlockType == null) {
    return;
  }
  BlockType actualBlockType = block.getBlockType();
  if (expectedBlockType.isData() && actualBlockType.isData()) {
    // We consider DATA to match ENCODED_DATA for the purpose of this verification.
    return;
  }
  if (actualBlockType != expectedBlockType) {
    throw new IOException("Expected block type " + expectedBlockType + ", "
        + "but got " + actualBlockType + ": " + block);
  }
}
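// ---------------------------------------------------------------------------
// Illustrative sketch (not HBase source): the same "expected vs. actual type"
// check above, reduced to a self-contained, runnable example. MiniBlockType
// and checkType are hypothetical stand-ins for BlockType/validateBlockType;
// DATA and ENCODED_DATA are deliberately treated as equivalent, mirroring the
// isData() special case in validateBlockType.
// ---------------------------------------------------------------------------
import java.io.IOException;

public class BlockTypeCheckSketch {
  enum MiniBlockType {
    DATA, ENCODED_DATA, ROOT_INDEX;

    boolean isData() {
      return this == DATA || this == ENCODED_DATA;
    }
  }

  static void checkType(MiniBlockType actual, MiniBlockType expected) throws IOException {
    if (expected == null) {
      return; // null means "skip the check", as in validateBlockType above
    }
    if (expected.isData() && actual.isData()) {
      return; // DATA matches ENCODED_DATA and vice versa
    }
    if (actual != expected) {
      throw new IOException("Expected block type " + expected + ", but got " + actual);
    }
  }

  public static void main(String[] args) throws IOException {
    checkType(MiniBlockType.ENCODED_DATA, MiniBlockType.DATA); // passes
    checkType(MiniBlockType.ROOT_INDEX, null);                 // check skipped
    try {
      checkType(MiniBlockType.ROOT_INDEX, MiniBlockType.DATA); // throws
    } catch (IOException expected) {
      System.out.println("caught: " + expected.getMessage());
    }
  }
}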
/**
 * Updates the current block to be the given {@link HFileBlock}. Seeks to
 * the first key/value pair.
 *
 * @param newBlock the block to make current
 */
protected void updateCurrentBlock(HFileBlock newBlock) throws IOException {
  // Set the active block on the reader.
  // Sanity check: this scanner handles unencoded data blocks only.
  if (newBlock.getBlockType() != BlockType.DATA) {
    throw new IllegalStateException("ScannerV2 works only on data blocks, got "
        + newBlock.getBlockType() + "; fileName=" + reader.getName()
        + ", dataBlockEncoder=" + reader.getDataBlockEncoding()
        + ", isCompaction=" + isCompaction);
  }
  updateCurrBlockRef(newBlock);
  blockBuffer = newBlock.getBufferWithoutHeader();
  readKeyValueLen();
  blockFetches.incrementAndGet();
  // Reset the next indexed key
  this.nextIndexedKey = null;
}
/**
 * Caches the last written HFile block.
 * @param offset the offset of the block we want to cache. Used to determine
 *          the cache key.
 */
private void doCacheOnWrite(long offset) {
  cacheConf.getBlockCache().ifPresent(cache -> {
    HFileBlock cacheFormatBlock = blockWriter.getBlockForCaching(cacheConf);
    cache.cacheBlock(
        new BlockCacheKey(name, offset, true, cacheFormatBlock.getBlockType()),
        cacheFormatBlock);
  });
}
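// ---------------------------------------------------------------------------
// Illustrative sketch (not HBase source): why (hfileName, offset) works as a
// block cache key. MiniCacheKey is a hypothetical stand-in for BlockCacheKey;
// the real key also carries a primary-replica flag and the block type, as the
// constructor calls above show. The essential property is value-based
// equals/hashCode, so a key rebuilt later from the same file name and offset
// finds the cached block.
// ---------------------------------------------------------------------------
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

public class CacheKeySketch {
  static final class MiniCacheKey {
    final String hfileName;
    final long offset;

    MiniCacheKey(String hfileName, long offset) {
      this.hfileName = hfileName;
      this.offset = offset;
    }

    @Override
    public boolean equals(Object o) {
      if (!(o instanceof MiniCacheKey)) {
        return false;
      }
      MiniCacheKey k = (MiniCacheKey) o;
      return offset == k.offset && hfileName.equals(k.hfileName);
    }

    @Override
    public int hashCode() {
      return Objects.hash(hfileName, offset);
    }
  }

  public static void main(String[] args) {
    Map<MiniCacheKey, String> cache = new HashMap<>();
    cache.put(new MiniCacheKey("f1", 0L), "block@0");
    // A distinct key instance with the same (name, offset) pair hits the entry:
    System.out.println(cache.get(new MiniCacheKey("f1", 0L))); // block@0
  }
}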
/**
 * Updates the current block to be the given {@link HFileBlock}. Seeks to
 * the first key/value pair.
 *
 * @param newBlock the block to make current
 * @throws CorruptHFileException
 */
@Override
protected void updateCurrentBlock(HFileBlock newBlock) throws CorruptHFileException {
  // Sanity checks
  if (newBlock.getBlockType() != BlockType.ENCODED_DATA) {
    throw new IllegalStateException("EncodedScanner works only on encoded data blocks");
  }
  short dataBlockEncoderId = newBlock.getDataBlockEncodingId();
  if (!DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, dataBlockEncoderId)) {
    String encoderCls = dataBlockEncoder.getClass().getName();
    throw new CorruptHFileException("Encoder " + encoderCls
        + " doesn't support data block encoding "
        + DataBlockEncoding.getNameFromId(dataBlockEncoderId));
  }
  updateCurrBlockRef(newBlock);
  ByteBuff encodedBuffer = getEncodedBuffer(newBlock);
  seeker.setCurrentBuffer(encodedBuffer);
  blockFetches.incrementAndGet();
  // Reset the next indexed key
  this.nextIndexedKey = null;
}
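// ---------------------------------------------------------------------------
// Illustrative sketch (not HBase source): the encoder-id compatibility check
// above, reduced to a standalone example. MiniEncoding and its id lookup are
// hypothetical stand-ins for DataBlockEncoding; the point is that the
// encoding id persisted in the block must match the encoder the scanner was
// built with, otherwise the file is treated as corrupt.
// ---------------------------------------------------------------------------
import java.io.IOException;

public class EncoderIdCheckSketch {
  enum MiniEncoding {
    NONE((short) 0), PREFIX((short) 2), DIFF((short) 3);

    final short id;

    MiniEncoding(short id) {
      this.id = id;
    }

    static MiniEncoding fromId(short id) throws IOException {
      for (MiniEncoding e : values()) {
        if (e.id == id) {
          return e;
        }
      }
      throw new IOException("Unknown encoding id " + id);
    }
  }

  static void checkEncoder(MiniEncoding scannerEncoding, short idFromBlock)
      throws IOException {
    if (scannerEncoding != MiniEncoding.fromId(idFromBlock)) {
      throw new IOException("Encoder " + scannerEncoding
          + " doesn't support data block encoding id " + idFromBlock);
    }
  }

  public static void main(String[] args) throws IOException {
    checkEncoder(MiniEncoding.PREFIX, (short) 2); // ok: ids match
    try {
      checkEncoder(MiniEncoding.PREFIX, (short) 3); // mismatched id
    } catch (IOException expected) {
      System.out.println("caught: " + expected.getMessage());
    }
  }
}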
@Override
public void returnBlock(HFileBlock block) {
  if (block != null) {
    this.cacheConf.getBlockCache().ifPresent(blockCache -> {
      BlockCacheKey cacheKey = new BlockCacheKey(this.getFileContext().getHFileName(),
          block.getOffset(), this.isPrimaryReplicaReader(), block.getBlockType());
      blockCache.returnBlock(cacheKey, block);
    });
  }
}
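// ---------------------------------------------------------------------------
// Illustrative sketch (not HBase source): the "return what you borrowed"
// pattern behind returnBlock. RefCountedBlock is hypothetical; it mimics a
// cache that hands out shared, reference-counted blocks which callers must
// release so the backing memory can be reclaimed. Whether a given HBase
// cache implementation actually refcounts is an implementation detail; this
// only shows the calling discipline the API implies.
// ---------------------------------------------------------------------------
import java.util.concurrent.atomic.AtomicInteger;

public class ReturnBlockSketch {
  static final class RefCountedBlock {
    private final AtomicInteger refCount = new AtomicInteger(1);

    RefCountedBlock retain() {
      refCount.incrementAndGet();
      return this;
    }

    void release() {
      if (refCount.decrementAndGet() == 0) {
        System.out.println("buffer recycled");
      }
    }
  }

  public static void main(String[] args) {
    RefCountedBlock cached = new RefCountedBlock();
    RefCountedBlock borrowed = cached.retain(); // handed to a scanner
    borrowed.release(); // scanner done: analogous to reader.returnBlock(block)
    cached.release();   // cache eviction drops the last reference
  }
}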
    block.getNextBlockOnDiskSize(), cacheBlocks, pread, isCompaction, true, null,
    getEffectiveDataBlockEncoding());
if (block != null && !block.getBlockType().isData()) {
  // Findbugs: NP_NULL_ON_SOME_PATH
}
} while (!block.getBlockType().isData());
assertEquals(types.get(blockId), b.getBlockType());
assertEquals(expectedSize, b.getOnDiskSizeWithHeader());
assertEquals(offset, b.getOffset());
private void readStoreFile(Path storeFilePath) throws Exception {
  // Open the file
  HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, true, conf);
  while (!reader.prefetchComplete()) {
    // Sleep for a bit
    Thread.sleep(1000);
  }
  // Check that all of the data blocks were preloaded
  BlockCache blockCache = cacheConf.getBlockCache().get();
  long offset = 0;
  while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
    HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, null);
    BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
    boolean isCached = blockCache.getBlock(blockCacheKey, true, false, true) != null;
    if (block.getBlockType() == BlockType.DATA
        || block.getBlockType() == BlockType.ROOT_INDEX
        || block.getBlockType() == BlockType.INTERMEDIATE_INDEX) {
      assertTrue(isCached);
    }
    offset += block.getOnDiskSizeWithHeader();
  }
}
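// ---------------------------------------------------------------------------
// Illustrative sketch (not HBase source): a bounded version of the
// prefetch-complete poll above, so a stuck prefetch fails the test instead of
// hanging it forever. The isDone supplier is a hypothetical stand-in for
// reader.prefetchComplete().
// ---------------------------------------------------------------------------
import java.util.function.BooleanSupplier;

public class BoundedPollSketch {
  static void awaitOrFail(BooleanSupplier isDone, long timeoutMillis)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!isDone.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new AssertionError("condition not met within " + timeoutMillis + " ms");
      }
      Thread.sleep(100); // sleep between polls, as the test above does
    }
  }

  public static void main(String[] args) throws InterruptedException {
    long start = System.currentTimeMillis();
    // Becomes true after ~300 ms, well inside the 5 s budget:
    awaitOrFail(() -> System.currentTimeMillis() - start > 300, 5000);
    System.out.println("done");
  }
}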
if (cachedBlock.getBlockType().isData()) {
  if (updateCacheMetrics) {
    HFile.DATABLOCK_READ_COUNT.increment();

validateBlockType(hfileBlock, expectedBlockType);
HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader);
BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory();
if (updateCacheMetrics && hfileBlock.getBlockType().isData()) {
  HFile.DATABLOCK_READ_COUNT.increment();
    (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock, updateCacheMetrics);
if (cachedBlock != null) {
  if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) {
    HFileBlock compressedBlock = cachedBlock;
    cachedBlock = compressedBlock.unpack(hfileContext, fsBlockReader);

if (cachedBlock.getBlockType().isData()
    && !actualDataBlockEncoding.equals(expectedDataBlockEncoding)) {
private void testEncodingInternals(boolean useTag) throws IOException {
  // Usually we have just a block without headers; don't complicate the test with them.
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTag);
  HFileBlock block = getSampleHFileBlock(kvs, useTag);
  HFileBlock blockOnDisk = createBlockOnDisk(kvs, block, useTag);
  if (blockEncoder.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    assertEquals(BlockType.ENCODED_DATA, blockOnDisk.getBlockType());
    assertEquals(blockEncoder.getDataBlockEncoding().getId(),
        blockOnDisk.getDataBlockEncodingId());
  } else {
    assertEquals(BlockType.DATA, blockOnDisk.getBlockType());
  }
}
HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
cache.cacheBlock(
    new BlockCacheKey(nameForCaching, rootLevelIndexPos, true,
        blockForCaching.getBlockType()),
    blockForCaching);
});
HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
cache.cacheBlock(
    new BlockCacheKey(nameForCaching, beginOffset, true, blockForCaching.getBlockType()),
    blockForCaching);
});
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTag);
  HFileBlock block = getSampleHFileBlock(kvs, useTag);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTag);
  LruBlockCache blockCache = new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);
  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
  assertTrue(heapSize instanceof HFileBlock);
  HFileBlock returnedBlock = (HFileBlock) heapSize;
  if (blockEncoder.getDataBlockEncoding() == DataBlockEncoding.NONE) {
    assertEquals(block.getBufferReadOnly(), returnedBlock.getBufferReadOnly());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
    expectedTypes.get(i), b.getBlockType());
assertEquals("Invalid previous block offset for block " + i + " of type "
    + b.getBlockType() + ":", (long) expectedPrevOffsets.get(i), b.getPrevBlockOffset());
b.sanityCheck();
b2.sanityCheck();
assertEquals(b.getBlockType(), b2.getBlockType());
assertEquals(b.getOnDiskSizeWithoutHeader(), b2.getOnDiskSizeWithoutHeader());
cachedBlocksOffset.add(offset);
cachedBlocks.put(offset, fromCache);
boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
assertTrue("shouldBeCached: " + shouldBeCached + "\n" + "isCached: " + isCached,
    shouldBeCached == isCached);
if (isCached) {
  if (cacheConf.shouldCacheCompressed(fromCache.getBlockType().getCategory())) {
    if (compress != Compression.Algorithm.NONE) {
      assertFalse(fromCache.isUnpacked());
    }
  }
  assertEquals(block.getBlockType(), fromCache.getBlockType());
  assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
  assertEquals(block.getOnDiskSizeWithHeader(), fromCache.getOnDiskSizeWithHeader());
  assertEquals(block.getOnDiskSizeWithoutHeader(), fromCache.getOnDiskSizeWithoutHeader());
}
BlockType bt = block.getBlockType();
Integer count = blockCountByType.get(bt);
blockCountByType.put(bt, (count == null ? 0 : count) + 1);
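// ---------------------------------------------------------------------------
// Illustrative sketch (not HBase source): the per-type block counting at the
// end of the snippet above, written with Map.merge, which replaces the
// get/null-check/put dance with a single call. Keys here are plain strings
// standing in for BlockType values.
// ---------------------------------------------------------------------------
import java.util.HashMap;
import java.util.Map;

public class CountByTypeSketch {
  public static void main(String[] args) {
    Map<String, Integer> blockCountByType = new HashMap<>();
    for (String bt : new String[] { "DATA", "DATA", "LEAF_INDEX" }) {
      // Insert 1 for a new key, otherwise add 1 to the existing count.
      blockCountByType.merge(bt, 1, Integer::sum);
    }
    System.out.println(blockCountByType); // {DATA=2, LEAF_INDEX=1}
  }
}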
candidatesFound++;
HFileBlock block = (HFileBlock) e.getValue().getBuffer();
if (cc.shouldCacheCompressed(block.getBlockType().getCategory())) {
  assertFalse("found an unpacked block, block=" + block + ", block buffer capacity="
      + block.getBufferWithoutHeader().capacity(), block.isUnpacked());
    offset);
boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
if (shouldBeCached != isCached) {
  throw new AssertionError(