/**
 * Return true if we may find this type of block in block cache.
 * <p>
 * TODO: today {@code family.isBlockCacheEnabled()} only means {@code cacheDataOnRead}, so here
 * we also consider other configurations such as {@code cacheDataOnWrite}. We should fix this in
 * the future: {@code cacheDataOnWrite} should honor the CF level {@code isBlockCacheEnabled}
 * configuration.
 */
public boolean shouldReadBlockFromCache(BlockType blockType) {
  if (cacheDataOnRead) {
    return true;
  }
  if (prefetchOnOpen) {
    return true;
  }
  if (cacheDataOnWrite) {
    return true;
  }
  if (blockType == null) {
    return true;
  }
  if (blockType.getCategory() == BlockCategory.BLOOM
      || blockType.getCategory() == BlockCategory.INDEX) {
    return true;
  }
  return false;
}
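A minimal sketch of how a reader-side caller might use this check to skip a pointless cache lookup. The method name maybeReadFromCache is hypothetical, and cacheConf, cache, and cacheKey are assumed to be in scope; the lookup call mirrors the shape used in the reader snippets below.

  // Hypothetical caller: only pay for a cache lookup when this block type could be cached.
  private HFileBlock maybeReadFromCache(CacheConfig cacheConf, BlockCache cache,
      BlockCacheKey cacheKey, BlockType blockType, boolean updateCacheMetrics) {
    if (!cacheConf.shouldReadBlockFromCache(blockType)) {
      return null; // this block type is never cached, skip the lookup entirely
    }
    // A null return here is an ordinary miss; the block must then be read from the filesystem.
    return (HFileBlock) cache.getBlock(cacheKey, true, false, updateCacheMetrics);
  }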
/**
 * If we can be sure the block will never be cached, we do not acquire the lock on a cache miss;
 * otherwise we acquire it.
 */
public boolean shouldLockOnCacheMiss(BlockType blockType) {
  if (blockType == null) {
    return true;
  }
  return shouldCacheBlockOnRead(blockType.getCategory());
}
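A hedged sketch of the intended call pattern: the per-offset lock is only taken when a miss could lead to a cache insert. readMaybeLocked and readBlockFromFs are hypothetical names; IdLock is the HBase utility lock used for this purpose, with offsetLock assumed to be in scope.

  // Hypothetical caller: lock only when a miss could result in a cache insert.
  HFileBlock readMaybeLocked(CacheConfig cacheConf, IdLock offsetLock, long offset,
      BlockType blockType) throws IOException {
    IdLock.Entry lockEntry =
        cacheConf.shouldLockOnCacheMiss(blockType) ? offsetLock.getLockEntry(offset) : null;
    try {
      return readBlockFromFs(offset); // hypothetical filesystem read
    } finally {
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }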
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
  boolean metaBlock = buf.getBlockType().getCategory() != BlockCategory.DATA;
  if (metaBlock) {
    onHeapCache.cacheBlock(cacheKey, buf, inMemory);
  } else {
    l2Cache.cacheBlock(cacheKey, buf, inMemory);
  }
}
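A simplified read-side counterpart of the routing above, illustrative only: the real CombinedBlockCache consults both tiers on read, but this shows which tier cacheBlock() would have written a given block type to. The method name getFromExpectedTier is hypothetical; onHeapCache and l2Cache are the fields from the snippet above, passed as parameters here.

  // Illustrative sketch (not actual HBase code): query the tier the write-side routing uses,
  // i.e. on-heap for index/bloom/meta blocks, L2 for DATA blocks.
  private static Cacheable getFromExpectedTier(BlockCache onHeapCache, BlockCache l2Cache,
      BlockType blockType, BlockCacheKey key, boolean caching, boolean repeat,
      boolean updateCacheMetrics) {
    boolean metaBlock = blockType.getCategory() != BlockCategory.DATA;
    BlockCache tier = metaBlock ? onHeapCache : l2Cache;
    return tier.getBlock(key, caching, repeat, updateCacheMetrics);
  }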
      this.isPrimaryReplicaReader(), BlockType.META);
  cacheBlock &= cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory());
  HFileBlock cachedBlock =
      getCachedBlock(cacheKey, cacheBlock, false, true, true, BlockType.META, null);
  validateBlockType(hfileBlock, expectedBlockType);
  HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader);
  BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory();
      (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock, updateCacheMetrics);
  if (cachedBlock != null) {
    if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) {
      HFileBlock compressedBlock = cachedBlock;
      cachedBlock = compressedBlock.unpack(hfileContext, fsBlockReader);
  candidatesFound++;
  HFileBlock block = (HFileBlock) e.getValue().getBuffer();
  if (cc.shouldCacheCompressed(block.getBlockType().getCategory())) {
    assertFalse("found an unpacked block, block=" + block + ", block buffer capacity="
        + block.getBufferWithoutHeader().capacity(), block.isUnpacked());
      shouldBeCached == isCached);
  if (isCached) {
    if (cacheConf.shouldCacheCompressed(fromCache.getBlockType().getCategory())) {
      if (compress != Compression.Algorithm.NONE) {
        assertFalse(fromCache.isUnpacked());
  return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
      getUncompressedSizeWithoutHeader(), prevOffset,
      cacheConf.shouldCacheCompressed(blockType.getCategory())
        ? cloneOnDiskBufferWithHeader()
        : cloneUncompressedBufferWithHeader(),
/**
 * If we can be sure the block will never be cached, we do not acquire the lock on a cache miss;
 * otherwise we acquire it.
 */
public boolean shouldLockOnCacheMiss(BlockType blockType) {
  if (blockType == null) {
    return true;
  }
  return shouldCacheBlockOnRead(blockType.getCategory());
}
/**
 * Return true if we may find this type of block in block cache.
 * <p/>
 * TODO: today {@code family.isBlockCacheEnabled()} only means {@code cacheDataOnRead}, so here
 * we also consider other configurations such as {@code cacheDataOnWrite}. We should fix this in
 * the future: {@code cacheDataOnWrite} should honor the CF level {@code isBlockCacheEnabled}
 * configuration.
 */
public boolean shouldReadBlockFromCache(BlockType blockType) {
  if (!isBlockCacheEnabled()) {
    return false;
  }
  if (cacheDataOnRead) {
    return true;
  }
  if (prefetchOnOpen) {
    return true;
  }
  if (cacheDataOnWrite) {
    return true;
  }
  if (blockType == null) {
    return true;
  }
  if (blockType.getCategory() == BlockCategory.BLOOM
      || blockType.getCategory() == BlockCategory.INDEX) {
    return true;
  }
  return false;
}
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
    final boolean cacheDataInL1) {
  boolean isMetaBlock = buf.getBlockType().getCategory() != BlockCategory.DATA;
  if (isMetaBlock || cacheDataInL1) {
    lruCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
  } else {
    l2Cache.cacheBlock(cacheKey, buf, inMemory, false);
  }
}
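For this older two-tier variant, a brief hedged sketch of how a caller could force DATA blocks into the on-heap tier via the extra flag; combinedCache is assumed to be in scope and cacheConf.shouldCacheDataInL1() is an assumed, version-dependent accessor.

  // Hypothetical caller: a column family opting in to L1 data caching keeps even its DATA
  // blocks in the on-heap LRU tier; shouldCacheDataInL1() is assumed and version-dependent.
  combinedCache.cacheBlock(cacheKey, hfileBlock, cacheConf.isInMemory(),
      cacheConf.shouldCacheDataInL1());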
/**
 * Helper function that updates the local size counter and also updates any per-CF or
 * per-blocktype metrics it can discern from the given {@link CachedBlock}.
 *
 * @param cb    the block being cached or evicted
 * @param evict true if the block is being evicted, false if it is being cached
 * @return the cache size after applying the update
 */
protected long updateSizeMetrics(CachedBlock cb, boolean evict) {
  long heapsize = cb.heapSize();
  if (evict) {
    heapsize *= -1;
  }
  Cacheable cachedBlock = cb.getBuffer();
  SchemaMetrics schemaMetrics = cachedBlock.getSchemaMetrics();
  if (schemaMetrics != null) {
    schemaMetrics.updateOnCachePutOrEvict(cachedBlock.getBlockType().getCategory(), heapsize,
      evict);
  }
  return size.addAndGet(heapsize);
}
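A short illustration of the signed-delta bookkeeping: caching adds the block's heap size to the running counter, and evicting the same block subtracts it again, so a put followed by an evict nets to zero. This assumes cb is a CachedBlock in scope and size is the cache's AtomicLong counter used in the snippet above; the assertion-style code is illustrative, not HBase test code.

  // Hypothetical illustration of the signed delta applied by updateSizeMetrics().
  long before = size.get();
  long afterPut = updateSizeMetrics(cb, false);   // put: counter grows by cb.heapSize()
  long afterEvict = updateSizeMetrics(cb, true);  // evict: same delta, negative sign
  assert afterPut == before + cb.heapSize();
  assert afterEvict == before;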
  if (cachedBlock != null) {
    BlockCategory blockCategory = cachedBlock.getBlockType().getCategory();

  validateBlockType(hfileBlock, expectedBlockType);
  passSchemaMetricsTo(hfileBlock);
  BlockCategory blockCategory = hfileBlock.getBlockType().getCategory();

      hfileBlock.getBlockType().getCategory())) {
    cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock, cacheConf.isInMemory());
  if (cachedBlock != null) {
    getSchemaMetrics().updateOnCacheHit(cachedBlock.getBlockType().getCategory(), isCompaction);
    return cachedBlock.getBufferWithoutHeader();

      hfileBlock.getBlockType().getCategory())) {
    cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock, cacheConf.isInMemory());
  validateBlockType(hfileBlock, expectedBlockType);
  HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader);
  BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory();
      updateCacheMetrics);
  if (cachedBlock != null) {
    if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) {
      cachedBlock = cachedBlock.unpack(hfileContext, fsBlockReader);
  candidatesFound++;
  HFileBlock block = (HFileBlock) e.getValue().getBuffer();
  if (cc.shouldCacheCompressed(block.getBlockType().getCategory())) {
    assertFalse("found an unpacked block, block=" + block + ", block buffer capacity="
        + block.getBufferWithoutHeader().capacity(), block.isUnpacked());
/**
 * Creates a new HFileBlock. Checksums have already been validated, so the byte buffer passed
 * into the constructor of this newly created block does not have checksum data even though the
 * header minor version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by setting a 0 value
 * in bytesPerChecksum.
 */
public HFileBlock getBlockForCaching(CacheConfig cacheConf) {
  HFileContext newContext = new HFileContextBuilder()
      .withBlockSize(fileContext.getBlocksize())
      .withBytesPerCheckSum(0)
      .withChecksumType(ChecksumType.NULL) // no checksums in cached data
      .withCompression(fileContext.getCompression())
      .withDataBlockEncoding(fileContext.getDataBlockEncoding())
      .withHBaseCheckSum(fileContext.isUseHBaseChecksum())
      .withCompressTags(fileContext.isCompressTags())
      .withIncludesMvcc(fileContext.isIncludesMvcc())
      .withIncludesTags(fileContext.isIncludesTags())
      .build();
  return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
      getUncompressedSizeWithoutHeader(), prevOffset,
      cacheConf.shouldCacheCompressed(blockType.getCategory())
        ? getOnDiskBufferWithHeader()
        : getUncompressedBufferWithHeader(),
      FILL_HEADER, startOffset, onDiskBytesWithHeader.length + onDiskChecksum.length,
      newContext);
}
}
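A hedged sketch of the write-side path this method serves: after a block is flushed to disk, the writer hands a cache-ready copy (compressed or uncompressed, depending on shouldCacheCompressed) to the block cache. The variables writer, name, and startOffset are assumed to be in scope, and the surrounding code is illustrative rather than the exact HBase writer.

  // Illustrative cache-on-write sketch (not the exact HBase writer code): once the block has
  // been written at startOffset, cache the representation chosen by getBlockForCaching().
  if (cacheConf.shouldCacheDataOnWrite()) {
    HFileBlock blockForCaching = writer.getBlockForCaching(cacheConf);
    cacheConf.getBlockCache().cacheBlock(
        new BlockCacheKey(name, startOffset), blockForCaching);
  }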
      shouldBeCached == isCached);
  if (isCached) {
    if (cacheConf.shouldCacheCompressed(fromCache.getBlockType().getCategory())) {
      if (compress != Compression.Algorithm.NONE) {
        assertFalse(fromCache.isUnpacked());