/**
 * Total number of cached blocks across both tiers of this combined cache.
 * @return block count of the on-heap cache plus block count of the L2 cache
 */
@Override
public long getBlockCount() {
  long onHeapBlocks = onHeapCache.getBlockCount();
  long l2Blocks = l2Cache.getBlockCount();
  return onHeapBlocks + l2Blocks;
}
// Jamon-generated template statement: format the block count with grouping
// separators (e.g. "1,234"), HTML-escape it, and emit it into the page writer.
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(String.format("%,d", bc.getBlockCount())), jamonWriter);
@Test public void testClearBlockCache() throws Exception { BlockCache blockCache1 = rs1.getBlockCache().get(); BlockCache blockCache2 = rs2.getBlockCache().get(); long initialBlockCount1 = blockCache1.getBlockCount(); long initialBlockCount2 = blockCache2.getBlockCount(); // scan will cause blocks to be added in BlockCache scanAllRegionsForRS(rs1); assertEquals(blockCache1.getBlockCount() - initialBlockCount1, HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)); clearRegionBlockCache(rs1); scanAllRegionsForRS(rs2); assertEquals(blockCache2.getBlockCount() - initialBlockCount2, HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); clearRegionBlockCache(rs2); assertEquals(initialBlockCount1, blockCache1.getBlockCount()); assertEquals(initialBlockCount2, blockCache2.getBlockCount()); }
// Jamon-generated template statement: format the block count with grouping
// separators (e.g. "1,234"), HTML-escape it, and emit it into the page writer.
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(String.format("%,d", bc.getBlockCount())), jamonWriter);
private void clearBlockCache(BlockCache blockCache) throws InterruptedException { if (blockCache instanceof LruBlockCache) { ((LruBlockCache) blockCache).clearCache(); } else { // BucketCache may not return all cached blocks(blocks in write queue), so check it here. for (int clearCount = 0; blockCache.getBlockCount() > 0; clearCount++) { if (clearCount > 0) { LOG.warn("clear block cache " + blockCache + " " + clearCount + " times, " + blockCache.getBlockCount() + " blocks remaining"); Thread.sleep(10); } for (CachedBlock block : Lists.newArrayList(blockCache)) { BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset()); // CombinedBucketCache may need evict two times. for (int evictCount = 0; blockCache.evictBlock(key); evictCount++) { if (evictCount > 1) { LOG.warn("evict block " + block + " in " + blockCache + " " + evictCount + " times, maybe a bug here"); } } } } } }
@Test public void testClearBlockCacheFromAdmin() throws Exception { Admin admin = HTU.getAdmin(); BlockCache blockCache1 = rs1.getBlockCache().get(); BlockCache blockCache2 = rs2.getBlockCache().get(); long initialBlockCount1 = blockCache1.getBlockCount(); long initialBlockCount2 = blockCache2.getBlockCount(); // scan will cause blocks to be added in BlockCache scanAllRegionsForRS(rs1); assertEquals(blockCache1.getBlockCount() - initialBlockCount1, HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)); scanAllRegionsForRS(rs2); assertEquals(blockCache2.getBlockCount() - initialBlockCount2, HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME); assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); assertEquals(initialBlockCount1, blockCache1.getBlockCount()); assertEquals(initialBlockCount2, blockCache2.getBlockCount()); }
@Test public void testClearBlockCacheFromAsyncAdmin() throws Exception { AsyncAdmin admin = ConnectionFactory.createAsyncConnection(HTU.getConfiguration()).get().getAdmin(); BlockCache blockCache1 = rs1.getBlockCache().get(); BlockCache blockCache2 = rs2.getBlockCache().get(); long initialBlockCount1 = blockCache1.getBlockCount(); long initialBlockCount2 = blockCache2.getBlockCount(); // scan will cause blocks to be added in BlockCache scanAllRegionsForRS(rs1); assertEquals(blockCache1.getBlockCount() - initialBlockCount1, HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)); scanAllRegionsForRS(rs2); assertEquals(blockCache2.getBlockCount() - initialBlockCount2, HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME).get(); assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) + HTU .getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); assertEquals(initialBlockCount1, blockCache1.getBlockCount()); assertEquals(initialBlockCount2, blockCache2.getBlockCount()); }
@Test public void testLruBlockCache() throws IOException { CacheConfig cc = new CacheConfig(this.conf); assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory()); BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf); assertTrue(blockCache instanceof LruBlockCache); logPerBlock(blockCache); addDataAndHits(blockCache, 3); // The below has no asserts. It is just exercising toString and toJSON code. LOG.info("count=" + blockCache.getBlockCount() + ", currentSize=" + blockCache.getCurrentSize() + ", freeSize=" + blockCache.getFreeSize()); LOG.info(Objects.toString(blockCache.getStats())); BlockCacheUtil.CachedBlocksByFile cbsbf = logPerBlock(blockCache); LOG.info(Objects.toString(cbsbf)); logPerFile(cbsbf); bucketCacheReport(blockCache); LOG.info(BlockCacheUtil.toJSON(cbsbf)); }
// NOTE(review): fragment — 'result', 'rs' and the enclosing method are defined
// outside this excerpt; the duplicated asserts/close calls below presumably
// belong to separate scan rounds elided here — confirm against the full file.
// Snapshot the cache size before scanning.
long blocksStart = blockCache.getBlockCount();
Scan scan = new Scan();
// Disable block caching for this scan — presumably so the scan itself does
// not grow the cache being measured; TODO confirm.
scan.setCacheBlocks(false);
assertEquals(2 * BLOOM_TYPE.length, result.size());
rs.close();
// Cache size after the scan, for comparison against blocksStart.
long blocksEnd = blockCache.getBlockCount();
assertEquals(2 * BLOOM_TYPE.length, result.size());
rs.close();
blocksEnd = blockCache.getBlockCount();
/** * @param bc The block cache instance. * @param cc Cache config. * @param doubling If true, addition of element ups counter by 2, not 1, because element added * to onheap and offheap caches. * @param sizing True if we should run sizing test (doesn't always apply). */ void basicBlockCacheOps(final BlockCache bc, final CacheConfig cc, final boolean doubling, final boolean sizing) { assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory()); BlockCacheKey bck = new BlockCacheKey("f", 0); Cacheable c = new DataCacheEntry(); // Do asserts on block counting. long initialBlockCount = bc.getBlockCount(); bc.cacheBlock(bck, c, cc.isInMemory()); assertEquals(doubling ? 2 : 1, bc.getBlockCount() - initialBlockCount); bc.evictBlock(bck); assertEquals(initialBlockCount, bc.getBlockCount()); // Do size accounting. Do it after the above 'warm-up' because it looks like some // buffers do lazy allocation so sizes are off on first go around. if (sizing) { long originalSize = bc.getCurrentSize(); bc.cacheBlock(bck, c, cc.isInMemory()); assertTrue(bc.getCurrentSize() > originalSize); bc.evictBlock(bck); long size = bc.getCurrentSize(); assertEquals(originalSize, size); } }
// NOTE(review): fragment — 'region' and 'blockCache' come from the enclosing
// method, which is outside this excerpt.
// Expect the block cache to be empty before triggering the compaction.
assertEquals(0, blockCache.getBlockCount());
// compact(false): presumably a non-major compaction — TODO confirm flag meaning.
region.compact(false);
LOG.debug("compactStores() returned");
// NOTE(review): fragment — 'lbc', 'bc' and 'initialL1BlockCount' are declared
// in the enclosing method outside this excerpt; presumably lbc is the L1 cache
// and bc the L2 cache — confirm.
// Baseline block count of bc before caching through lbc.
long initialL2BlockCount = bc.getBlockCount();
Cacheable c = new DataCacheEntry();
BlockCacheKey bck = new BlockCacheKey("bck", 0);
// Cache with inMemory=false: lbc's count must rise by one while bc's count
// stays unchanged.
lbc.cacheBlock(bck, c, false);
assertEquals(initialL1BlockCount + 1, lbc.getBlockCount());
assertEquals(initialL2BlockCount, bc.getBlockCount());
// NOTE(review): fragment — 'expectedBlockCount', 'r', 'data', 'data2' and the
// operations between these assertion groups are outside this excerpt; the
// repeated blocks presumably bracket separate Get/Scan calls — confirm against
// the full file.
// Snapshot cache count and hit/miss stats as the measurement baseline.
long startBlockCount = cache.getBlockCount();
long startBlockHits = cache.getStats().getHitCount();
long startBlockMiss = cache.getStats().getMissCount();
// If the stats moved since the snapshot (background activity), re-snapshot
// and confirm they are now stable.
if (startBlockCount != cache.getBlockCount()
  || startBlockHits != cache.getStats().getHitCount()
  || startBlockMiss != cache.getStats().getMissCount()) {
  startBlockCount = cache.getBlockCount();
  startBlockHits = cache.getStats().getHitCount();
  startBlockMiss = cache.getStats().getMissCount();
  assertEquals(startBlockCount, cache.getBlockCount());
  assertEquals(startBlockHits, cache.getStats().getHitCount());
  assertEquals(startBlockMiss, cache.getStats().getMissCount());
  // Running expectations, advanced (++) as each subsequent read is expected
  // to hit the cache without adding blocks or misses.
  long expectedBlockHits = startBlockHits;
  long expectedBlockMiss = startBlockMiss;
  assertEquals(expectedBlockCount, cache.getBlockCount());
  assertEquals(expectedBlockHits, cache.getStats().getHitCount());
  assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
  assertEquals(expectedBlockCount, cache.getBlockCount());
  assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
  assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
  assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
  assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
  assertEquals(expectedBlockCount, cache.getBlockCount());
  assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
/**
 * Number of blocks currently held by the block cache.
 * @return the cached-block count, or 0 when no block cache is configured
 */
@Override
public long getBlockCacheCount() {
  return this.blockCache == null ? 0 : this.blockCache.getBlockCount();
}
/** Convenience accessor for the current number of blocks in the shared cache. */
private static long getBlkCount() {
  long count = blockCache.getBlockCount();
  return count;
}
/**
 * Total number of cached blocks across both tiers of this combined cache.
 * @return block count of the LRU cache plus block count of the L2 cache
 */
@Override
public long getBlockCount() {
  long lruBlocks = lruCache.getBlockCount();
  long l2Blocks = l2Cache.getBlockCount();
  return lruBlocks + l2Blocks;
}
@Test public void testClearBlockCache() throws Exception { BlockCache blockCache1 = rs1.getCacheConfig().getBlockCache(); BlockCache blockCache2 = rs2.getCacheConfig().getBlockCache(); long initialBlockCount1 = blockCache1.getBlockCount(); long initialBlockCount2 = blockCache2.getBlockCount(); // scan will cause blocks to be added in BlockCache scanAllRegionsForRS(rs1); assertEquals(blockCache1.getBlockCount() - initialBlockCount1, HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)); clearRegionBlockCache(rs1); scanAllRegionsForRS(rs2); assertEquals(blockCache2.getBlockCount() - initialBlockCount2, HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); clearRegionBlockCache(rs2); assertEquals(initialBlockCount1, blockCache1.getBlockCount()); assertEquals(initialBlockCount2, blockCache2.getBlockCount()); }
@Test public void testClearBlockCacheFromAsyncAdmin() throws Exception { AsyncAdmin admin = ConnectionFactory.createAsyncConnection(HTU.getConfiguration()).get().getAdmin(); // All RS run in a same process, so the block cache is same for rs1 and rs2 BlockCache blockCache = rs1.getCacheConfig().getBlockCache(); long initialBlockCount = blockCache.getBlockCount(); // scan will cause blocks to be added in BlockCache scanAllRegionsForRS(rs1); assertEquals(blockCache.getBlockCount() - initialBlockCount, HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)); scanAllRegionsForRS(rs2); assertEquals(blockCache.getBlockCount() - initialBlockCount, HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME).get(); assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); assertEquals(initialBlockCount, blockCache.getBlockCount()); }
@Test public void testClearBlockCacheFromAdmin() throws Exception { Admin admin = HTU.getAdmin(); // All RS run in a same process, so the block cache is same for rs1 and rs2 BlockCache blockCache = rs1.getCacheConfig().getBlockCache(); long initialBlockCount = blockCache.getBlockCount(); // scan will cause blocks to be added in BlockCache scanAllRegionsForRS(rs1); assertEquals(blockCache.getBlockCount() - initialBlockCount, HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)); scanAllRegionsForRS(rs2); assertEquals(blockCache.getBlockCount() - initialBlockCount, HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME); assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); assertEquals(initialBlockCount, blockCache.getBlockCount()); }
@Test public void testLruBlockCache() throws IOException { CacheConfig.instantiateBlockCache(this.conf); CacheConfig cc = new CacheConfig(this.conf); assertTrue(cc.isBlockCacheEnabled()); assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory()); assertTrue(cc.getBlockCache() instanceof LruBlockCache); logPerBlock(cc.getBlockCache()); addDataAndHits(cc.getBlockCache(), 3); // The below has no asserts. It is just exercising toString and toJSON code. BlockCache bc = cc.getBlockCache(); LOG.info("count=" + bc.getBlockCount() + ", currentSize=" + bc.getCurrentSize() + ", freeSize=" + bc.getFreeSize() ); LOG.info(Objects.toString(cc.getBlockCache().getStats())); BlockCacheUtil.CachedBlocksByFile cbsbf = logPerBlock(cc.getBlockCache()); LOG.info(Objects.toString(cbsbf)); logPerFile(cbsbf); bucketCacheReport(cc.getBlockCache()); LOG.info(BlockCacheUtil.toJSON(cbsbf)); }