/**
 * Creates an item cached at offset 0.
 * @param blockName name used to build the cache key
 * @param size size of this item, in bytes
 */
CachedItem(String blockName, int size) {
  // Delegate to the offset-taking constructor so key construction lives in one place.
  this(blockName, size, 0);
}
/**
 * Creates an item cached at the given offset.
 * @param blockName name used to build the cache key
 * @param size size of this item, in bytes
 * @param offset file offset used to build the cache key
 */
CachedItem(String blockName, int size, int offset) { this.cacheKey = new BlockCacheKey(blockName, offset); this.size = size; }
/**
 * Evicts every cached block belonging to the given HFile.
 * <p>
 * Used by evict-on-close to purge all of an HFile's blocks from the cache.
 *
 * @return how many blocks were actually evicted
 */
@Override
public int evictBlocksByHfileName(String hfileName) {
  // All keys for one file fall between offsets Long.MIN_VALUE and Long.MAX_VALUE (inclusive).
  BlockCacheKey lowest = new BlockCacheKey(hfileName, Long.MIN_VALUE);
  BlockCacheKey highest = new BlockCacheKey(hfileName, Long.MAX_VALUE);
  int evicted = 0;
  for (BlockCacheKey candidate : blocksByHFile.subSet(lowest, true, highest, true)) {
    if (evictBlock(candidate)) {
      evicted++;
    }
  }
  return evicted;
}
/**
 * Test stub: a cached block that reports the given heap size (the per-block overhead is
 * subtracted back out in {@code heapSize()}) and carries no real payload.
 * @param heapSize total heap size this block should account for, overhead included
 * @param name block name used to build the cache key (offset 0)
 * @param accessTime access time recorded for LRU ordering
 */
public CachedBlock(final long heapSize, String name, long accessTime) {
  super(new BlockCacheKey(name, 0), new Cacheable() {
    @Override
    public long heapSize() {
      // NOTE(review): the (int) cast truncates for sizes > Integer.MAX_VALUE — presumably
      // test sizes never get that large; confirm before reusing with big values.
      return ((int)(heapSize - CachedBlock.PER_BLOCK_OVERHEAD));
    }

    @Override
    public int getSerializedLength() {
      // Stub has no serialized form.
      return 0;
    }

    @Override
    public void serialize(ByteBuffer destination, boolean includeNextBlockMetadata) {
      // No-op: this stub carries no payload.
    }

    @Override
    public CacheableDeserializer<Cacheable> getDeserializer() {
      // TODO Auto-generated method stub
      return null;
    }

    @Override
    public BlockType getBlockType() {
      return BlockType.DATA;
    }

    @Override
    public MemoryType getMemoryType() {
      return MemoryType.EXCLUSIVE;
    }
  }, accessTime, false);
}
}
/**
 * Caches the HFile block that was just written.
 * @param offset the offset of the block within the file; used to build the cache key.
 */
private void doCacheOnWrite(long offset) {
  cacheConf.getBlockCache().ifPresent(cache -> {
    // Re-materialize the block in its cache-ready format before inserting it.
    HFileBlock toCache = blockWriter.getBlockForCaching(cacheConf);
    BlockCacheKey key = new BlockCacheKey(name, offset, true, toCache.getBlockType());
    cache.cacheBlock(key, toCache);
  });
}
/** Builds the three cached blocks compared by the equality/ordering tests. */
@Before
public void setUp() throws Exception {
  Cacheable payload = Mockito.mock(Cacheable.class);
  Cacheable otherPayload = Mockito.mock(Cacheable.class);
  BlockCacheKey key = new BlockCacheKey("name", 0);
  BlockCacheKey differentKey = new BlockCacheKey("name2", 1);
  // Same access time as 'block' but a different key/payload.
  block = new LruCachedBlock(key, payload, 0);
  blockEqual = new LruCachedBlock(differentKey, otherPayload, 0);
  // Same key/payload as 'block' but a different access time.
  blockNotEqual = new LruCachedBlock(key, payload, 1);
}
/**
 * Returns a block back to the cache once the caller is done with it.
 * No-op when the block is null or no block cache is configured.
 */
@Override
public void returnBlock(HFileBlock block) {
  if (block == null) {
    return;
  }
  this.cacheConf.getBlockCache().ifPresent(blockCache -> {
    BlockCacheKey cacheKey = new BlockCacheKey(this.getFileContext().getHFileName(),
        block.getOffset(), this.isPrimaryReplicaReader(), block.getBlockType());
    blockCache.returnBlock(cacheKey, block);
  });
}
/** One worker pass: 100 lookups; on a miss cache the block, on a hit verify its contents. */
@Override
public void doAnAction() throws Exception {
  for (int iter = 0; iter < 100; iter++) {
    // Key and payload are both derived from the thread index and the iteration.
    BlockCacheKey key = new BlockCacheKey("key_" + finalI + "_" + iter, 0);
    Arrays.fill(buf, (byte) (finalI * iter));
    final ByteArrayCacheable expected = new ByteArrayCacheable(buf);
    ByteArrayCacheable cached =
        (ByteArrayCacheable) toBeTested.getBlock(key, true, false, true);
    if (cached == null) {
      // Miss: populate the cache for a later reader.
      toBeTested.cacheBlock(key, expected);
    } else {
      // Hit: contents must match what a writer stored under this key.
      assertArrayEquals(cached.buf, expected.buf);
    }
  }
  totalQueries.incrementAndGet();
} };
/** Walks the cache iterator and asserts every remaining block's ref count is zero. */
private void iterateBlockCache(BlockCache cache, Iterator<CachedBlock> iterator) {
  while (iterator.hasNext()) {
    CachedBlock cached = iterator.next();
    BlockCacheKey key = new BlockCacheKey(cached.getFilename(), cached.getOffset());
    final int refCount;
    if (cache instanceof BucketCache) {
      refCount = ((BucketCache) cache).getRefCount(key);
    } else if (cache instanceof CombinedBlockCache) {
      refCount = ((CombinedBlockCache) cache).getRefCount(key);
    } else {
      // Other cache implementations do not expose ref counts; nothing to check.
      continue;
    }
    assertEquals(0, refCount);
  }
}
/**
 * Set up variables and get BucketCache and WriterThread into state where tests can manually
 * control the running of WriterThread and BucketCache is empty.
 * <p>
 * Creates a single-writer-thread MockBucketCache, disables its writer so queued entries are
 * not drained automatically, and verifies both the RAM cache and the write queue start empty.
 * @throws Exception
 */
@Before
public void setUp() throws Exception {
  // Arbitrary capacity.
  final int capacity = 16;
  // Run with one writer thread only. Means there will be one writer queue only too. We depend
  // on this in below.
  final int writerThreadsCount = 1;
  this.bc = new MockBucketCache("offheap", capacity, 1, new int [] {1}, writerThreadsCount,
      capacity, null, 100/*Tolerate ioerrors for 100ms*/);
  assertEquals(writerThreadsCount, bc.writerThreads.length);
  assertEquals(writerThreadsCount, bc.writerQueues.size());
  // Get reference to our single WriterThread instance.
  this.wt = bc.writerThreads[0];
  this.q = bc.writerQueues.get(0);
  // Stop the writer from draining the queue so the test can step it manually.
  wt.disableWriter();
  this.plainKey = new BlockCacheKey("f", 0);
  this.plainCacheable = Mockito.mock(Cacheable.class);
  // Sanity: nothing cached or queued yet.
  assertThat(bc.ramCache.isEmpty(), is(true));
  assertTrue(q.isEmpty());
}
/**
 * Waits for the signal to start, evicts every block currently in the cache, then runs a
 * small scan and counts down the latch so the main test thread can proceed.
 */
@Override
public void run() {
  // setCaching(1) forces a fresh fetch per next() call.
  Scan s = new Scan().withStartRow(ROW4).withStopRow(ROW5).setCaching(1);
  try {
    while(!doScan.get()) {
      try {
        // Sleep till you start scan
        Thread.sleep(1);
      } catch (InterruptedException e) {
        // Ignored on purpose: keep polling until doScan flips.
      }
    }
    List<BlockCacheKey> cacheList = new ArrayList<>();
    Iterator<CachedBlock> iterator = cache.iterator();
    // evict all the blocks
    while (iterator.hasNext()) {
      CachedBlock next = iterator.next();
      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
      cacheList.add(cacheKey);
      // evict what ever is available
      cache.evictBlock(cacheKey);
    }
    // Drain the scanner; the rows themselves are not inspected here.
    try (ResultScanner scanner = table.getScanner(s)) {
      while (scanner.next() != null) {
      }
    }
    compactReadLatch.countDown();
  } catch (IOException e) {
    // Swallowed deliberately: a scan failure means the latch never counts down and the
    // enclosing test times out. NOTE(review): consider logging/failing loudly instead.
  }
}
/**
 * Loads {@code count} data blocks and {@code count} index blocks into the cache, recording
 * two hits per index {@code i}, then evicts the first block of each kind.
 */
private void addDataAndHits(final BlockCache bc, final int count) {
  Cacheable dataEntry = new DataCacheEntry();
  Cacheable indexEntry = new IndexCacheEntry();
  for (int i = 0; i < count; i++) {
    BlockCacheKey dataKey = new BlockCacheKey("f", i);
    BlockCacheKey indexKey = new BlockCacheKey("f", i + count);
    // First lookup misses; after caching, both blocks are looked up again for two hits.
    bc.getBlock(dataKey, true, false, true);
    bc.cacheBlock(dataKey, dataEntry);
    bc.cacheBlock(indexKey, indexEntry);
    bc.getBlock(dataKey, true, false, true);
    bc.getBlock(indexKey, true, false, true);
  }
  assertEquals(2 * count /*Data and Index blocks*/, bc.getStats().getHitCount());
  // Evict the first data block and the first index block.
  BlockCacheKey firstDataKey = new BlockCacheKey("f", 0);
  BlockCacheKey firstIndexKey = new BlockCacheKey("f", 0 + count);
  bc.evictBlock(firstDataKey);
  bc.evictBlock(firstIndexKey);
  bc.getStats().getEvictedCount();
}
private void clearBlockCache(BlockCache blockCache) throws InterruptedException { if (blockCache instanceof LruBlockCache) { ((LruBlockCache) blockCache).clearCache(); } else { // BucketCache may not return all cached blocks(blocks in write queue), so check it here. for (int clearCount = 0; blockCache.getBlockCount() > 0; clearCount++) { if (clearCount > 0) { LOG.warn("clear block cache " + blockCache + " " + clearCount + " times, " + blockCache.getBlockCount() + " blocks remaining"); Thread.sleep(10); } for (CachedBlock block : Lists.newArrayList(blockCache)) { BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset()); // CombinedBucketCache may need evict two times. for (int evictCount = 0; blockCache.evictBlock(key); evictCount++) { if (evictCount > 1) { LOG.warn("evict block " + block + " in " + blockCache + " " + evictCount + " times, maybe a bug here"); } } } } } }
/**
 * Regression test for a race between eviction and re-caching of the same key: an evictor
 * blocked on the offset write-lock must not corrupt accounting for a block that was removed
 * and re-cached while it waited.
 */
@Test
public void testMemoryLeak() throws Exception {
  final BlockCacheKey cacheKey = new BlockCacheKey("dummy", 1L);
  cacheAndWaitUntilFlushedToBucket(cache, cacheKey, new CacheTestUtils.ByteArrayCacheable(
      new byte[10]));
  // Hold the write lock for this block's offset so the evict thread below blocks on it.
  long lockId = cache.backingMap.get(cacheKey).offset();
  ReentrantReadWriteLock lock = cache.offsetLock.getLock(lockId);
  lock.writeLock().lock();
  Thread evictThread = new Thread("evict-block") {
    @Override
    public void run() {
      cache.evictBlock(cacheKey);
    }
  };
  evictThread.start();
  // Wait until the evict thread is parked on the offset lock.
  cache.offsetLock.waitForWaiters(lockId, 1);
  // Simulate a concurrent removal, then re-cache the same key before unblocking the evictor.
  cache.blockEvicted(cacheKey, cache.backingMap.remove(cacheKey), true);
  cacheAndWaitUntilFlushedToBucket(cache, cacheKey, new CacheTestUtils.ByteArrayCacheable(
      new byte[10]));
  lock.writeLock().unlock();
  evictThread.join();
  // The re-cached block must survive: exactly one block, non-zero size, still iterable.
  assertEquals(1L, cache.getBlockCount());
  assertTrue(cache.getCurrentSize() > 0L);
  assertTrue("We should have a block!", cache.iterator().hasNext());
}
HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf); cache.cacheBlock( new BlockCacheKey(nameForCaching, beginOffset, true, blockForCaching.getBlockType()), blockForCaching); });
private void readStoreFile(Path storeFilePath) throws Exception { // Open the file HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, true, conf); while (!reader.prefetchComplete()) { // Sleep for a bit Thread.sleep(1000); } // Check that all of the data blocks were preloaded BlockCache blockCache = cacheConf.getBlockCache().get(); long offset = 0; while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) { HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, null); BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); boolean isCached = blockCache.getBlock(blockCacheKey, true, false, true) != null; if (block.getBlockType() == BlockType.DATA || block.getBlockType() == BlockType.ROOT_INDEX || block.getBlockType() == BlockType.INTERMEDIATE_INDEX) { assertTrue(isCached); } offset += block.getOnDiskSizeWithHeader(); } }
HFileBlock.FILL_HEADER, -1, -1, -1, meta); BlockCacheKey key = new BlockCacheKey("key1", 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); ByteBuffer block1Buffer = ByteBuffer.allocate(length);
/**
 * Round-trips an encoded block through an LruBlockCache and verifies the block type (and,
 * for the NONE encoding, the raw buffer contents) survive caching.
 */
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  List<KeyValue> testKvs = generator.generateTestKeyValues(60, useTag);
  HFileBlock sourceBlock = getSampleHFileBlock(testKvs, useTag);
  HFileBlock diskBlock = createBlockOnDisk(testKvs, sourceBlock, useTag);

  LruBlockCache cache = new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey key = new BlockCacheKey("test", 0);
  cache.cacheBlock(key, diskBlock);

  HeapSize fetched = cache.getBlock(key, false, false, true);
  assertTrue(fetched instanceof HFileBlock);
  HFileBlock roundTripped = (HFileBlock) fetched;
  if (blockEncoder.getDataBlockEncoding() == DataBlockEncoding.NONE) {
    // No encoding: the raw buffer must come back unchanged.
    assertEquals(sourceBlock.getBufferReadOnly(), roundTripped.getBufferReadOnly());
  } else {
    if (BlockType.ENCODED_DATA != roundTripped.getBlockType()) {
      // Debug aid: show which encoder produced the unexpected block type before failing.
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, roundTripped.getBlockType());
  }
}
BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
/** * @param bc The block cache instance. * @param cc Cache config. * @param doubling If true, addition of element ups counter by 2, not 1, because element added * to onheap and offheap caches. * @param sizing True if we should run sizing test (doesn't always apply). */ void basicBlockCacheOps(final BlockCache bc, final CacheConfig cc, final boolean doubling, final boolean sizing) { assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory()); BlockCacheKey bck = new BlockCacheKey("f", 0); Cacheable c = new DataCacheEntry(); // Do asserts on block counting. long initialBlockCount = bc.getBlockCount(); bc.cacheBlock(bck, c, cc.isInMemory()); assertEquals(doubling ? 2 : 1, bc.getBlockCount() - initialBlockCount); bc.evictBlock(bck); assertEquals(initialBlockCount, bc.getBlockCount()); // Do size accounting. Do it after the above 'warm-up' because it looks like some // buffers do lazy allocation so sizes are off on first go around. if (sizing) { long originalSize = bc.getCurrentSize(); bc.cacheBlock(bck, c, cc.isInMemory()); assertTrue(bc.getCurrentSize() > originalSize); bc.evictBlock(bck); long size = bc.getCurrentSize(); assertEquals(originalSize, size); } }