    final LongAdder realCacheSize) throws CacheFullException, IOException,
    BucketAllocatorException {
  int len = data.getSerializedLength();
  long offset = bucketAllocator.allocateBlock(len);
  BucketEntry bucketEntry = ioEngine.usesSharedMemory()
      ? new SharedMemoryBucketEntry(offset, len, accessCounter, inMemory)
      : new BucketEntry(offset, len, accessCounter, inMemory);
  bucketEntry.setDeserialiserReference(data.getDeserializer());
  try {
    if (data instanceof HFileBlock) {
      // An HFileBlock can write its backing buffers straight to the IOEngine,
      // avoiding the extra copy taken in the else branch below.
    } else {
      ByteBuffer bb = ByteBuffer.allocate(len);
      data.serialize(bb, true);
      ioEngine.write(bb, offset);
    }
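// Hedged caller sketch: the BucketCache writer thread drains queued
// RAMQueueEntry items and calls writeToCache(...) for each one. This is a
// simplified illustration of that drain loop, not the verbatim doDrain()
// implementation; the local names (entries, deserialiserMap) are assumptions.
for (RAMQueueEntry re : entries) {
  try {
    BucketEntry be = re.writeToCache(ioEngine, bucketAllocator, deserialiserMap, realCacheSize);
    if (be != null) {
      backingMap.put(re.getKey(), be);
    }
  } catch (CacheFullException cfe) {
    // No room in the bucket allocator right now; the entry is retried on a later drain.
  }
}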
if (buf.heapSize() > maxBlockSize) {
  // Reject blocks over the configured per-block limit instead of caching them.
  LOG.warn("Trying to cache too large a block "
      + cacheKey.getHfileName() + " @ " + cacheKey.getOffset()
      + " is " + buf.heapSize() + " which is larger than " + maxBlockSize);
  return;
}
LruCachedBlock cb = new LruCachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory);
map.put(cacheKey, cb);
long val = elements.incrementAndGet();
if (buf.getBlockType().isData()) {
  dataBlockElements.increment();
}
@Override
public void returnBlock(BlockCacheKey cacheKey, Cacheable block) {
  if (block.getMemoryType() == MemoryType.SHARED) {
    BucketEntry bucketEntry = backingMap.get(cacheKey);
    if (bucketEntry != null) {
      int refCount = bucketEntry.decrementRefCountAndGet();
      if (refCount == 0 && bucketEntry.isMarkedForEvict()) {
        forceEvict(cacheKey);
      }
    }
  }
}
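// Hedged usage sketch: a read of a SHARED-memory block pins it via a reference
// count, so every successful getBlock(...) must be paired with returnBlock(...)
// or a block marked for eviction can never be freed. The caller shape below is
// an illustration; only getBlock/returnBlock come from the code above.
Cacheable block = cache.getBlock(cacheKey, /*caching*/ true, /*repeat*/ false,
    /*updateCacheMetrics*/ true);
try {
  // ... read from the block while it is pinned ...
} finally {
  if (block != null) {
    cache.returnBlock(cacheKey, block);
  }
}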
private static int compareCacheBlock(Cacheable left, Cacheable right,
    boolean includeNextBlockMetadata) {
  ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength());
  left.serialize(l, includeNextBlockMetadata);
  ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength());
  right.serialize(r, includeNextBlockMetadata);
  return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(),
      r.array(), r.arrayOffset(), r.limit());
}
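// Hedged test usage: comparing serialized bytes asserts that a block survives a
// cache round trip unchanged. The JUnit assert and the surrounding locals
// (cache, key, block) are illustrative assumptions.
Cacheable fetched = cache.getBlock(key, false, false, false);
assertEquals(0, compareCacheBlock(block, fetched, false));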
private RAMQueueEntry checkRamCache(BlockCacheKey cacheKey) {
  RAMQueueEntry removedBlock = ramCache.remove(cacheKey);
  if (removedBlock != null) {
    this.blockNumber.decrement();
    this.heapSize.add(-1 * removedBlock.getData().heapSize());
  }
  return removedBlock;
}
@VisibleForTesting
Map<BlockType, Integer> getBlockTypeCountsForTest() {
  Map<BlockType, Integer> counts = new EnumMap<>(BlockType.class);
  for (LruCachedBlock cb : map.values()) {
    BlockType blockType = cb.getBuffer().getBlockType();
    Integer count = counts.get(blockType);
    counts.put(blockType, (count == null ? 0 : count) + 1);
  }
  return counts;
}
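// Hedged test usage: after caching one data block and one root index block,
// the per-type histogram should report one of each. The cache setup and the
// exact BlockType constants are assumptions for illustration.
Map<BlockType, Integer> counts = cache.getBlockTypeCountsForTest();
assertEquals(Integer.valueOf(1), counts.get(BlockType.DATA));
assertEquals(Integer.valueOf(1), counts.get(BlockType.ROOT_INDEX));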
/**
 * Pass a too-big entry through and ensure it is cleared from the queues and
 * the ramCache. Manually runs the WriterThread.
 */
@Test
public void testTooBigEntry() throws InterruptedException {
  Cacheable tooBigCacheable = Mockito.mock(Cacheable.class);
  Mockito.when(tooBigCacheable.getSerializedLength()).thenReturn(Integer.MAX_VALUE);
  this.bc.cacheBlock(this.plainKey, tooBigCacheable);
  doDrainOfOneEntry(this.bc, this.wt, this.q);
}
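// Hedged follow-up assertions that could close out the test above: after the
// drain, nothing should remain queued or cached. getBlockCount() is part of
// the BlockCache interface; the queue field q mirrors the fixture above.
assertEquals(0, this.q.size());
assertEquals(0, this.bc.getBlockCount());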
public static void getBlockAndAssertEquals(BlockCache cache, BlockCacheKey key,
    Cacheable blockToCache, ByteBuffer destBuffer, ByteBuffer expectedBuffer) {
  destBuffer.clear();
  cache.cacheBlock(key, blockToCache);
  Cacheable actualBlock = cache.getBlock(key, false, false, false);
  actualBlock.serialize(destBuffer, true);
  assertEquals(expectedBuffer, destBuffer);
  cache.returnBlock(key, actualBlock);
}
/**
 * Helper function that updates the local size counter and also updates any
 * per-cf or per-blocktype metrics it can discern from the given
 * {@link CachedBlock}.
 *
 * @param cb the block whose size is being added or subtracted
 * @param evict true if the block is being evicted, so its size is subtracted
 */
protected long updateSizeMetrics(CachedBlock cb, boolean evict) {
  long heapsize = cb.heapSize();
  if (evict) {
    heapsize *= -1;
  }
  Cacheable cachedBlock = cb.getBuffer();
  SchemaMetrics schemaMetrics = cachedBlock.getSchemaMetrics();
  if (schemaMetrics != null) {
    schemaMetrics.updateOnCachePutOrEvict(
        cachedBlock.getBlockType().getCategory(), heapsize, evict);
  }
  return size.addAndGet(heapsize);
}
private int compare(Cacheable left, Cacheable right) {
  ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength());
  left.serialize(l);
  ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength());
  right.serialize(r);
  return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(),
      r.array(), r.arrayOffset(), r.limit());
}
} else {
  this.blockNumber.increment();
  this.heapSize.add(cachedItem.heapSize());
  blocksByHFile.add(cacheKey);
}
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
  boolean metaBlock = buf.getBlockType().getCategory() != BlockCategory.DATA;
  if (metaBlock) {
    onHeapCache.cacheBlock(cacheKey, buf, inMemory);
  } else {
    l2Cache.cacheBlock(cacheKey, buf, inMemory);
  }
}
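// Hedged companion sketch: reads mirror the write-side split above, preferring
// the on-heap tier and falling back to the L2 cache on a miss. This is an
// illustration, not the verbatim CombinedBlockCache implementation.
@Override
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat,
    boolean updateCacheMetrics) {
  Cacheable block = onHeapCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
  return block != null ? block
      : l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
}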
/**
 * Cache the block with the specified key and buffer. First finds the smallest
 * SingleSizeCache the block fits in. If the block does not fit in any, it
 * returns without doing anything.
 * <p>
 * It is assumed this will NEVER be called on an already-cached block; doing so
 * is taken to mean the same exact block is being reinserted due to a race
 * condition, and a runtime exception is thrown.
 *
 * @param cacheKey block cache key
 * @param cachedItem block buffer
 */
public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem) {
  Entry<Integer, SingleSizeCache> scacheEntry =
      getHigherBlock(cachedItem.getSerializedLength());
  this.requestStats.addin(cachedItem.getSerializedLength());
  if (scacheEntry == null) {
    return; // we can't cache, something too big.
  }
  this.successfullyCachedStats.addin(cachedItem.getSerializedLength());
  SingleSizeCache scache = scacheEntry.getValue();
  // This will throw a runtime exception if we try to cache the same value twice.
  scache.cacheBlock(cacheKey, cachedItem);
}
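// Hedged sketch of what getHigherBlock(...) could look like: if the slab caches
// are kept in a TreeMap keyed by their block size (the field name `sizer` is a
// hypothetical stand-in), a ceiling lookup returns the smallest slab whose
// blocks are at least `size` bytes, or null when nothing is large enough.
private Entry<Integer, SingleSizeCache> getHigherBlock(int size) {
  return sizer.ceilingEntry(size);
}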
@Override
public void cacheBlock(BlockCacheKey blockName, Cacheable toBeCached) {
  ByteBuffer storedBlock;
  try {
    storedBlock = backingStore.alloc(toBeCached.getSerializedLength());
  } catch (InterruptedException e) {
    LOG.warn("SlabAllocator was interrupted while waiting for block to become available", e);
    Thread.currentThread().interrupt(); // restore the interrupt status we swallowed
    return;
  }
  CacheablePair newEntry = new CacheablePair(toBeCached.getDeserializer(), storedBlock);
  toBeCached.serialize(storedBlock);
  synchronized (this) {
    CacheablePair alreadyCached = backingMap.putIfAbsent(blockName, newEntry);
    if (alreadyCached != null) {
      backingStore.free(storedBlock);
      throw new RuntimeException("already cached " + blockName);
    }
    if (actionWatcher != null) {
      actionWatcher.onInsertion(blockName, this);
    }
  }
  newEntry.recentlyAccessed.set(System.nanoTime());
  this.size.addAndGet(newEntry.heapSize());
}
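// Hedged usage sketch: caching the same key twice is a programming error here,
// which a test can pin down by expecting the RuntimeException. The JUnit fail()
// call and the cache/key/block locals are illustrative assumptions.
cache.cacheBlock(key, block);
try {
  cache.cacheBlock(key, block); // second insert of the same key
  fail("expected RuntimeException on double insert");
} catch (RuntimeException expected) {
  // the slab's buffer was freed and the original entry left intact
}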
public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime,
    boolean inMemory) {
  this.cacheKey = cacheKey;
  this.buf = buf;
  this.accessTime = accessTime;
  // We approximate the size of this class by the size of its name string
  // plus the size of its byte buffer plus the overhead associated with all
  // the base classes. We also include the base class sizes in the
  // PER_BLOCK_OVERHEAD variable rather than align()ing them with their
  // buffer lengths. This variable is used elsewhere in unit tests.
  this.size = ClassSize.align(cacheKey.heapSize())
      + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD;
  if (inMemory) {
    this.priority = BlockPriority.MEMORY;
  } else {
    this.priority = BlockPriority.SINGLE;
  }
}
/**
 * Helper function that updates the local size counter and also updates any
 * per-cf or per-blocktype metrics it can discern from the given
 * {@link LruCachedBlock}.
 */
private long updateSizeMetrics(LruCachedBlock cb, boolean evict) {
  long heapsize = cb.heapSize();
  BlockType bt = cb.getBuffer().getBlockType();
  if (evict) {
    heapsize *= -1;
  }
  if (bt != null && bt.isData()) {
    dataBlockSize.add(heapsize);
  }
  return size.addAndGet(heapsize);
}
cacheStats.ioHit(timeTaken);
if (cachedBlock.getMemoryType() == MemoryType.SHARED) {
  bucketEntry.incrementRefCountAndGet();
}
    final AtomicLong realCacheSize) throws CacheFullException, IOException,
    BucketAllocatorException {
  int len = data.getSerializedLength();
  long offset = bucketAllocator.allocateBlock(len);
  BucketEntry bucketEntry = new BucketEntry(offset, len, accessCounter, inMemory);
  bucketEntry.setDeserialiserReference(data.getDeserializer(), deserialiserMap);
  try {
    if (data instanceof HFileBlock) {
      // An HFileBlock writes its backing buffer straight to the IOEngine;
      // that fast path is omitted in this excerpt.
    } else {
      ByteBuffer bb = ByteBuffer.allocate(len);
      data.serialize(bb);
      ioEngine.write(bb, offset);
    }