private static int compareCacheBlock(Cacheable left, Cacheable right,
    boolean includeNextBlockMetadata) {
  // Serialize both blocks into freshly allocated heap buffers, then
  // compare their raw serialized bytes.
  ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength());
  left.serialize(l, includeNextBlockMetadata);
  ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength());
  right.serialize(r, includeNextBlockMetadata);
  return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(),
      r.array(), r.arrayOffset(), r.limit());
}
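// For reference, a self-contained sketch of the same compare-by-serialized-bytes
// technique outside HBase. BufferSerializable is a hypothetical stand-in for
// Cacheable, and java.util.Arrays.compare (Java 9+) stands in for Bytes.compareTo;
// note that Bytes.compareTo takes (offset, length) while Arrays.compare takes
// (fromIndex, toIndex).
import java.nio.ByteBuffer;
import java.util.Arrays;

interface BufferSerializable {
  int getSerializedLength();
  void serialize(ByteBuffer dest);
}

public class CompareSketch {
  // Same shape as compareCacheBlock: serialize both sides into heap
  // buffers sized by getSerializedLength(), then compare backing arrays.
  static int compare(BufferSerializable left, BufferSerializable right) {
    ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength());
    left.serialize(l);
    ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength());
    right.serialize(r);
    return Arrays.compare(l.array(), l.arrayOffset(), l.arrayOffset() + l.limit(),
        r.array(), r.arrayOffset(), r.arrayOffset() + r.limit());
  }

  static BufferSerializable block(byte[] payload) {
    return new BufferSerializable() {
      public int getSerializedLength() { return payload.length; }
      public void serialize(ByteBuffer dest) { dest.put(payload); }
    };
  }

  public static void main(String[] args) {
    // Identical payloads serialize to identical bytes, so compare() == 0.
    System.out.println(compare(block(new byte[] {1, 2, 3}),
        block(new byte[] {1, 2, 3})) == 0);  // prints: true
  }
}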
/**
 * Pass through a too-big entry and ensure it is cleared from the queues and
 * the ramCache. Manually run the WriterThread.
 * @throws InterruptedException
 */
@Test
public void testTooBigEntry() throws InterruptedException {
  // A serialized length of Integer.MAX_VALUE can never fit in any bucket.
  Cacheable tooBigCacheable = Mockito.mock(Cacheable.class);
  Mockito.when(tooBigCacheable.getSerializedLength()).thenReturn(Integer.MAX_VALUE);
  this.bc.cacheBlock(this.plainKey, tooBigCacheable);
  doDrainOfOneEntry(this.bc, this.wt, this.q);
}
final LongAdder realCacheSize)
    throws CacheFullException, IOException, BucketAllocatorException {
  int len = data.getSerializedLength();
private int compare(Cacheable left, Cacheable right) {
  // Same byte-wise comparison as compareCacheBlock above, minus the
  // includeNextBlockMetadata flag.
  ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength());
  left.serialize(l);
  ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength());
  right.serialize(r);
  return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(),
      r.array(), r.arrayOffset(), r.limit());
}
/**
 * Cache the block with the specified key and buffer. First finds the smallest
 * SingleSizeCache the block fits in. If the block doesn't fit in any, this
 * returns without doing anything.
 * <p>
 * It is assumed this will NEVER be called on an already-cached block. If that
 * is done, it is assumed that you are reinserting the exact same block due to
 * a race condition, and a runtime exception will be thrown.
 *
 * @param cacheKey block cache key
 * @param cachedItem block buffer
 */
public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem) {
  Entry<Integer, SingleSizeCache> scacheEntry =
      getHigherBlock(cachedItem.getSerializedLength());

  this.requestStats.addin(cachedItem.getSerializedLength());

  if (scacheEntry == null) {
    return; // we can't cache, something too big.
  }

  this.successfullyCachedStats.addin(cachedItem.getSerializedLength());
  SingleSizeCache scache = scacheEntry.getValue();

  /*
   * This will throw a runtime exception if we try to cache the same value
   * twice.
   */
  scache.cacheBlock(cacheKey, cachedItem);
}
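// getHigherBlock is not shown above. A minimal sketch of one way to implement
// it, assuming the per-size caches are kept in a sorted map keyed by block
// size; the field name "slabs", the map type, and the String values are
// hypothetical, not the original code.
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentSkipListMap;

class SlabLookupSketch {
  private final ConcurrentSkipListMap<Integer, String> slabs =
      new ConcurrentSkipListMap<>();

  SlabLookupSketch() {
    slabs.put(64 * 1024, "64KB slab");
    slabs.put(128 * 1024, "128KB slab");
  }

  // Smallest slab whose block size is >= the requested size, or null when
  // the item is too big for every slab (the "return without caching" case).
  Entry<Integer, String> getHigherBlock(int size) {
    return slabs.ceilingEntry(size);
  }

  public static void main(String[] args) {
    SlabLookupSketch s = new SlabLookupSketch();
    System.out.println(s.getHigherBlock(70_000));   // 131072=128KB slab
    System.out.println(s.getHigherBlock(300_000));  // null: too big
  }
}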
@Override
public void cacheBlock(BlockCacheKey blockName, Cacheable toBeCached) {
  ByteBuffer storedBlock;

  try {
    storedBlock = backingStore.alloc(toBeCached.getSerializedLength());
  } catch (InterruptedException e) {
    LOG.warn("SlabAllocator was interrupted while waiting for a block to become available", e);
    Thread.currentThread().interrupt(); // restore the interrupt flag for callers
    return;
  }

  CacheablePair newEntry = new CacheablePair(toBeCached.getDeserializer(), storedBlock);
  toBeCached.serialize(storedBlock);

  synchronized (this) {
    CacheablePair alreadyCached = backingMap.putIfAbsent(blockName, newEntry);
    if (alreadyCached != null) {
      // Another thread won the race to cache this key: release our slab
      // and surface the double-caching as an error.
      backingStore.free(storedBlock);
      throw new RuntimeException("already cached " + blockName);
    }
    if (actionWatcher != null) {
      actionWatcher.onInsertion(blockName, this);
    }
  }
  newEntry.recentlyAccessed.set(System.nanoTime());
  this.size.addAndGet(newEntry.heapSize());
}
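// The putIfAbsent-then-free dance above is a general pattern for resolving the
// race where two threads cache the same key: allocate first, publish with
// putIfAbsent, and release the allocation if another thread already published.
// A self-contained sketch with a counter standing in for the slab allocator;
// every name here is hypothetical.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

public class InsertOrFreeSketch {
  private final ConcurrentHashMap<String, byte[]> map = new ConcurrentHashMap<>();
  private final AtomicInteger liveAllocations = new AtomicInteger();

  byte[] alloc(int len) { liveAllocations.incrementAndGet(); return new byte[len]; }
  void free(byte[] buf) { liveAllocations.decrementAndGet(); }

  void insert(String key, int len) {
    byte[] buf = alloc(len);                   // allocate before publishing
    byte[] prior = map.putIfAbsent(key, buf);  // atomic publish
    if (prior != null) {
      free(buf);                               // lost the race: give it back
      throw new RuntimeException("already cached " + key);
    }
  }

  public static void main(String[] args) {
    InsertOrFreeSketch cache = new InsertOrFreeSketch();
    cache.insert("block-1", 64);
    try {
      cache.insert("block-1", 64);                   // duplicate insert
    } catch (RuntimeException expected) {
      System.out.println(expected.getMessage());     // already cached block-1
    }
    System.out.println(cache.liveAllocations.get()); // 1: no leaked allocation
  }
}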
final AtomicLong realCacheSize)
    throws CacheFullException, IOException, BucketAllocatorException {
  int len = data.getSerializedLength();
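// The signatures above declare CacheFullException and BucketAllocatorException,
// which is where a too-big entry (see testTooBigEntry) gets rejected. A
// hypothetical sketch of such a guard; the class, field, and message are
// illustrative only, not the real BucketAllocator.
class BucketAllocatorException extends Exception {
  BucketAllocatorException(String msg) { super(msg); }
}

class BucketAllocatorSketch {
  private final int biggestBucketSize;

  BucketAllocatorSketch(int biggestBucketSize) {
    this.biggestBucketSize = biggestBucketSize;
  }

  // Reject requests larger than the biggest bucket. A mocked
  // getSerializedLength() of Integer.MAX_VALUE always lands here, so the
  // writer thread drops the entry instead of caching it.
  long allocateBlock(int blockSize) throws BucketAllocatorException {
    if (blockSize > biggestBucketSize) {
      throw new BucketAllocatorException("Allocation too big: size=" + blockSize);
    }
    return 0L; // offset of the allocated block; allocation logic elided
  }
}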