/**
 * Renders the given cached block as a human-readable string, omitting the filename.
 * @param cb the cached block to render
 * @param now current timestamp used to compute the block's age
 * @return offset, size, age, type and priority of {@code cb} as a String
 */
public static String toStringMinusFileName(final CachedBlock cb, final long now) {
  StringBuilder sb = new StringBuilder();
  sb.append("offset=").append(cb.getOffset());
  sb.append(", size=").append(cb.getSize());
  sb.append(", age=").append(now - cb.getCachedTime());
  sb.append(", type=").append(cb.getBlockType());
  sb.append(", priority=").append(cb.getBlockPriority());
  return sb.toString();
}
/**
 * Walks every block in the cache and asserts its reference count is zero.
 * Caches that are neither BucketCache nor CombinedBlockCache are skipped.
 */
private void iterateBlockCache(BlockCache cache, Iterator<CachedBlock> iterator) {
  while (iterator.hasNext()) {
    CachedBlock block = iterator.next();
    BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset());
    final int refCount;
    if (cache instanceof BucketCache) {
      refCount = ((BucketCache) cache).getRefCount(key);
    } else if (cache instanceof CombinedBlockCache) {
      refCount = ((CombinedBlockCache) cache).getRefCount(key);
    } else {
      // Other cache implementations do not expose ref counts; nothing to check.
      continue;
    }
    assertEquals(0, refCount);
  }
}
/**
 * Accumulates the given block into the per-file statistics unless this
 * accumulator is already full.
 * @param cb block to record
 * @return True if full.... if we won't be adding any more.
 */
public boolean update(final CachedBlock cb) {
  if (isFull()) {
    return true;
  }
  NavigableSet<CachedBlock> blocksForFile = this.cachedBlockByFile.get(cb.getFilename());
  if (blocksForFile == null) {
    blocksForFile = new ConcurrentSkipListSet<>();
    this.cachedBlockByFile.put(cb.getFilename(), blocksForFile);
  }
  blocksForFile.add(cb);
  this.size += cb.getSize();
  this.count++;
  BlockType blockType = cb.getBlockType();
  if (blockType != null && blockType.isData()) {
    this.dataBlockCount++;
    this.dataSize += cb.getSize();
  }
  // Age histogram is bucketed in seconds; cached time deltas are in nanoseconds.
  long ageInSeconds = (this.now - cb.getCachedTime()) / NANOS_PER_SECOND;
  this.hist.add(ageInSeconds, 1);
  return false;
}
/**
 * Summarizes the given blocks for one file as JSON.
 * @param filename name of the file the blocks belong to
 * @param blocks cached blocks to count
 * @return A JSON String of {@code filename} and counts of {@code blocks}
 * @throws JsonGenerationException
 * @throws JsonMappingException
 * @throws IOException
 */
public static String toJSON(final String filename, final NavigableSet<CachedBlock> blocks)
    throws JsonGenerationException, JsonMappingException, IOException {
  CachedBlockCountsPerFile perFile = new CachedBlockCountsPerFile(filename);
  for (CachedBlock block : blocks) {
    perFile.count++;
    perFile.size += block.getSize();
    BlockType type = block.getBlockType();
    boolean isData = type != null && type.isData();
    if (isData) {
      perFile.countData++;
      perFile.sizeData += block.getSize();
    }
  }
  return MAPPER.writeValueAsString(perFile);
}
/**
 * Renders the given cached block, including its filename, as a String.
 * @param cb the cached block to render
 * @param now current timestamp used to compute the block's age
 * @return The block content as String.
 */
public static String toString(final CachedBlock cb, final long now) {
  return new StringBuilder("filename=").append(cb.getFilename()).append(", ")
      .append(toStringMinusFileName(cb, now)).toString();
}
// The block must not be a data block of either kind (plain or encoded) —
// presumably only index/meta blocks are expected here; confirm with the
// surrounding test (enclosing method lies outside this view).
assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
assertNotEquals(BlockType.DATA, block.getBlockType());
/**
 * Attempt to add the specified cached block to this queue.
 *
 * <p>If the queue is smaller than the max size, or if the specified element
 * is ordered before the smallest element in the queue, the element will be
 * added to the queue. Otherwise, there is no side effect of this call.
 * @param cb block to try to add to the queue
 */
public void add(CachedBlock cb) {
  if(heapSize < maxSize) {
    // Still under capacity: accept the block unconditionally.
    queue.add(cb);
    heapSize += cb.heapSize();
  } else {
    // At capacity: only accept cb if it ranks above the current smallest element.
    CachedBlock head = queue.peek();
    if(cb.compareTo(head) > 0) {
      // Tentatively account for replacing head with cb.
      heapSize += cb.heapSize();
      heapSize -= head.heapSize();
      if(heapSize > maxSize) {
        // Even after dropping head we would exceed capacity: actually evict it.
        queue.poll();
      } else {
        // Both blocks fit after all: keep head and restore its heap size.
        heapSize += head.heapSize();
      }
      queue.add(cb);
    }
  }
}
/**
 * Get the buffer of the block with the specified name.
 * @param cacheKey block's cache key
 * @param caching true if the caller caches blocks on cache misses
 * @param repeat Whether this is a repeat lookup for the same block
 *   (used to avoid double counting cache misses when doing double-check locking)
 *   {@see HFileReaderV2#readBlock(long, long, boolean, boolean, boolean, BlockType)}
 * @return buffer of specified cache key, or null if not in cache
 */
@Override
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat) {
  CachedBlock cached = map.get(cacheKey);
  if (cached != null) {
    // Hit: record it and bump the block's access ordinal before handing out the buffer.
    stats.hit(caching);
    cached.access(count.incrementAndGet());
    return cached.getBuffer();
  }
  // Miss: only counted on the first lookup so double-check locking in the
  // caller does not inflate the miss count.
  if (!repeat) {
    stats.miss(caching);
  }
  return null;
}
/**
 * Cache the block with the specified name and buffer.
 * <p>
 * It is assumed this will NEVER be called on an already cached block. If
 * that is done, an exception will be thrown.
 * @param cacheKey block's cache key
 * @param buf block buffer
 * @param inMemory if block is in-memory
 * @throws IllegalStateException if a block is already cached under {@code cacheKey}
 */
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
  CachedBlock cb = map.get(cacheKey);
  if (cb != null) {
    // IllegalStateException (still a RuntimeException, so existing callers'
    // catch clauses keep working) is more descriptive than the raw
    // RuntimeException thrown before, and the message now names the key.
    throw new IllegalStateException("Cached an already cached block: " + cacheKey);
  }
  cb = new CachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory);
  long newSize = updateSizeMetrics(cb, false);
  map.put(cacheKey, cb);
  elements.incrementAndGet();
  // Kick off an eviction pass if we have grown past the acceptable watermark
  // and one is not already running.
  if (newSize > acceptableSize() && !evictionInProgress) {
    runEviction();
  }
}
/**
 * Folds the given block into the per-file statistics, unless this
 * accumulator has already reached its limit.
 * @param cb block to record
 * @return True if full.... if we won't be adding any more.
 */
public boolean update(final CachedBlock cb) {
  if (isFull()) {
    return true;
  }
  final String file = cb.getFilename();
  NavigableSet<CachedBlock> blocks = this.cachedBlockByFile.get(file);
  if (blocks == null) {
    blocks = new ConcurrentSkipListSet<CachedBlock>();
    this.cachedBlockByFile.put(file, blocks);
  }
  blocks.add(cb);
  this.size += cb.getSize();
  this.count++;
  final BlockType type = cb.getBlockType();
  if (type != null && type.isData()) {
    this.dataBlockCount++;
    this.dataSize += cb.getSize();
  }
  // Record how long this block has been cached into the age metric.
  this.age.update(this.now - cb.getCachedTime());
  return false;
}
/**
 * Logs per-file block counts and sizes (total and data-only), followed by the
 * same summary rendered as JSON.
 */
private void logPerFile(final BlockCacheUtil.CachedBlocksByFile cbsbf) throws IOException {
  for (Map.Entry<String, NavigableSet<CachedBlock>> entry :
      cbsbf.getCachedBlockStatsByFile().entrySet()) {
    final String filename = entry.getKey();
    final NavigableSet<CachedBlock> blocks = entry.getValue();
    int count = 0;
    long size = 0;
    int countData = 0;
    long sizeData = 0;
    for (CachedBlock cb : blocks) {
      count++;
      size += cb.getSize();
      BlockType bt = cb.getBlockType();
      if (bt != null && bt.isData()) {
        countData++;
        sizeData += cb.getSize();
      }
    }
    LOG.info("filename=" + filename + ", count=" + count + ", countData=" + countData
        + ", size=" + size + ", sizeData=" + sizeData);
    LOG.info(BlockCacheUtil.toJSON(filename, blocks));
  }
}
/**
 * Builds a String view of the given cached block, prefixed with its filename.
 * @param cb the cached block to render
 * @param now current timestamp used to compute the block's age
 * @return The block content as String.
 */
public static String toString(final CachedBlock cb, final long now) {
  String withoutFileName = toStringMinusFileName(cb, now);
  return "filename=" + cb.getFilename() + ", " + withoutFileName;
}
// Assert the returned block is neither encoded nor plain data — the test
// apparently expects a non-data (index/meta) block here; confirm against the
// enclosing test method, which lies outside this view.
assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
assertNotEquals(BlockType.DATA, block.getBlockType());
/**
 * Empties the given block cache. LruBlockCache supports a direct clear; any
 * other implementation is drained by evicting blocks one at a time, retrying
 * until the block count reaches zero (with warnings if it takes several passes).
 * @param blockCache cache to empty
 * @throws InterruptedException if interrupted while sleeping between retries
 */
private void clearBlockCache(BlockCache blockCache) throws InterruptedException {
  if (blockCache instanceof LruBlockCache) {
    ((LruBlockCache) blockCache).clearCache();
  } else {
    // BucketCache may not return all cached blocks(blocks in write queue), so check it here.
    for (int clearCount = 0; blockCache.getBlockCount() > 0; clearCount++) {
      if (clearCount > 0) {
        // A previous pass left blocks behind; warn, back off briefly, retry.
        LOG.warn("clear block cache " + blockCache + " " + clearCount + " times, "
            + blockCache.getBlockCount() + " blocks remaining");
        Thread.sleep(10);
      }
      // Snapshot the iteration to avoid mutating the cache while iterating it.
      for (CachedBlock block : Lists.newArrayList(blockCache)) {
        BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset());
        // CombinedBucketCache may need evict two times.
        for (int evictCount = 0; blockCache.evictBlock(key); evictCount++) {
          if (evictCount > 1) {
            // Evicting the same key more than twice suggests something is wrong.
            LOG.warn("evict block " + block + " in " + blockCache + " " + evictCount
                + " times, maybe a bug here");
          }
        }
      }
    }
  }
}
/**
 * Formats the given cached block as a String without its filename.
 * @param cb the cached block to format
 * @param now current timestamp used to compute the block's age
 * @return The block content of <code>bc</code> as a String minus the filename.
 */
public static String toStringMinusFileName(final CachedBlock cb, final long now) {
  final long age = now - cb.getCachedTime();
  return String.format("offset=%d, size=%d, age=%d, type=%s, priority=%s",
      cb.getOffset(), cb.getSize(), age, cb.getBlockType(), cb.getBlockPriority());
}
/**
 * Counts total and data-only blocks/bytes for one file and serializes the
 * result to JSON.
 * @param filename name of the file the blocks belong to
 * @param blocks cached blocks to summarize
 * @return A JSON String of {@code filename} and counts of {@code blocks}
 * @throws JsonGenerationException
 * @throws JsonMappingException
 * @throws IOException
 */
public static String toJSON(final String filename, final NavigableSet<CachedBlock> blocks)
    throws JsonGenerationException, JsonMappingException, IOException {
  int count = 0;
  long size = 0;
  int countData = 0;
  long sizeData = 0;
  for (CachedBlock block : blocks) {
    count++;
    size += block.getSize();
    BlockType type = block.getBlockType();
    if (type != null && type.isData()) {
      countData++;
      sizeData += block.getSize();
    }
  }
  // Accumulate in locals, then populate the DTO once before serializing.
  CachedBlockCountsPerFile counts = new CachedBlockCountsPerFile(filename);
  counts.count = count;
  counts.size = size;
  counts.countData = countData;
  counts.sizeData = sizeData;
  return MAPPER.writeValueAsString(counts);
}
@Override public void run() { Scan s = new Scan().withStartRow(ROW4).withStopRow(ROW5).setCaching(1); try { while(!doScan.get()) { try { // Sleep till you start scan Thread.sleep(1); } catch (InterruptedException e) { } } List<BlockCacheKey> cacheList = new ArrayList<>(); Iterator<CachedBlock> iterator = cache.iterator(); // evict all the blocks while (iterator.hasNext()) { CachedBlock next = iterator.next(); BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset()); cacheList.add(cacheKey); // evict what ever is available cache.evictBlock(cacheKey); } try (ResultScanner scanner = table.getScanner(s)) { while (scanner.next() != null) { } } compactReadLatch.countDown(); } catch (IOException e) { } } }
/**
 * For each file tracked by {@code cbsbf}, logs block/data-block counts and
 * byte totals, then logs the JSON rendering of the same data.
 */
private void logPerFile(final BlockCacheUtil.CachedBlocksByFile cbsbf) throws IOException {
  for (Map.Entry<String, NavigableSet<CachedBlock>> perFile :
      cbsbf.getCachedBlockStatsByFile().entrySet()) {
    int blockCount = 0;
    long totalBytes = 0;
    int dataBlockCount = 0;
    long dataBytes = 0;
    for (CachedBlock block : perFile.getValue()) {
      blockCount++;
      totalBytes += block.getSize();
      BlockType type = block.getBlockType();
      if (type != null && type.isData()) {
        dataBlockCount++;
        dataBytes += block.getSize();
      }
    }
    LOG.info("filename=" + perFile.getKey() + ", count=" + blockCount + ", countData="
        + dataBlockCount + ", size=" + totalBytes + ", sizeData=" + dataBytes);
    LOG.info(BlockCacheUtil.toJSON(perFile.getKey(), perFile.getValue()));
  }
}
// NOTE(review): fragment — the enclosing method and the loop that defines the
// first `next`/`iterator` start outside this view; left byte-identical.
// Record the key of the current block and evict it (best effort).
BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
cacheList.add(cacheKey);
cache.evictBlock(cacheKey);
// Collect the keys of whatever blocks remain cached after the eviction above.
while (iterator.hasNext()) {
  CachedBlock next = iterator.next();
  BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
  newCacheList.add(cacheKey);
// NOTE(review): fragment — the loop body and closing braces continue outside
// this view; left byte-identical.
// For each cached block, look up its ref count when the cache is a BucketCache.
while (iterator.hasNext()) {
  CachedBlock next = iterator.next();
  BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
  if (cache instanceof BucketCache) {
    refCount = ((BucketCache) cache).getRefCount(cacheKey);