/**
 * @param bufferSize Size of each buffer created by this pool.
 * @param maxPoolSize Max number of buffers to keep in this pool.
 * @param directByteBuffer Whether to create direct ByteBuffers or on-heap ByteBuffers.
 */
public ByteBufferPool(int bufferSize, int maxPoolSize, boolean directByteBuffer) {
  this.bufferSize = bufferSize;
  this.maxPoolSize = maxPoolSize;
  this.directByteBuffer = directByteBuffer;
  // TODO can add initialPoolSize config also and make those many BBs ready for use.
  // Note: maxPoolSize is a buffer count, not a byte size, so it must not be
  // passed through byteDesc().
  LOG.info("Created with bufferSize={} and maxPoolSize={}",
      org.apache.hadoop.util.StringUtils.byteDesc(bufferSize), maxPoolSize);
  this.count = new AtomicInteger(0);
}
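// A minimal, hypothetical sketch of the pooling pattern this constructor sets up:
// a lock-free bounded pool that hands out fixed-size ByteBuffers and only keeps
// up to maxPoolSize of them on release. The names here (SimpleBufferPool,
// getBuffer, putbackBuffer) are illustrative, not the HBase API.
import java.nio.ByteBuffer;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;

class SimpleBufferPool {
  private final Queue<ByteBuffer> buffers = new ConcurrentLinkedQueue<>();
  private final AtomicInteger count = new AtomicInteger(0); // buffers created so far
  private final int bufferSize;
  private final int maxPoolSize;
  private final boolean direct;

  SimpleBufferPool(int bufferSize, int maxPoolSize, boolean direct) {
    this.bufferSize = bufferSize;
    this.maxPoolSize = maxPoolSize;
    this.direct = direct;
  }

  ByteBuffer getBuffer() {
    ByteBuffer b = buffers.poll();
    if (b != null) {
      b.clear(); // reuse a pooled buffer
      return b;
    }
    count.incrementAndGet(); // track total allocations; may briefly exceed maxPoolSize
    return direct ? ByteBuffer.allocateDirect(bufferSize) : ByteBuffer.allocate(bufferSize);
  }

  void putbackBuffer(ByteBuffer b) {
    if (b.capacity() == bufferSize && buffers.size() < maxPoolSize) {
      buffers.offer(b); // keep for reuse; otherwise let GC reclaim it
    }
  }
}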
@Override
public String toString() {
  return String.format(
      "size [%s, %s, %s); files [%d, %d); ratio %f; off-peak ratio %f; throttle point %d;"
          + " major period %d, major jitter %f, min locality to compact %f;"
          + " tiered compaction: max_age %d, incoming window min %d,"
          + " compaction policy for tiered window %s, single output for minor %b,"
          + " compaction window factory %s",
      StringUtils.byteDesc(minCompactSize),
      StringUtils.byteDesc(maxCompactSize),
      StringUtils.byteDesc(offPeakMaxCompactSize),
      minFilesToCompact,
      maxFilesToCompact,
      compactionRatio,
      offPeakCompactionRatio,
      throttlePoint,
      majorCompactionPeriod,
      majorCompactionJitter,
      minLocalityToForceCompact,
      dateTieredMaxStoreFileAgeMillis,
      dateTieredIncomingWindowMin,
      compactionPolicyForDateTieredWindow,
      dateTieredSingleOutputForMinorCompaction,
      dateTieredCompactionWindowFactory);
}
@Override
public String toString() {
  StringBuilder sb = new StringBuilder(32);
  return sb.append("SnapshotWithSize:[").append(name).append(" ")
      .append(StringUtils.byteDesc(size)).append("]").toString();
}
}
private static LruBlockCache createOnHeapCache(final Configuration c) {
  final long cacheSize = MemorySizeUtil.getOnHeapCacheSize(c);
  if (cacheSize < 0) {
    return null;
  }
  int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
  LOG.info("Allocating onheap LruBlockCache size=" + StringUtils.byteDesc(cacheSize)
      + ", blockSize=" + StringUtils.byteDesc(blockSize));
  return new LruBlockCache(cacheSize, blockSize, true, c);
}
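// StringUtils.byteDesc(...) used throughout these snippets is Hadoop's
// human-readable size formatter. A minimal stand-in (not the Hadoop
// implementation) for readers without a Hadoop classpath:
final class ByteDesc {
  private static final String[] UNITS = { "B", "KB", "MB", "GB", "TB", "PB" };

  private ByteDesc() {}

  // Formats a byte count with binary (1024-based) prefixes, e.g. 134217728 -> "128.0 MB".
  static String byteDesc(long bytes) {
    double value = bytes;
    int unit = 0;
    while (value >= 1024 && unit < UNITS.length - 1) {
      value /= 1024;
      unit++;
    }
    return unit == 0 ? bytes + " B" : String.format("%.1f %s", value, UNITS[unit]);
  }
}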
private void verifyCapacityAndClasses(long capacitySize, String ioclass, String mapclass)
    throws IOException {
  if (capacitySize != cacheCapacity) {
    throw new IOException("Mismatched cache capacity:" + StringUtils.byteDesc(capacitySize)
        + ", expected: " + StringUtils.byteDesc(cacheCapacity));
  }
  if (!ioEngine.getClass().getName().equals(ioclass)) {
    throw new IOException("Class name for IO engine mismatch: " + ioclass
        + ", expected:" + ioEngine.getClass().getName());
  }
  if (!backingMap.getClass().getName().equals(mapclass)) {
    throw new IOException("Class name for cache map mismatch: " + mapclass
        + ", expected:" + backingMap.getClass().getName());
  }
}
public long free(long toFree) {
  if (LOG.isTraceEnabled()) {
    LOG.trace("freeing " + StringUtils.byteDesc(toFree) + " from " + this);
  }
  LruCachedBlock cb;
  long freedBytes = 0;
  while ((cb = queue.pollLast()) != null) {
    freedBytes += evictBlock(cb, true);
    if (freedBytes >= toFree) {
      return freedBytes;
    }
  }
  if (LOG.isTraceEnabled()) {
    LOG.trace("freed " + StringUtils.byteDesc(freedBytes) + " from " + this);
  }
  return freedBytes;
}
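// A self-contained sketch of the same size-targeted eviction idea: pop entries
// from the cold end of a deque until at least `toFree` bytes are reclaimed.
// The Entry type and sizes here are hypothetical.
import java.util.ArrayDeque;
import java.util.Deque;

class EvictionSketch {
  record Entry(String key, long size) {}

  // Returns the number of bytes actually freed, which may be less than toFree
  // if the queue empties first, or slightly more due to whole-entry eviction.
  static long free(Deque<Entry> queue, long toFree) {
    long freed = 0;
    Entry e;
    while (freed < toFree && (e = queue.pollLast()) != null) {
      freed += e.size(); // evict the least-recently-used entry
    }
    return freed;
  }

  public static void main(String[] args) {
    Deque<Entry> q = new ArrayDeque<>();
    q.addFirst(new Entry("hot", 100));
    q.addLast(new Entry("cold", 300));
    System.out.println(free(q, 250)); // prints 300: "cold" alone satisfies the target
  }
}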
String msg = "Only " + StringUtils.byteDesc(totalSpace) + " total space under " + filePath + ", not enough for requested " + StringUtils.byteDesc(sizePerFile); LOG.warn(msg); fileChannels[i] = rafs[i].getChannel(); channelLocks[i] = new ReentrantLock(); LOG.info("Allocating cache " + StringUtils.byteDesc(sizePerFile) + ", on the path:" + filePath); } catch (IOException fex) {
/**
 * We allocate a number of byte buffers to cover the requested capacity. To avoid
 * running off the end of the array for the last byte (see
 * {@link ByteBufferArray#multiple}), we allocate one additional buffer with
 * capacity 0.
 * @param capacity total size of the byte buffer array
 * @param allocator the ByteBufferAllocator that will create the buffers
 * @throws IOException if the allocator throws while creating a buffer
 */
public ByteBufferArray(long capacity, ByteBufferAllocator allocator) throws IOException {
  this.bufferSize = DEFAULT_BUFFER_SIZE;
  if (this.bufferSize > (capacity / 16)) {
    this.bufferSize = (int) roundUp(capacity / 16, 32768);
  }
  this.bufferCount = (int) (roundUp(capacity, bufferSize) / bufferSize);
  LOG.info("Allocating buffers total=" + StringUtils.byteDesc(capacity)
      + ", sizePerBuffer=" + StringUtils.byteDesc(bufferSize)
      + ", count=" + bufferCount);
  buffers = new ByteBuffer[bufferCount + 1];
  createBuffers(allocator);
}
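// The sizing math above relies on a roundUp helper. A minimal sketch of the
// arithmetic the constructor needs (assuming a DEFAULT_BUFFER_SIZE of 4 MiB;
// both the helper body and the numbers are illustrative):
class RoundUpSketch {
  // Round n up to the next multiple of `to`.
  static long roundUp(long n, long to) {
    return ((n + to - 1) / to) * to;
  }

  public static void main(String[] args) {
    long capacity = 1024L * 1024 * 1024; // 1 GiB requested
    long bufferSize = 4L * 1024 * 1024;  // 4 MiB per buffer
    long bufferCount = roundUp(capacity, bufferSize) / bufferSize;
    System.out.println(bufferCount); // 256 buffers, plus one extra capacity-0 buffer
  }
}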
public FileMmapEngine(String filePath, long capacity) throws IOException {
  this.path = filePath;
  this.size = capacity;
  long fileSize = 0;
  try {
    raf = new RandomAccessFile(filePath, "rw");
    fileSize = roundUp(capacity, ByteBufferArray.DEFAULT_BUFFER_SIZE);
    raf.setLength(fileSize);
    fileChannel = raf.getChannel();
    LOG.info("Allocating " + StringUtils.byteDesc(fileSize) + ", on the path:" + filePath);
  } catch (java.io.FileNotFoundException fex) {
    LOG.error("Can't create bucket cache file " + filePath, fex);
    throw fex;
  } catch (IOException ioex) {
    LOG.error("Can't extend bucket cache file; insufficient space for "
        + StringUtils.byteDesc(fileSize), ioex);
    shutdown();
    throw ioex;
  }
  ByteBufferAllocator allocator = new ByteBufferAllocator() {
    // Assumes every allocate() call requests the same size, so successive
    // mappings tile the file at offsets 0, size, 2*size, ...
    AtomicInteger pos = new AtomicInteger(0);
    @Override
    public ByteBuffer allocate(long size) throws IOException {
      ByteBuffer buffer = fileChannel.map(java.nio.channels.FileChannel.MapMode.READ_WRITE,
          pos.getAndIncrement() * size, size);
      return buffer;
    }
  };
  bufferArray = new ByteBufferArray(fileSize, allocator);
}
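// A self-contained sketch of the mapping strategy above: back a large logical
// buffer with fixed-size MappedByteBuffer slices of one file. The file name
// and sizes are illustrative.
import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;

class MmapChunksSketch {
  public static void main(String[] args) throws Exception {
    final long chunkSize = 4L * 1024 * 1024; // 4 MiB per mapped slice
    final int chunkCount = 4;                // 16 MiB total
    try (RandomAccessFile raf = new RandomAccessFile("/tmp/mmap-sketch.cache", "rw")) {
      raf.setLength(chunkSize * chunkCount); // pre-size the backing file
      FileChannel ch = raf.getChannel();
      MappedByteBuffer[] slices = new MappedByteBuffer[chunkCount];
      for (int i = 0; i < chunkCount; i++) {
        // Each slice maps a disjoint window of the file at offset i * chunkSize.
        slices[i] = ch.map(FileChannel.MapMode.READ_WRITE, i * chunkSize, chunkSize);
      }
      slices[2].put(0, (byte) 42); // write through the third slice
      System.out.println(slices[2].get(0));
    }
  }
}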
/**
 * Utility method broken out of internalPrepareFlushCache so that method is smaller.
 */
private void logFatLineOnFlush(Collection<HStore> storesToFlush, long sequenceId) {
  if (!LOG.isInfoEnabled()) {
    return;
  }
  // Log a fat line detailing what is being flushed.
  StringBuilder perCfExtras = null;
  if (!isAllFamilies(storesToFlush)) {
    perCfExtras = new StringBuilder();
    for (HStore store : storesToFlush) {
      MemStoreSize mss = store.getFlushableSize();
      perCfExtras.append("; ").append(store.getColumnFamilyName());
      perCfExtras.append("={dataSize=").append(StringUtils.byteDesc(mss.getDataSize()));
      perCfExtras.append(", heapSize=").append(StringUtils.byteDesc(mss.getHeapSize()));
      perCfExtras.append(", offHeapSize=").append(StringUtils.byteDesc(mss.getOffHeapSize()));
      perCfExtras.append("}");
    }
  }
  MemStoreSize mss = this.memStoreSizing.getMemStoreSize();
  LOG.info("Flushing " + storesToFlush.size() + "/" + stores.size() + " column families,"
      + " dataSize=" + StringUtils.byteDesc(mss.getDataSize())
      + " heapSize=" + StringUtils.byteDesc(mss.getHeapSize())
      + ((perCfExtras != null && perCfExtras.length() > 0) ? perCfExtras.toString() : "")
      + ((wal != null) ? "" : "; WAL is null, using passed sequenceid=" + sequenceId));
}
private MemStoreChunkPool initializePool(String label, long globalMemStoreSize,
    float poolSizePercentage, float initialCountPercentage, int chunkSize,
    HeapMemoryManager heapMemoryManager) {
  if (poolSizePercentage <= 0) {
    LOG.info("{} poolSizePercentage is 0 or less. So not using pool", label);
    return null;
  }
  if (chunkPoolDisabled) {
    return null;
  }
  if (poolSizePercentage > 1.0) {
    throw new IllegalArgumentException(
        MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY + " must be between 0.0 and 1.0");
  }
  int maxCount = (int) (globalMemStoreSize * poolSizePercentage / chunkSize);
  if (initialCountPercentage > 1.0 || initialCountPercentage < 0) {
    throw new IllegalArgumentException(label + " " + MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY
        + " must be between 0.0 and 1.0");
  }
  int initialCount = (int) (initialCountPercentage * maxCount);
  LOG.info("Allocating {} MemStoreChunkPool with chunk size {}, max count {}, initial count {}",
      label, StringUtils.byteDesc(chunkSize), maxCount, initialCount);
  MemStoreChunkPool memStoreChunkPool =
      new MemStoreChunkPool(label, chunkSize, maxCount, initialCount, poolSizePercentage);
  if (heapMemoryManager != null) {
    // Register with the heap memory manager so pool size can be tuned at runtime.
    // (A null check on memStoreChunkPool is unnecessary: it was just constructed.)
    heapMemoryManager.registerTuneObserver(memStoreChunkPool);
  }
  return memStoreChunkPool;
}
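// Worked example of the sizing arithmetic above, with hypothetical numbers:
// a 4 GiB global memstore, a 50% pool, 2 MiB chunks, and a 10% initial fill.
class ChunkPoolMath {
  public static void main(String[] args) {
    long globalMemStoreSize = 4L * 1024 * 1024 * 1024; // 4 GiB
    float poolSizePercentage = 0.5f;
    int chunkSize = 2 * 1024 * 1024;                   // 2 MiB
    float initialCountPercentage = 0.1f;

    int maxCount = (int) (globalMemStoreSize * poolSizePercentage / chunkSize);
    int initialCount = (int) (initialCountPercentage * maxCount);
    System.out.println(maxCount);     // 1024: at most 1024 chunks are pooled
    System.out.println(initialCount); // 102: chunks pre-allocated up front
  }
}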
protected final void logRollAndSetupWalProps(Path oldPath, Path newPath, long oldFileLen) {
  int oldNumEntries = this.numEntries.getAndSet(0);
  String newPathString = newPath != null ? CommonFSUtils.getPath(newPath) : null;
  if (oldPath != null) {
    this.walFile2Props.put(oldPath,
        new WalProps(this.sequenceIdAccounting.resetHighest(), oldFileLen));
    this.totalLogSize.addAndGet(oldFileLen);
    LOG.info("Rolled WAL {} with entries={}, filesize={}; new WAL {}",
        CommonFSUtils.getPath(oldPath), oldNumEntries, StringUtils.byteDesc(oldFileLen),
        newPathString);
  } else {
    LOG.info("New WAL {}", newPathString);
  }
}
@Override
public String toString() {
  return MoreObjects.toStringHelper(this)
      .add("blockCount", getBlockCount())
      .add("currentSize", StringUtils.byteDesc(getCurrentSize()))
      .add("freeSize", StringUtils.byteDesc(getFreeSize()))
      .add("maxSize", StringUtils.byteDesc(getMaxSize()))
      .add("heapSize", StringUtils.byteDesc(heapSize()))
      .add("minSize", StringUtils.byteDesc(minSize()))
      .add("minFactor", minFactor)
      .add("multiSize", StringUtils.byteDesc(multiSize()))
      .add("multiFactor", multiFactor)
      .add("singleSize", StringUtils.byteDesc(singleSize()))
      .add("singleFactor", singleFactor)
      .toString();
}
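// For reference, Guava's MoreObjects.toStringHelper used above renders
// "ClassName{key1=value1, key2=value2, ...}". A minimal usage sketch with
// made-up fields:
import com.google.common.base.MoreObjects;

class ToStringHelperSketch {
  private final long currentSize = 64L * 1024 * 1024;
  private final float minFactor = 0.95f;

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this)
        .add("currentSize", currentSize)
        .add("minFactor", minFactor)
        .toString(); // -> "ToStringHelperSketch{currentSize=67108864, minFactor=0.95}"
  }

  public static void main(String[] args) {
    System.out.println(new ToStringHelperSketch());
  }
}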
public CompactingMemStore(Configuration conf, CellComparator c, HStore store,
    RegionServicesForStores regionServices, MemoryCompactionPolicy compactionPolicy)
    throws IOException {
  super(conf, c, regionServices);
  this.store = store;
  this.regionServices = regionServices;
  this.pipeline = new CompactionPipeline(getRegionServices());
  this.compactor = createMemStoreCompactor(compactionPolicy);
  if (conf.getBoolean(MemStoreLAB.USEMSLAB_KEY, MemStoreLAB.USEMSLAB_DEFAULT)) {
    // if user requested to work with MSLABs (whether on- or off-heap), then the
    // immutable segments are going to use CellChunkMap as their index
    indexType = IndexType.CHUNK_MAP;
  } else {
    indexType = IndexType.ARRAY_MAP;
  }
  // The flush size depends on the index type, so this call must stay after the
  // index-type initialization above; do not move it earlier.
  initInmemoryFlushSize(conf);
  LOG.info("Store={}, in-memory flush size threshold={}, immutable segments index type={}, "
      + "compactor={}", this.store.getColumnFamilyName(),
      StringUtils.byteDesc(this.inmemoryFlushSize), this.indexType,
      (this.compactor == null ? "NULL" : this.compactor.toString()));
}
public void logStats() {
  // Log size
  long totalSize = heapSize();
  long freeSize = maxSize - totalSize;
  LruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", "
      + "freeSize=" + StringUtils.byteDesc(freeSize) + ", "
      + "max=" + StringUtils.byteDesc(this.maxSize) + ", "
      + "blockCount=" + getBlockCount() + ", "
      + "accesses=" + stats.getRequestCount() + ", "
      + "hits=" + stats.getHitCount() + ", "
      + "hitRatio=" + (stats.getHitCount() == 0 ? "0"
          : StringUtils.formatPercent(stats.getHitRatio(), 2)) + ", "
      + "cachingAccesses=" + stats.getRequestCachingCount() + ", "
      + "cachingHits=" + stats.getHitCachingCount() + ", "
      + "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? "0"
          : StringUtils.formatPercent(stats.getHitCachingRatio(), 2)) + ", "
      + "evictions=" + stats.getEvictionCount() + ", "
      + "evicted=" + stats.getEvictedCount() + ", "
      + "evictedPerRun=" + stats.evictedPerEviction());
}
public void logStats() {
  long totalSize = bucketAllocator.getTotalSize();
  long usedSize = bucketAllocator.getUsedSize();
  long freeSize = totalSize - usedSize;
  long cacheSize = getRealCacheSize();
  LOG.info("failedBlockAdditions=" + cacheStats.getFailedInserts() + ", "
      + "totalSize=" + StringUtils.byteDesc(totalSize) + ", "
      + "freeSize=" + StringUtils.byteDesc(freeSize) + ", "
      + "usedSize=" + StringUtils.byteDesc(usedSize) + ", "
      + "cacheSize=" + StringUtils.byteDesc(cacheSize) + ", "
      + "accesses=" + cacheStats.getRequestCount() + ", "
      + "hits=" + cacheStats.getHitCount() + ", "
      + "IOhitsPerSecond=" + cacheStats.getIOHitsPerSecond() + ", "
      + "IOTimePerHit=" + String.format("%.2f", cacheStats.getIOTimePerHit()) + ", "
      + "hitRatio=" + (cacheStats.getHitCount() == 0 ? "0"
          : StringUtils.formatPercent(cacheStats.getHitRatio(), 2)) + ", "
      + "cachingAccesses=" + cacheStats.getRequestCachingCount() + ", "
      + "cachingHits=" + cacheStats.getHitCachingCount() + ", "
      + "cachingHitsRatio=" + (cacheStats.getHitCachingCount() == 0 ? "0"
          : StringUtils.formatPercent(cacheStats.getHitCachingRatio(), 2)) + ", "
      + "evictions=" + cacheStats.getEvictionCount() + ", "
      + "evicted=" + cacheStats.getEvictedCount() + ", "
      + "evictedPerRun=" + cacheStats.evictedPerEviction());
  cacheStats.reset();
}
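// Both logStats() variants follow a snapshot-then-report pattern, and this one
// also resets counters so each report covers a single interval. A minimal,
// hypothetical periodic reporter built on ScheduledExecutorService:
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.LongAdder;

class StatsReporterSketch {
  final LongAdder hits = new LongAdder();
  final LongAdder requests = new LongAdder();

  void start() {
    ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();
    ses.scheduleAtFixedRate(() -> {
      // Snapshot, report, then reset so each line covers one 5-minute window.
      long h = hits.sumThenReset();
      long r = requests.sumThenReset();
      double ratio = r == 0 ? 0.0 : (double) h / r;
      System.out.printf("accesses=%d, hits=%d, hitRatio=%.2f%%%n", r, h, ratio * 100);
    }, 5, 5, TimeUnit.MINUTES);
  }
}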
// Fragment: the tail of a LOG.info(...) call (format string elided upstream)
// that reports the flushed memstore data size, the flush sequence id, whether
// the writer has a general bloom filter, and the output file path.
    StringUtils.byteDesc(snapshot.getDataSize()), cacheFlushId, writer.hasGeneralBloom(),
    writer.getPath());
result.add(writer.getPath());