/**
 * Fills {@code dest} with newly allocated buffers of {@code size} bytes each.
 * Prefers the stop-aware allocator when one was configured; otherwise falls back
 * to the buffer manager's allocator (which takes no stop flag).
 */
private void allocateMultiple(MemoryBuffer[] dest, int size) {
  if (allocator == null) {
    bufferManager.getAllocator().allocateMultiple(dest, size, bufferFactory);
  } else {
    allocator.allocateMultiple(dest, size, bufferFactory, isStopped);
  }
}
/** Returns every buffer in {@code list} to the allocator (buffers that were never cached). */
private void discardUncachedBuffers(List<MemoryBuffer> list) {
  list.forEach(buffer -> bufferManager.getAllocator().deallocate(buffer));
}
/**
 * Reads the configured encode allocation size and caps it at the allocator's
 * maximum allocation, since larger chunks cannot be allocated.
 *
 * @param bufferManager source of the allocator whose maximum applies
 * @param conf configuration holding {@code LLAP_IO_ENCODE_ALLOC_SIZE}
 * @return the (possibly capped) allocation size in bytes
 */
private static int determineAllocSize(BufferUsageManager bufferManager, Configuration conf) {
  long allocSize = HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_ENCODE_ALLOC_SIZE);
  int maxAllocSize = bufferManager.getAllocator().getMaxAllocation();
  if (allocSize > maxAllocSize) {
    // Reuse the already-fetched maximum instead of calling getMaxAllocation() a second
    // time; parameterized logging matches the {}-style used elsewhere in this file.
    LlapIoImpl.LOG.error("Encode allocation size {} is being capped to the maximum "
        + "allocation size {}", allocSize, maxAllocSize);
    allocSize = maxAllocSize;
  }
  return (int) allocSize;
}
return len; int maxAlloc = cache.getAllocator().getMaxAllocation(); largeBuffers = new MemoryBuffer[largeBufCount]; allocator.allocateMultiple(largeBuffers, maxAlloc, cache.getDataBufferFactory()); for (int i = 0; i < largeBuffers.length; ++i) { smallBuffer = new MemoryBuffer[1]; allocator.allocateMultiple(smallBuffer, smallSize, cache.getDataBufferFactory()); ByteBuffer bb = smallBuffer[0].getByteBufferRaw(); copyDiskDataToCacheBuffer(array, for (MemoryBuffer buffer : largeBuffers) { if (buffer == null) continue; allocator.deallocate(buffer); allocator.deallocate(smallBuffer[0]);
cacheWrapper.getAllocator().isDirectAlloc()); toRelease = new IdentityHashMap<>(); DiskRangeList drl = toRead.next;
BufferChunk lastChunk, List<ProcCacheChunk> toDecompress, List<MemoryBuffer> cacheBuffers) { MemoryBuffer futureAlloc = cacheWrapper.getAllocator().createUnallocated();
isDataReaderOpen = true; dataReader.readFileData(toRead.next, stripeOffset, cacheWrapper.getAllocator().isDirectAlloc()); toRelease = new IdentityHashMap<>(); DiskRangeList drl = toRead.next;
/**
 * Deallocates every stream buffer that was read from disk but never made it into
 * the cache, then clears each stripe's column-stream map.
 */
public void discardData() {
  LlapIoImpl.LOG.debug("Discarding disk data (if any wasn't cached)");
  for (CacheStripeData stripe : stripes) {
    // Nothing to release for stripes without column streams.
    if (stripe.colStreams != null && !stripe.colStreams.isEmpty()) {
      for (List<CacheStreamData> streamList : stripe.colStreams.values()) {
        for (CacheStreamData stream : streamList) {
          for (MemoryBuffer buf : stream.data) {
            if (LlapIoImpl.CACHE_LOGGER.isTraceEnabled()) {
              LlapIoImpl.CACHE_LOGGER.trace("Deallocating " + buf);
            }
            bufferManager.getAllocator().deallocate(buf);
          }
        }
      }
      stripe.colStreams.clear();
    }
  }
}
/**
 * Fills {@code dest} with newly allocated buffers of {@code size} bytes each, using
 * the cache wrapper's data buffer factory. The stop-aware allocator is used when
 * configured; otherwise the cache wrapper's allocator (no stop flag) is used.
 */
private void allocateMultiple(MemoryBuffer[] dest, int size) {
  if (allocator == null) {
    cacheWrapper.getAllocator().allocateMultiple(dest, size, cacheWrapper.getDataBufferFactory());
  } else {
    allocator.allocateMultiple(dest, size, cacheWrapper.getDataBufferFactory(), isStopped);
  }
}
private int determineUncompressedPartSize() { // We will break the uncompressed data in the cache in the chunks that are the size // of the prevalent ORC compression buffer (the default), or maximum allocation (since we // cannot allocate bigger chunks), whichever is less. long orcCbSizeDefault = ((Number)OrcConf.BUFFER_SIZE.getDefaultValue()).longValue(); int maxAllocSize = cacheWrapper.getAllocator().getMaxAllocation(); return (int)Math.min(maxAllocSize, orcCbSizeDefault); }
/**
 * Drops one reference from {@code buffer}; on the last reference, the buffer was
 * never cached, so it is returned to the allocator. Always updates the locked-buffer metric.
 */
private void unlockBuffer(LlapAllocatorBuffer buffer) {
  boolean isLastRef = (buffer.decRef() == 0);
  if (isLastRef) {
    if (LlapIoImpl.CACHE_LOGGER.isTraceEnabled()) {
      LlapIoImpl.CACHE_LOGGER.trace("Deallocating {} that was not cached", buffer);
    }
    allocator.deallocate(buffer);
  }
  metrics.decrCacheNumLockedBuffers();
}
private static CacheChunk copyAndReplaceCandidateToNonCached( UncompressedCacheChunk candidateCached, long partOffset, long candidateEnd, DataCache cacheWrapper, MemoryBuffer[] singleAlloc) { // We thought we had the entire part to cache, but we don't; convert start to // non-cached. Since we are at the first gap, the previous stuff must be contiguous. singleAlloc[0] = null; cacheWrapper.getAllocator().allocateMultiple(singleAlloc, (int)(candidateEnd - partOffset)); MemoryBuffer buffer = singleAlloc[0]; cacheWrapper.reuseBuffer(buffer); ByteBuffer dest = buffer.getByteBufferRaw(); CacheChunk tcc = POOLS.tccPool.take(); tcc.init(buffer, partOffset, candidateEnd); copyAndReplaceUncompressedChunks(candidateCached, dest, tcc); return tcc; }
private int determineUncompressedPartSize() { // We will break the uncompressed data in the cache in the chunks that are the size // of the prevalent ORC compression buffer (the default), or maximum allocation (since we // cannot allocate bigger chunks), whichever is less. long orcCbSizeDefault = ((Number)OrcConf.BUFFER_SIZE.getDefaultValue()).longValue(); int maxAllocSize = cacheWrapper.getAllocator().getMaxAllocation(); return (int)Math.min(maxAllocSize, orcCbSizeDefault); }
/**
 * Drops one reference from {@code buffer}. When {@code handleLastDecRef} is set and
 * this was the last reference, cached buffers are handed to the cache policy and
 * uncached ones are returned to the allocator. Always updates the locked-buffer metric.
 */
private void unlockBuffer(LlapSerDeDataBuffer buffer, boolean handleLastDecRef) {
  // decRef must happen unconditionally, even if the last-ref handling is skipped.
  boolean isLastDecref = (buffer.decRef() == 0);
  if (isLastDecref && handleLastDecRef) {
    if (!buffer.isCached) {
      if (LlapIoImpl.CACHE_LOGGER.isTraceEnabled()) {
        LlapIoImpl.CACHE_LOGGER.trace("Deallocating {} that was not cached", buffer);
      }
      allocator.deallocate(buffer);
    } else {
      cachePolicy.notifyUnlock(buffer);
    }
  }
  metrics.decrCacheNumLockedBuffers();
}
++ix; cacheWrapper.getAllocator().allocateMultiple(targetBuffers, bufferSize);
/**
 * Reads the configured encode allocation size and caps it at the allocator's
 * maximum allocation, since larger chunks cannot be allocated.
 *
 * @param bufferManager source of the allocator whose maximum applies
 * @param conf configuration holding {@code LLAP_IO_ENCODE_ALLOC_SIZE}
 * @return the (possibly capped) allocation size in bytes
 */
private static int determineAllocSize(BufferUsageManager bufferManager, Configuration conf) {
  long allocSize = HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_ENCODE_ALLOC_SIZE);
  int maxAllocSize = bufferManager.getAllocator().getMaxAllocation();
  if (allocSize > maxAllocSize) {
    // Reuse the already-fetched maximum instead of calling getMaxAllocation() a second
    // time; parameterized logging matches the {}-style used elsewhere in this file.
    LlapIoImpl.LOG.error("Encode allocation size {} is being capped to the maximum "
        + "allocation size {}", allocSize, maxAllocSize);
    allocSize = maxAllocSize;
  }
  return (int) allocSize;
}
private void unlockBuffer(LlapDataBuffer buffer, boolean handleLastDecRef) { boolean isLastDecref = (buffer.decRef() == 0); if (handleLastDecRef && isLastDecref) { // This is kind of not pretty, but this is how we detect whether buffer was cached. // We would always set this for lookups at put time. if (buffer.declaredCachedLength != LlapDataBuffer.UNKNOWN_CACHED_LENGTH) { cachePolicy.notifyUnlock(buffer); } else { if (LlapIoImpl.CACHE_LOGGER.isTraceEnabled()) { LlapIoImpl.CACHE_LOGGER.trace("Deallocating {} that was not cached", buffer); } allocator.deallocate(buffer); } } metrics.decrCacheNumLockedBuffers(); }
/**
 * Copies an uncompressed buffer chunk into a freshly allocated (non-cached) buffer
 * and splices the resulting CacheChunk into the range list in place of {@code bc}.
 */
private static CacheChunk copyAndReplaceUncompressedToNonCached(
    BufferChunk bc, DataCache cacheWrapper, MemoryBuffer[] singleAlloc) {
  singleAlloc[0] = null;
  cacheWrapper.getAllocator().allocateMultiple(singleAlloc, bc.getLength());
  MemoryBuffer newBuffer = singleAlloc[0];
  cacheWrapper.reuseBuffer(newBuffer);
  ByteBuffer target = newBuffer.getByteBufferRaw();
  CacheChunk replacement = POOLS.tccPool.take();
  replacement.init(newBuffer, bc.getOffset(), bc.getEnd());
  copyUncompressedChunk(bc.getChunk(), target);
  bc.replaceSelfWith(replacement);
  return replacement;
}
@Override public void handleCacheCollision(DataCache cacheWrapper, MemoryBuffer replacementBuffer, List<MemoryBuffer> cacheBuffers) { assert originalCbIndex >= 0; // Had the put succeeded for our new buffer, it would have refcount of 2 - 1 from put, // and 1 from notifyReused call above. "Old" buffer now has the 1 from put; new buffer // is not in cache. cacheWrapper.getAllocator().deallocate(getBuffer()); cacheWrapper.reuseBuffer(replacementBuffer); // Replace the buffer in our big range list, as well as in current results. this.buffer = replacementBuffer; cacheBuffers.set(originalCbIndex, replacementBuffer); originalCbIndex = -1; // This can only happen once at decompress time. }
cacheWrapper.getAllocator().allocateMultiple( targetBuffers, (int)(partCount == 1 ? streamLen : partSize));