private int determineUncompressedPartSize() {
  // We will break the uncompressed data in the cache into chunks that are the size
  // of the prevalent ORC compression buffer (the default), or the maximum allocation
  // (since we cannot allocate bigger chunks), whichever is less.
  long orcCbSizeDefault = ((Number) OrcConf.BUFFER_SIZE.getDefaultValue()).longValue();
  int maxAllocSize = cacheWrapper.getAllocator().getMaxAllocation();
  return (int) Math.min(maxAllocSize, orcCbSizeDefault);
}
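// Worked example (assuming ORC's stock compression-buffer default of 262,144 bytes,
// i.e. 256 KB): with an allocator whose max allocation is 16 MB, the part size is
// min(16 MB, 256 KB) = 256 KB; only an allocator capped below 256 KB would shrink the parts.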
@Override
public void handleCacheCollision(DataCache cacheWrapper, MemoryBuffer replacementBuffer,
    List<MemoryBuffer> cacheBuffers) {
  assert originalCbIndex >= 0;
  // Had the put succeeded for our new buffer, it would have a refcount of 2 - 1 from put,
  // and 1 from the notifyReused call above. The "old" buffer now has the 1 from put; the
  // new buffer is not in the cache.
  cacheWrapper.getAllocator().deallocate(getBuffer());
  cacheWrapper.reuseBuffer(replacementBuffer);
  // Replace the buffer in our big range list, as well as in the current results.
  this.buffer = replacementBuffer;
  cacheBuffers.set(originalCbIndex, replacementBuffer);
  originalCbIndex = -1; // This can only happen once, at decompress time.
}
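// A minimal, self-contained model of the collision protocol described in the comment
// above. Every name here (SimpleCache, CacheBuffer, put) is a hypothetical stand-in
// for illustration, not Hive's API: put() returns null if our buffer was inserted, or
// the buffer that won the race with one reference already taken for the caller; the
// loser then deallocates its own copy and adopts the winner, which is what
// handleCacheCollision does with replacementBuffer.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

final class CacheBuffer {
  final AtomicInteger refCount = new AtomicInteger();
}

final class SimpleCache {
  private final Map<Long, CacheBuffer> byOffset = new ConcurrentHashMap<>();

  CacheBuffer put(long offset, CacheBuffer ours) {
    // Caller is assumed to already hold one reference on `ours` (the notifyReused).
    CacheBuffer prev = byOffset.putIfAbsent(offset, ours);
    if (prev == null) {
      ours.refCount.incrementAndGet(); // The cache's own reference: refcount is now 2.
      return null;                     // Our buffer won; the caller keeps using it.
    }
    prev.refCount.incrementAndGet();   // Take the caller's reference on the winner.
    return prev;                       // Collision: the caller must drop its own buffer.
  }
}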
@Override
public void handleCacheCollision(DataCache cacheWrapper, MemoryBuffer replacementBuffer,
    List<MemoryBuffer> cacheBuffers) {
  assert cacheBuffers == null;
  // This is done at the pre-read stage where there's nothing special w/refcounts. Just release.
  cacheWrapper.getAllocator().deallocate(getBuffer());
  // Replace the buffer in our big range list, as well as in the current results.
  this.setBuffer(replacementBuffer);
}
// Prefer the stop-aware allocator when the cache provides one; otherwise fall back to
// the plain cache allocator, which cannot be interrupted mid-allocation.
private void allocateMultiple(MemoryBuffer[] dest, int size) {
  if (allocator != null) {
    allocator.allocateMultiple(dest, size, cacheWrapper.getDataBufferFactory(), isStopped);
  } else {
    cacheWrapper.getAllocator().allocateMultiple(dest, size, cacheWrapper.getDataBufferFactory());
  }
}
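// allocateMultiple above dispatches on a capability probe done once in the constructor
// (see EncodedReaderImpl below): the allocator is downcast to StoppableAllocator if it
// supports stop-aware allocation, else the field stays null. A minimal sketch of the
// same probe-once, dispatch-everywhere pattern with hypothetical interfaces:
interface BasicAllocator {
  void allocateMultiple(byte[][] dest, int size);
}

interface StopAwareAllocator extends BasicAllocator {
  // Extended variant that can abandon a blocked allocation when isStopped flips.
  void allocateMultiple(byte[][] dest, int size,
      java.util.concurrent.atomic.AtomicBoolean isStopped);
}

final class AllocatingReader {
  private final BasicAllocator basic;
  private final StopAwareAllocator stopAware; // Null when the capability is absent.
  private final java.util.concurrent.atomic.AtomicBoolean isStopped =
      new java.util.concurrent.atomic.AtomicBoolean();

  AllocatingReader(BasicAllocator alloc) {
    this.basic = alloc;
    this.stopAware = alloc instanceof StopAwareAllocator ? (StopAwareAllocator) alloc : null;
  }

  void allocateMultiple(byte[][] dest, int size) {
    if (stopAware != null) {
      stopAware.allocateMultiple(dest, size, isStopped);
    } else {
      basic.allocateMultiple(dest, size);
    }
  }
}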
    BufferChunk lastChunk, List<ProcCacheChunk> toDecompress, List<MemoryBuffer> cacheBuffers) {
  MemoryBuffer futureAlloc = cacheWrapper.getAllocator().createUnallocated();
public EncodedReaderImpl(Object fileKey, List<OrcProto.Type> types, TypeDescription fileSchema,
    org.apache.orc.CompressionKind kind, WriterVersion version, int bufferSize, long strideRate,
    DataCache cacheWrapper, DataReader dataReader, PoolFactory pf, IoTrace trace,
    boolean useCodecPool, String tag) throws IOException {
  this.fileKey = fileKey;
  this.compressionKind = kind;
  this.isCompressed = kind != org.apache.orc.CompressionKind.NONE;
  this.isCodecFromPool = useCodecPool;
  this.codec = useCodecPool ? OrcCodecPool.getCodec(kind) : WriterImpl.createCodec(kind);
  this.types = types;
  this.fileSchema = fileSchema; // Note: this is redundant with types
  this.version = version;
  this.bufferSize = bufferSize;
  this.rowIndexStride = strideRate;
  this.cacheWrapper = cacheWrapper;
  Allocator alloc = cacheWrapper.getAllocator();
  this.allocator = alloc instanceof StoppableAllocator ? (StoppableAllocator) alloc : null;
  this.dataReader = dataReader;
  this.trace = trace;
  this.tag = tag;
  if (POOLS != null) return;
  if (pf == null) {
    pf = new NoopPoolFactory();
  }
  Pools pools = createPools(pf);
  synchronized (POOLS_CREATION_LOCK) {
    if (POOLS != null) return;
    POOLS = pools;
  }
}
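// The POOLS setup at the end of the constructor is a check-then-lock-then-recheck
// initialization: the potentially expensive createPools() call runs outside the lock,
// so a racing thread may build a Pools instance that is simply discarded, and the lock
// guards only the final assignment. A minimal sketch of the idiom (all names here are
// hypothetical); note the field needs to be volatile, or otherwise safely published,
// for the unsynchronized fast-path read to be correct under the Java memory model:
final class PoolHolder {
  private static final Object CREATION_LOCK = new Object();
  private static volatile Object pools; // Stands in for the Pools type.

  static void ensureInitialized() {
    if (pools != null) return;          // Fast path: already published.
    Object candidate = buildPools();    // Built outside the lock; may be wasted work.
    synchronized (CREATION_LOCK) {
      if (pools != null) return;        // Another thread won the race.
      pools = candidate;
    }
  }

  private static Object buildPools() {
    return new Object(); // Placeholder for the real pool construction.
  }
}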
private static CacheChunk copyAndReplaceCandidateToNonCached(
    UncompressedCacheChunk candidateCached, long partOffset,
    long candidateEnd, DataCache cacheWrapper, MemoryBuffer[] singleAlloc) {
  // We thought we had the entire part to cache, but we don't; convert the start to
  // non-cached. Since we are at the first gap, the previous stuff must be contiguous.
  singleAlloc[0] = null;
  cacheWrapper.getAllocator().allocateMultiple(singleAlloc, (int) (candidateEnd - partOffset));
  MemoryBuffer buffer = singleAlloc[0];
  cacheWrapper.reuseBuffer(buffer);
  ByteBuffer dest = buffer.getByteBufferRaw();
  CacheChunk tcc = POOLS.tccPool.take();
  tcc.init(buffer, partOffset, candidateEnd);
  copyAndReplaceUncompressedChunks(candidateCached, dest, tcc);
  return tcc;
}
csd.getCacheBuffers().remove(chunk.getBuffer());
try {
  cacheWrapper.getAllocator().deallocate(chunk.getBuffer());
} catch (Throwable t) {
  LOG.error("Ignoring the cleanup error after another error", t);
++ix;
cacheWrapper.getAllocator().allocateMultiple(targetBuffers, bufferSize);
private void releaseInitialRefcounts(DiskRangeList current) {
  while (current != null) {
    DiskRangeList toFree = current;
    current = current.next;
    if (toFree instanceof ProcCacheChunk) {
      ProcCacheChunk pcc = (ProcCacheChunk) toFree;
      if (pcc.originalData != null) {
        // TODO: can this still happen? We now clean these up explicitly to avoid other issues.
        // This can only happen in case of failure - we read some data, but didn't decompress
        // it. Deallocate the buffer directly; do not decref.
        if (pcc.getBuffer() != null) {
          cacheWrapper.getAllocator().deallocate(pcc.getBuffer());
        }
        continue;
      }
    }
    if (!(toFree instanceof CacheChunk)) continue;
    CacheChunk cc = (CacheChunk) toFree;
    if (cc.getBuffer() == null) continue;
    MemoryBuffer buffer = cc.getBuffer();
    cacheWrapper.releaseBuffer(buffer);
    cc.setBuffer(null);
  }
}
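// Note how releaseInitialRefcounts captures current.next before releasing the node:
// once a chunk's buffer is released, the node must not be trusted for navigation.
// The same advance-then-free idiom in isolation, with a hypothetical node type:
final class ListRelease {
  static final class ResourceNode {
    ResourceNode next;
    AutoCloseable resource;
  }

  static void releaseAll(ResourceNode current) throws Exception {
    while (current != null) {
      ResourceNode toFree = current;
      current = current.next;        // Capture the successor first...
      if (toFree.resource != null) { // ...then release; toFree may be recycled here.
        toFree.resource.close();
        toFree.resource = null;
      }
    }
  }
}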
  return len;
int maxAlloc = cache.getAllocator().getMaxAllocation();
FileSystem fs = path.getFileSystem(conf);
FSDataInputStream is = fs.open(path, bufferSize);
Allocator allocator = cache.getAllocator();
long sizeRead = 0;
while (current != null) {
private static CacheChunk copyAndReplaceUncompressedToNonCached(
    BufferChunk bc, DataCache cacheWrapper, MemoryBuffer[] singleAlloc) {
  singleAlloc[0] = null;
  cacheWrapper.getAllocator().allocateMultiple(singleAlloc, bc.getLength());
  MemoryBuffer buffer = singleAlloc[0];
  cacheWrapper.reuseBuffer(buffer);
  ByteBuffer dest = buffer.getByteBufferRaw();
  CacheChunk tcc = POOLS.tccPool.take();
  tcc.init(buffer, bc.getOffset(), bc.getEnd());
  copyUncompressedChunk(bc.getChunk(), dest);
  bc.replaceSelfWith(tcc);
  return tcc;
}
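// bc.replaceSelfWith(tcc) splices the new cache chunk into the range list in place of
// the raw buffer chunk, so the caller's iteration continues seamlessly over the
// replacement. A minimal sketch of such an in-place splice on a doubly linked node
// (hypothetical type; Hive's DiskRangeList provides the real equivalent):
class RangeNode {
  RangeNode prev, next;

  void replaceSelfWith(RangeNode other) {
    other.prev = prev;
    other.next = next;
    if (prev != null) prev.next = other;
    if (next != null) next.prev = other;
    prev = next = null; // Detach this node so stale links cannot be followed.
  }
}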
isDataReaderOpen = true;
dataReader.readFileData(toRead.next, stripeOffset,
    cacheWrapper.getAllocator().isDirectAlloc());
toRelease = new IdentityHashMap<>();
DiskRangeList drl = toRead.next;
cacheWrapper.getAllocator().allocateMultiple(
    targetBuffers, (int) (partCount == 1 ? streamLen : partSize));