/**
 * Creates an {@link EncodedReader} over this file's streams, backed by the
 * supplied cache and low-level data reader.
 *
 * @param fileKey opaque key identifying this file in the cache layer
 * @param dataCache cache used to store/look up decoded stream data
 * @param dataReader reader used to fetch raw bytes from storage
 * @param pf pool factory for reusable reader-side objects
 * @return a new {@code EncodedReaderImpl} wired to this reader's file-level
 *         state (types, codec, bufferSize, rowIndexStride)
 * @throws IOException if the underlying reader cannot be created
 */
@Override
public EncodedReader encodedReader(
    Object fileKey, DataCache dataCache, DataReader dataReader, PoolFactory pf)
    throws IOException {
  return new EncodedReaderImpl(fileKey, types, codec, bufferSize,
      rowIndexStride, dataCache, dataReader, pf);
}
}
return addOneCompressionBlockByteBuffer(slice, isUncompressed, cbStartOffset, cbEndOffset, chunkLength, current, toDecompress, cacheBuffers); badEstimates.add(addIncompleteCompressionBuffer(cbStartOffset, current, 0)); return null; // This is impossible to read from this chunk. ByteBuffer copy = allocateBuffer(chunkLength, compressed.isDirect()); current.removeSelf(); if (originalPos == 0 && toRelease.remove(compressed)) { releaseBuffer(compressed, true); slice.limit(remaining); copy.put(slice); ProcCacheChunk cc = addOneCompressionBlockByteBuffer(copy, isUncompressed, cbStartOffset, cbEndOffset, remaining, (BufferChunk)next, toDecompress, cacheBuffers); if (compressed.remaining() <= 0 && toRelease.remove(compressed)) { releaseBuffer(compressed, true); // We copied the entire buffer. releaseBuffer(compressed, true); // We copied the entire buffer. badEstimates.add(addIncompleteCompressionBuffer(cbStartOffset, tmp, extraChunkCount)); return null; // This is impossible to read from this chunk.
private CacheChunk copyAndReplaceCandidateToNonCached( UncompressedCacheChunk candidateCached, long partOffset, long candidateEnd, DataCache cacheWrapper, MemoryBuffer[] singleAlloc) { // We thought we had the entire part to cache, but we don't; convert start to // non-cached. Since we are at the first gap, the previous stuff must be contiguous. singleAlloc[0] = null; trace.logPartialUncompressedData(partOffset, candidateEnd, true); allocateMultiple(singleAlloc, (int)(candidateEnd - partOffset)); MemoryBuffer buffer = singleAlloc[0]; cacheWrapper.reuseBuffer(buffer); ByteBuffer dest = buffer.getByteBufferRaw(); CacheChunk tcc = new CacheChunk(buffer, partOffset, candidateEnd); copyAndReplaceUncompressedChunks(candidateCached, dest, tcc, false); return tcc; }
throws IOException { if (!first.hasContiguousNext()) { badEstimates.add(addIncompleteCompressionBuffer( cbStartOffset, first, 0, isTracingEnabled, trace)); return null; // This is impossible to read from this chunk. int ix = readLengthBytes(first.getChunk(), result, 0); assert ix < 3; // Otherwise we wouldn't be here. DiskRangeList current = first.next; ix = readLengthBytes(currentBc.getChunk(), result, ix); if (ix == 3) return currentBc; // Done, we have 3 bytes. Continue reading this buffer. DiskRangeList tmp = current; tmp.removeSelf(); } else { badEstimates.add(addIncompleteCompressionBuffer( cbStartOffset, tmp, -1, isTracingEnabled, trace)); return null; // This is impossible to read from this chunk.
DiskRangeList current = findExactPosition(start, cOffset); if (isTracingEnabled) { LOG.trace("Starting read for [" + cOffset + "," + endCOffset + ") at " + current); prepareRangesForCompressedRead(cOffset, endCOffset, streamOffset, unlockUntilCOffset, current, csd, toRelease, toReleaseCopies, toDecompress, badEstimates) : prepareRangesForUncompressedRead( cOffset, endCOffset, streamOffset, unlockUntilCOffset, current, csd); } catch (Exception ex) { releaseBuffers(toReleaseCopies, false); return lastUncompressed; // Nothing to do. ByteBuffer dest = chunk.getBuffer().getByteBufferRaw(); if (chunk.isOriginalDataCompressed) { decompressChunk(chunk.originalData, codec, dest); } else { copyUncompressedChunk(chunk.originalData, dest); releaseBuffers(toReleaseCopies, false); long[] collisionMask = cacheWrapper.putFileData( fileKey, cacheKeys, targetBuffers, baseOffset); processCacheCollisions(collisionMask, toDecompress, targetBuffers, csd.getCacheBuffers()); ponderReleaseInitialRefcount(unlockUntilCOffset, streamOffset, chunk);
DiskRangeList current = findIntersectingPosition(start, streamOffset, streamEnd); if (isTracingEnabled) { LOG.trace("Starting pre-read for [" + streamOffset + "," + streamEnd + ") at " + current); int partSize = determineUncompressedPartSize(), partCount = (int)(streamLen / partSize) + (((streamLen % partSize) != 0) ? 1 : 0); lastUncompressed = copyAndReplaceCandidateToNonCached( candidateCached, partOffset, hasEntirePartTo, cacheWrapper, singleAlloc); candidateCached = null; copyAndReplaceCandidateToNonCached( candidateCached, partOffset, hadEntirePartTo, cacheWrapper, singleAlloc); candidateCached = null; lastUncompressed = copyAndReplaceUncompressedToNonCached(curBc, cacheWrapper, singleAlloc); allocateMultiple(targetBuffers, (int)(partCount == 1 ? streamLen : partSize)); candidateCached.setBuffer(targetBuffers[ix]); ByteBuffer dest = candidateCached.getBuffer().getByteBufferRaw(); copyAndReplaceUncompressedChunks(candidateCached, dest, candidateCached, true); candidateCached.clear(); lastUncompressed = candidateCached; long[] collisionMask = cacheWrapper.putFileData( fileKey, cacheKeys, targetBuffers, baseOffset, tag); processCacheCollisions(collisionMask, toCache, targetBuffers, null);
LOG.trace("The following columns have PRESENT streams: " + arrayToString(hasNull)); MutateHelper toRead = getDataFromCacheAndDisk( listToRead.get(), stripeOffset, hasFileId, toRelease); DiskRangeList iter = preReadUncompressedStreams(stripeOffset, colCtxs, toRead, toRelease); DiskRangeList lastCached = readEncodedStream(stripeOffset, iter, sctx.offset, sctx.offset + sctx.length, sctx.stripeLevelStream, unlockUntilCOffset, sctx.offset, toRelease); cb = createRgColumnStreamData(rgIx, isLastRg, ctx.colIx, sctx, cOffset, endCOffset, isCompressed, unlockUntilCOffset); boolean isStartOfStream = sctx.bufferIter == null; DiskRangeList lastCached = readEncodedStream(stripeOffset, (isStartOfStream ? iter : sctx.bufferIter), cOffset, endCOffset, cb, unlockUntilCOffset, sctx.offset, toRelease); } finally { if (hasErrorForEcb) { releaseEcbRefCountsOnError(ecb); releaseEcbRefCountsOnError(ecb); throw new IOException(e); releaseInitialRefcounts(toRead.next); releaseBuffers(toRelease.keySet(), true); } catch (Throwable t) {
LOG.trace("The following columns have PRESENT streams: " + arrayToString(hasNull)); for (int streamIx = 0; streamIx < ctx.streamCount; ++streamIx) { StreamContext sctx = ctx.streams[streamIx]; DiskRangeList newIter = preReadUncompressedStream( stripeOffset, iter, sctx.offset, sctx.offset + sctx.length); if (newIter != null) { releaseBuffers(toRelease.keySet(), true); toRelease = null; DiskRangeList lastCached = readEncodedStream(stripeOffset, iter, sctx.offset, sctx.offset + sctx.length, sctx.stripeLevelStream, unlockUntilCOffset, sctx.offset, toRelease); cb = createRgColumnStreamData( rgIx, isLastRg, ctx.colIx, sctx, cOffset, endCOffset, isCompressed); boolean isStartOfStream = sctx.bufferIter == null; DiskRangeList lastCached = readEncodedStream(stripeOffset, (isStartOfStream ? iter : sctx.bufferIter), cOffset, endCOffset, cb, unlockUntilCOffset, sctx.offset, toRelease); releaseInitialRefcounts(toRead.next); releaseBuffers(toRelease.keySet(), true); releaseCacheChunksIntoObjectPool(toRead.next);
LOG.trace("Adding an already-uncompressed buffer " + cc.getBuffer()); ponderReleaseInitialRefcount(unlockUntilCOffset, streamOffset, cc); lastUncompressed = cc; next = current.next; ProcCacheChunk newCached = addOneCompressionBuffer(bc, columnStreamData.getCacheBuffers(), toDecompress, toRelease, toReleaseCopies, badEstimates); lastUncompressed = (newCached == null) ? lastUncompressed : newCached;
/**
 * Copies a single uncompressed buffer chunk into a newly allocated buffer and
 * replaces the original chunk in the range list with the non-cached copy.
 *
 * @param bc the buffer chunk to copy and replace
 * @param cacheWrapper cache used for buffer allocation bookkeeping
 * @param singleAlloc single-element scratch array used for allocation
 * @return the new non-cached chunk that took {@code bc}'s place
 */
private CacheChunk copyAndReplaceUncompressedToNonCached(
    BufferChunk bc, DataCache cacheWrapper, MemoryBuffer[] singleAlloc) {
  singleAlloc[0] = null;
  trace.logPartialUncompressedData(bc.getOffset(), bc.getEnd(), false);
  allocateMultiple(singleAlloc, bc.getLength());
  MemoryBuffer newBuffer = singleAlloc[0];
  cacheWrapper.reuseBuffer(newBuffer);
  ByteBuffer target = newBuffer.getByteBufferRaw();
  CacheChunk replacement = new CacheChunk(newBuffer, bc.getOffset(), bc.getEnd());
  copyUncompressedChunk(bc.getChunk(), target);
  // Splice the copy into the list in place of the original chunk.
  bc.replaceSelfWith(replacement);
  return replacement;
}
private static CacheChunk copyAndReplaceCandidateToNonCached( UncompressedCacheChunk candidateCached, long partOffset, long candidateEnd, DataCache cacheWrapper, MemoryBuffer[] singleAlloc) { // We thought we had the entire part to cache, but we don't; convert start to // non-cached. Since we are at the first gap, the previous stuff must be contiguous. singleAlloc[0] = null; cacheWrapper.getAllocator().allocateMultiple(singleAlloc, (int)(candidateEnd - partOffset)); MemoryBuffer buffer = singleAlloc[0]; cacheWrapper.reuseBuffer(buffer); ByteBuffer dest = buffer.getByteBufferRaw(); CacheChunk tcc = POOLS.tccPool.take(); tcc.init(buffer, partOffset, candidateEnd); copyAndReplaceUncompressedChunks(candidateCached, dest, tcc); return tcc; }
/**
 * Copies a single uncompressed buffer chunk into a buffer allocated from the
 * cache's allocator and replaces the original chunk in the range list with the
 * non-cached, pool-backed copy (static variant).
 *
 * @param bc the buffer chunk to copy and replace
 * @param cacheWrapper cache whose allocator provides the new buffer
 * @param singleAlloc single-element scratch array used for allocation
 * @return the pooled non-cached chunk that took {@code bc}'s place
 */
private static CacheChunk copyAndReplaceUncompressedToNonCached(
    BufferChunk bc, DataCache cacheWrapper, MemoryBuffer[] singleAlloc) {
  singleAlloc[0] = null;
  cacheWrapper.getAllocator().allocateMultiple(singleAlloc, bc.getLength());
  MemoryBuffer newBuffer = singleAlloc[0];
  cacheWrapper.reuseBuffer(newBuffer);
  ByteBuffer target = newBuffer.getByteBufferRaw();
  // Reuse a pooled chunk object instead of allocating a new one.
  CacheChunk replacement = POOLS.tccPool.take();
  replacement.init(newBuffer, bc.getOffset(), bc.getEnd());
  copyUncompressedChunk(bc.getChunk(), target);
  // Splice the copy into the list in place of the original chunk.
  bc.replaceSelfWith(replacement);
  return replacement;
}
DiskRangeList current = findExactPosition(start, cOffset); if (isTracingEnabled) { LOG.trace("Starting read for [" + cOffset + "," + endCOffset + ") at " + current); prepareRangesForCompressedRead(cOffset, endCOffset, streamOffset, unlockUntilCOffset, current, csd, toRelease, toReleaseCopies, toDecompress, badEstimates) : prepareRangesForUncompressedRead( cOffset, endCOffset, streamOffset, unlockUntilCOffset, current, csd); } catch (Exception ex) { releaseBuffers(toReleaseCopies, false); return lastUncompressed; // Nothing to do. allocateMultiple(targetBuffers, bufferSize); isAllocated = true; } finally { boolean isOk = false; try { decompressChunk(chunk.originalData, codec, dest); isOk = true; } finally { copyUncompressedChunk(chunk.originalData, dest); releaseBuffers(toReleaseCopies, false); long[] collisionMask = cacheWrapper.putFileData( fileKey, cacheKeys, targetBuffers, baseOffset, tag);
DiskRangeList current = findIntersectingPosition(start, streamOffset, streamEnd); if (isTracingEnabled) { LOG.trace("Starting pre-read for [" + streamOffset + "," + streamEnd + ") at " + current); int partSize = determineUncompressedPartSize(), partCount = (int)(streamLen / partSize) + (((streamLen % partSize) != 0) ? 1 : 0); if (noMoreDataForPart && hasEntirePartTo < partEnd && candidateCached != null) { lastUncompressed = copyAndReplaceCandidateToNonCached( candidateCached, partOffset, hasEntirePartTo, cacheWrapper, singleAlloc); candidateCached = null; copyAndReplaceCandidateToNonCached( candidateCached, partOffset, hadEntirePartTo, cacheWrapper, singleAlloc); candidateCached = null; lastUncompressed = copyAndReplaceUncompressedToNonCached(curBc, cacheWrapper, singleAlloc); candidateCached.setBuffer(targetBuffers[ix]); ByteBuffer dest = candidateCached.getBuffer().getByteBufferRaw(); copyAndReplaceUncompressedChunks(candidateCached, dest, candidateCached); candidateCached.clear(); lastUncompressed = candidateCached; processCacheCollisions(collisionMask, toCache, targetBuffers, null);
LOG.trace("Adding an already-uncompressed buffer " + cc.getBuffer()); ponderReleaseInitialRefcount(unlockUntilCOffset, streamOffset, cc); lastUncompressed = cc; next = current.next; ProcCacheChunk newCached = addOneCompressionBuffer(bc, columnStreamData.getCacheBuffers(), toDecompress, toRelease, toReleaseCopies, badEstimates); lastUncompressed = (newCached == null) ? lastUncompressed : newCached;
current = readLengthBytesFromSmallBuffers( current, cbStartOffset, bytes, badEstimates, isTracingEnabled, trace); if (current == null) return null; return addOneCompressionBlockByteBuffer(slice, isUncompressed, cbStartOffset, cbEndOffset, chunkLength, current, toDecompress, cacheBuffers, false); badEstimates.add(addIncompleteCompressionBuffer( cbStartOffset, current, 0, isTracingEnabled, trace)); return null; // This is impossible to read from this chunk. ByteBuffer copy = allocateBuffer(chunkLength, compressed.isDirect()); current.removeSelf(); if (originalPos == 0 && toRelease.remove(compressed)) { releaseBuffer(compressed, true); slice.limit(remaining); copy.put(slice); ProcCacheChunk cc = addOneCompressionBlockByteBuffer(copy, isUncompressed, cbStartOffset, cbEndOffset, remaining, (BufferChunk)next, toDecompress, cacheBuffers, true); if (compressed.remaining() <= 0 && toRelease.remove(compressed)) { releaseBuffer(compressed, true); // We copied the entire buffer. releaseBuffer(compressed, true); // We copied the entire buffer. tmp.removeSelf(); } else { badEstimates.add(addIncompleteCompressionBuffer(
/**
 * Creates an {@link EncodedReader} over this file's streams, backed by the
 * supplied cache and low-level data reader, with IO tracing and optional
 * codec pooling.
 *
 * @param fileKey opaque key identifying this file in the cache layer
 * @param dataCache cache used to store/look up decoded stream data
 * @param dataReader reader used to fetch raw bytes from storage
 * @param pf pool factory for reusable reader-side objects
 * @param trace IO trace sink for read diagnostics
 * @param useCodecPool whether to borrow codecs from a shared pool
 * @param tag tag propagated to cached buffers
 * @return a new {@code EncodedReaderImpl} wired to this reader's file-level
 *         state (types, schema, compression kind, writer version, bufferSize,
 *         rowIndexStride)
 * @throws IOException if the underlying reader cannot be created
 */
@Override
public EncodedReader encodedReader(Object fileKey, DataCache dataCache,
    DataReader dataReader, PoolFactory pf, IoTrace trace, boolean useCodecPool,
    String tag) throws IOException {
  return new EncodedReaderImpl(fileKey, types, getSchema(), compressionKind,
      getWriterVersion(), bufferSize, rowIndexStride, dataCache, dataReader,
      pf, trace, useCodecPool, tag);
}
}