/**
 * Returns the number of bytes covered by this source's disk range.
 */
@Override
public long getSize()
{
    return diskRange.getLength();
}
/**
 * Size of this input, i.e. the length of the underlying disk range.
 */
@Override
public long getSize()
{
    return diskRange.getLength();
}
/**
 * Wraps each disk range in a lazily loaded, chunked slice input so large ranges
 * are not materialized in memory up front.
 */
private <K> Map<K, OrcDataSourceInput> readLargeDiskRanges(Map<K, DiskRange> diskRanges)
{
    if (diskRanges.isEmpty()) {
        return ImmutableMap.of();
    }
    ImmutableMap.Builder<K, OrcDataSourceInput> result = ImmutableMap.builder();
    for (Entry<K, DiskRange> rangeEntry : diskRanges.entrySet()) {
        DiskRange range = rangeEntry.getValue();
        int chunkSize = toIntExact(streamBufferSize.toBytes());
        FixedLengthSliceInput input = new LazySliceInput(range.getLength(), new LazyChunkedSliceLoader(range, chunkSize));
        result.put(rangeEntry.getKey(), new OrcDataSourceInput(input, chunkSize));
    }
    return result.build();
}
/**
 * Produces a lazily loaded slice input per disk range; each range is read in
 * buffer-sized chunks only when the stream is actually consumed.
 */
private <K> Map<K, OrcDataSourceInput> readLargeDiskRanges(Map<K, DiskRange> diskRanges)
{
    if (diskRanges.isEmpty()) {
        return ImmutableMap.of();
    }
    ImmutableMap.Builder<K, OrcDataSourceInput> lazyInputs = ImmutableMap.builder();
    for (Entry<K, DiskRange> entry : diskRanges.entrySet()) {
        DiskRange range = entry.getValue();
        int bufferBytes = toIntExact(streamBufferSize.toBytes());
        FixedLengthSliceInput lazyInput = new LazySliceInput(range.getLength(), new LazyChunkedSliceLoader(range, bufferBytes));
        lazyInputs.put(entry.getKey(), new OrcDataSourceInput(lazyInput, bufferBytes));
    }
    return lazyInputs.build();
}
// NOTE(review): fragment of a larger range-partitioning loop — the enclosing method header,
// the declaration of smallRangesBuilder, and the loop's else-branch are outside this view.
// Presumably ranges at or below maxReadSizeBytes are collected as "small" and the rest as
// "large" for separate read strategies — confirm against the full method.
ImmutableMap.Builder<K, DiskRange> largeRangesBuilder = ImmutableMap.builder(); for (Entry<K, DiskRange> entry : diskRanges.entrySet()) { if (entry.getValue().getLength() <= maxReadSizeBytes) { smallRangesBuilder.put(entry);
@Override public <K> Map<K, OrcDataSourceInput> readFully(Map<K, DiskRange> diskRanges) throws IOException { ImmutableMap.Builder<K, OrcDataSourceInput> builder = ImmutableMap.builder(); // Assumption here: all disk ranges are in the same region. Therefore, serving them in arbitrary order // will not result in eviction of cache that otherwise could have served any of the DiskRanges provided. for (Map.Entry<K, DiskRange> entry : diskRanges.entrySet()) { DiskRange diskRange = entry.getValue(); byte[] buffer = new byte[diskRange.getLength()]; readFully(diskRange.getOffset(), buffer); builder.put(entry.getKey(), new OrcDataSourceInput(Slices.wrappedBuffer(buffer).getInput(), buffer.length)); } return builder.build(); }
ImmutableMap.Builder<K, DiskRange> largeRangesBuilder = ImmutableMap.builder(); for (Entry<K, DiskRange> entry : diskRanges.entrySet()) { if (entry.getValue().getLength() <= maxReadSizeBytes) { smallRangesBuilder.put(entry);
@Override public <K> Map<K, OrcDataSourceInput> readFully(Map<K, DiskRange> diskRanges) throws IOException { ImmutableMap.Builder<K, OrcDataSourceInput> builder = ImmutableMap.builder(); // Assumption here: all disk ranges are in the same region. Therefore, serving them in arbitrary order // will not result in eviction of cache that otherwise could have served any of the DiskRanges provided. for (Map.Entry<K, DiskRange> entry : diskRanges.entrySet()) { DiskRange diskRange = entry.getValue(); byte[] buffer = new byte[diskRange.getLength()]; readFully(diskRange.getOffset(), buffer); builder.put(entry.getKey(), new OrcDataSourceInput(Slices.wrappedBuffer(buffer).getInput(), buffer.length)); } return builder.build(); }
/**
 * Get a slice for the disk range from the provided buffers. The buffer ranges do not have
 * to exactly match {@code diskRange}, but {@code diskRange} must be completely contained
 * within one of the buffer ranges.
 *
 * @throws IllegalStateException if no buffer fully contains {@code diskRange}
 */
public static Slice getDiskRangeSlice(DiskRange diskRange, Map<DiskRange, byte[]> buffers)
{
    for (Entry<DiskRange, byte[]> candidate : buffers.entrySet()) {
        DiskRange candidateRange = candidate.getKey();
        if (candidateRange.contains(diskRange)) {
            // position of the requested range within the containing buffer
            int relativeOffset = toIntExact(diskRange.getOffset() - candidateRange.getOffset());
            return Slices.wrappedBuffer(candidate.getValue(), relativeOffset, diskRange.getLength());
        }
    }
    throw new IllegalStateException("No matching buffer for disk range");
}
}
/**
 * Get a slice for the disk range from the provided buffers. The buffer ranges do not have
 * to exactly match {@code diskRange}, but {@code diskRange} must be completely contained
 * within one of the buffer ranges.
 *
 * @throws IllegalStateException if no buffer fully contains {@code diskRange}
 */
public static Slice getDiskRangeSlice(DiskRange diskRange, Map<DiskRange, byte[]> buffers)
{
    for (Entry<DiskRange, byte[]> bufferEntry : buffers.entrySet()) {
        DiskRange containing = bufferEntry.getKey();
        if (!containing.contains(diskRange)) {
            continue;
        }
        // translate the absolute file offset into an offset within this buffer
        int start = toIntExact(diskRange.getOffset() - containing.getOffset());
        return Slices.wrappedBuffer(bufferEntry.getValue(), start, diskRange.getLength());
    }
    throw new IllegalStateException("No matching buffer for disk range");
}
}
/** * Merge disk ranges that are closer than {@code maxMergeDistance}. */ public static List<DiskRange> mergeAdjacentDiskRanges(Collection<DiskRange> diskRanges, DataSize maxMergeDistance, DataSize maxReadSize) { // sort ranges by start offset List<DiskRange> ranges = new ArrayList<>(diskRanges); ranges.sort(comparingLong(DiskRange::getOffset)); // merge overlapping ranges long maxReadSizeBytes = maxReadSize.toBytes(); long maxMergeDistanceBytes = maxMergeDistance.toBytes(); ImmutableList.Builder<DiskRange> result = ImmutableList.builder(); DiskRange last = ranges.get(0); for (int i = 1; i < ranges.size(); i++) { DiskRange current = ranges.get(i); DiskRange merged = last.span(current); if (merged.getLength() <= maxReadSizeBytes && last.getEnd() + maxMergeDistanceBytes >= current.getOffset()) { last = merged; } else { result.add(last); last = current; } } result.add(last); return result.build(); }
/**
 * Reads the full disk range into {@code bufferSlice} on first use; later calls are no-ops.
 * An I/O failure is surfaced as an {@link UncheckedIOException}.
 */
private void load()
{
    if (bufferSlice == null) {
        try {
            byte[] data = new byte[diskRange.getLength()];
            readFully(diskRange.getOffset(), data);
            bufferSlice = Slices.wrappedBuffer(data);
        }
        catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
}
}
/**
 * Lazily materializes the disk range into {@code bufferSlice}; idempotent once loaded.
 */
private void load()
{
    if (bufferSlice != null) {
        return;
    }
    try {
        byte[] bytes = new byte[diskRange.getLength()];
        readFully(diskRange.getOffset(), bytes);
        bufferSlice = Slices.wrappedBuffer(bytes);
    }
    catch (IOException e) {
        // callers are not declared to throw; wrap the checked exception
        throw new UncheckedIOException(e);
    }
}
}
public Map<StreamId, OrcInputStream> readDiskRanges(long stripeOffset, Map<StreamId, DiskRange> diskRanges, AggregatedMemoryContext systemMemoryUsage) throws IOException { // // Note: this code does not use the Java 8 stream APIs to avoid any extra object allocation // // transform ranges to have an absolute offset in file ImmutableMap.Builder<StreamId, DiskRange> diskRangesBuilder = ImmutableMap.builder(); for (Entry<StreamId, DiskRange> entry : diskRanges.entrySet()) { DiskRange diskRange = entry.getValue(); diskRangesBuilder.put(entry.getKey(), new DiskRange(stripeOffset + diskRange.getOffset(), diskRange.getLength())); } diskRanges = diskRangesBuilder.build(); // read ranges Map<StreamId, OrcDataSourceInput> streamsData = orcDataSource.readFully(diskRanges); // transform streams to OrcInputStream ImmutableMap.Builder<StreamId, OrcInputStream> streamsBuilder = ImmutableMap.builder(); for (Entry<StreamId, OrcDataSourceInput> entry : streamsData.entrySet()) { OrcDataSourceInput sourceInput = entry.getValue(); streamsBuilder.put(entry.getKey(), new OrcInputStream(orcDataSource.getId(), sourceInput.getInput(), decompressor, systemMemoryUsage, sourceInput.getRetainedSizeInBytes())); } return streamsBuilder.build(); }
public Map<StreamId, OrcInputStream> readDiskRanges(long stripeOffset, Map<StreamId, DiskRange> diskRanges, AggregatedMemoryContext systemMemoryUsage) throws IOException { // // Note: this code does not use the Java 8 stream APIs to avoid any extra object allocation // // transform ranges to have an absolute offset in file ImmutableMap.Builder<StreamId, DiskRange> diskRangesBuilder = ImmutableMap.builder(); for (Entry<StreamId, DiskRange> entry : diskRanges.entrySet()) { DiskRange diskRange = entry.getValue(); diskRangesBuilder.put(entry.getKey(), new DiskRange(stripeOffset + diskRange.getOffset(), diskRange.getLength())); } diskRanges = diskRangesBuilder.build(); // read ranges Map<StreamId, OrcDataSourceInput> streamsData = orcDataSource.readFully(diskRanges); // transform streams to OrcInputStream ImmutableMap.Builder<StreamId, OrcInputStream> streamsBuilder = ImmutableMap.builder(); for (Entry<StreamId, OrcDataSourceInput> entry : streamsData.entrySet()) { OrcDataSourceInput sourceInput = entry.getValue(); streamsBuilder.put(entry.getKey(), new OrcInputStream(orcDataSource.getId(), sourceInput.getInput(), decompressor, systemMemoryUsage, sourceInput.getRetainedSizeInBytes())); } return streamsBuilder.build(); }
/**
 * Loads the region containing {@code offset} into the in-memory cache, growing the
 * cache buffer when the region is larger than the current buffer.
 */
@VisibleForTesting
void readCacheAt(long offset)
        throws IOException
{
    DiskRange region = regionFinder.getRangeFor(offset);
    cachePosition = region.getOffset();
    cacheLength = region.getLength();
    // grow-only buffer: reuse the existing array whenever it is big enough
    if (cache.length < cacheLength) {
        cache = new byte[cacheLength];
    }
    dataSource.readFully(region.getOffset(), cache, 0, cacheLength);
}
/**
 * Refills the cache with the region that covers {@code offset}, reallocating the
 * backing array only if the region does not fit.
 */
@VisibleForTesting
void readCacheAt(long offset)
        throws IOException
{
    DiskRange cacheRange = regionFinder.getRangeFor(offset);
    cachePosition = cacheRange.getOffset();
    cacheLength = cacheRange.getLength();
    if (cache.length < cacheLength) {
        cache = new byte[cacheLength];
    }
    dataSource.readFully(cacheRange.getOffset(), cache, 0, cacheLength);
}
/**
 * Returns the portion of the lazily loaded buffer covering {@code nestedDiskRange},
 * which must be fully contained within this loader's disk range.
 */
public Slice loadNestedDiskRangeBuffer(DiskRange nestedDiskRange)
{
    load();
    checkArgument(diskRange.contains(nestedDiskRange));
    // position of the nested range within the loaded buffer
    int relativeOffset = toIntExact(nestedDiskRange.getOffset() - diskRange.getOffset());
    return bufferSlice.slice(relativeOffset, nestedDiskRange.getLength());
}
/**
 * Slices out the bytes for {@code nestedDiskRange} from the (lazily loaded) buffer.
 * The nested range must lie entirely inside this loader's disk range.
 */
public Slice loadNestedDiskRangeBuffer(DiskRange nestedDiskRange)
{
    load();
    checkArgument(diskRange.contains(nestedDiskRange));
    int start = toIntExact(nestedDiskRange.getOffset() - diskRange.getOffset());
    return bufferSlice.slice(start, nestedDiskRange.getLength());
}
// NOTE(review): fragment of a range-merging loop — the enclosing method, the declarations of
// ranges/last/maxReadSizeBytes/maxMergeDistanceBytes, and the loop's else-branch are outside
// this view. Two ranges appear to be merged when their combined span stays within the
// read-size cap and the gap is within the merge distance — confirm against the full method.
DiskRange current = ranges.get(i); DiskRange merged = last.span(current); if (merged.getLength() <= maxReadSizeBytes && last.getEnd() + maxMergeDistanceBytes >= current.getOffset()) { last = merged;