// Reports the total size of the wrapped data source, in bytes.
@Override
public long getSize()
{
    long sourceSize = dataSource.getSize();
    return sourceSize;
}
@Override
public long getSize()
{
    // Pure delegation: size is whatever the underlying data source reports.
    return this.dataSource.getSize();
}
// Forwards the size query to the delegate data source.
@Override
public long getSize()
{
    long delegateSize = delegate.getSize();
    return delegateSize;
}
@Override
public long getSize()
{
    // Thin wrapper: defer entirely to the delegate.
    return this.delegate.getSize();
}
/**
 * Wraps the given data source in a {@link CachingOrcDataSource} when the whole
 * file is small enough to be held in memory ({@code <= maxCacheSize}); otherwise
 * returns the data source unchanged. Already-caching sources are returned as-is.
 */
private static OrcDataSource wrapWithCacheIfTiny(OrcDataSource dataSource, DataSize maxCacheSize)
{
    // Avoid double-wrapping a source that already caches.
    if (dataSource instanceof CachingOrcDataSource) {
        return dataSource;
    }
    // Too big to cache in full: leave it unwrapped.
    if (dataSource.getSize() > maxCacheSize.toBytes()) {
        return dataSource;
    }
    // Small enough: every requested offset maps to one range covering the entire file.
    DiskRange wholeFileRange = new DiskRange(0, toIntExact(dataSource.getSize()));
    return new CachingOrcDataSource(dataSource, desiredOffset -> wholeFileRange);
}
/**
 * Returns a caching wrapper around {@code dataSource} when the file fits within
 * {@code maxCacheSize}; returns the original source when it is already caching
 * or too large to cache whole.
 */
private static OrcDataSource wrapWithCacheIfTiny(OrcDataSource dataSource, DataSize maxCacheSize)
{
    boolean alreadyCaching = dataSource instanceof CachingOrcDataSource;
    if (alreadyCaching) {
        // Nothing to do: the source already serves reads from a cache.
        return dataSource;
    }
    if (dataSource.getSize() > maxCacheSize.toBytes()) {
        // File exceeds the cache budget; serve reads directly.
        return dataSource;
    }
    // Cache the complete file: the range function ignores the offset and always
    // yields the single range spanning the whole file.
    DiskRange fullRange = new DiskRange(0, toIntExact(dataSource.getSize()));
    return new CachingOrcDataSource(dataSource, offset -> fullRange);
}
// Builds an OrcRecordReader over this reader's footer/metadata state for the given
// column subset, predicate, and byte range [offset, offset + length).
// NOTE(review): this line is truncated in this view — the OrcRecordReader argument
// list continues past `systemMemoryUsage,`; verify the full call in the original file.
public OrcRecordReader createRecordReader( Map<Integer, Type> includedColumns, OrcPredicate predicate, long offset, long length, DateTimeZone hiveStorageTimeZone, AggregatedMemoryContext systemMemoryUsage, int initialBatchSize) { return new OrcRecordReader( requireNonNull(includedColumns, "includedColumns is null"), requireNonNull(predicate, "predicate is null"), footer.getNumberOfRows(), footer.getStripes(), footer.getFileStats(), metadata.getStripeStatsList(), orcDataSource, offset, length, footer.getTypes(), decompressor, footer.getRowsInRowGroup(), requireNonNull(hiveStorageTimeZone, "hiveStorageTimeZone is null"), hiveWriterVersion, metadataReader, maxMergeDistance, tinyStripeThreshold, maxBlockSize, footer.getUserMetadata(), systemMemoryUsage,
// Factory method: wires footer data (row counts, stripes, stats, types), the data
// source, and the caller-supplied predicate/time-zone/memory context into a new
// OrcRecordReader. Null-checks the caller-owned arguments up front.
// NOTE(review): truncated in this view after `systemMemoryUsage,` — confirm the
// remaining constructor arguments against the original file.
public OrcRecordReader createRecordReader( Map<Integer, Type> includedColumns, OrcPredicate predicate, long offset, long length, DateTimeZone hiveStorageTimeZone, AggregatedMemoryContext systemMemoryUsage, int initialBatchSize) { return new OrcRecordReader( requireNonNull(includedColumns, "includedColumns is null"), requireNonNull(predicate, "predicate is null"), footer.getNumberOfRows(), footer.getStripes(), footer.getFileStats(), metadata.getStripeStatsList(), orcDataSource, offset, length, footer.getTypes(), decompressor, footer.getRowsInRowGroup(), requireNonNull(hiveStorageTimeZone, "hiveStorageTimeZone is null"), hiveWriterVersion, metadataReader, maxMergeDistance, tinyStripeThreshold, maxBlockSize, footer.getUserMetadata(), systemMemoryUsage,
// Fragment (enclosing method not visible in this view): rejects files whose total
// size is not strictly greater than the ORC magic header length — such a file
// cannot contain any content and is reported as corrupt.
long size = orcDataSource.getSize(); if (size <= MAGIC.length()) { throw new OrcCorruptionException(orcDataSource.getId(), "Invalid file size %s", size);
// Fragment: same size sanity check — a valid ORC file must be longer than the
// MAGIC marker, otherwise it is flagged as corrupt with the offending size.
// NOTE(review): the surrounding method body continues outside this view.
long size = orcDataSource.getSize(); if (size <= MAGIC.length()) { throw new OrcCorruptionException(orcDataSource.getId(), "Invalid file size %s", size);