/**
 * Creates a {@link VectorizedRowBatch} for this reader's schema.
 *
 * @param useDecimal64 when {@code true}, build a V2 batch (decimal columns use
 *                     Decimal64ColumnVector); otherwise build a standard batch.
 * @return a fresh, empty row batch matching {@code this.schema}
 */
public VectorizedRowBatch createRowBatch(boolean useDecimal64) {
  if (useDecimal64) {
    return this.schema.createRowBatchV2();
  }
  return this.schema.createRowBatch();
}
/**
 * Builds a record reader over {@code fileReader}, choosing the row-batch flavor from
 * configuration: a V2 batch (Decimal64ColumnVector-backed) when the vectorized input
 * format support level is "decimal_64", a standard batch otherwise.
 *
 * @param fileReader the ORC file reader this record reader wraps
 * @param options    read options forwarded to the superclass
 * @param conf       Hadoop configuration; may be null, in which case the standard batch is used
 * @throws IOException propagated from the superclass constructor
 */
protected RecordReaderImpl(ReaderImpl fileReader, Reader.Options options, final Configuration conf) throws IOException { super(fileReader, options); // Constant-first equalsIgnoreCase avoids an NPE should getVar(...) ever return null. final boolean useDecimal64ColumnVectors = conf != null && "decimal_64".equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED)); batch = useDecimal64ColumnVectors ? this.schema.createRowBatchV2() : this.schema.createRowBatch(); rowInBatch = 0; }
/**
 * Wraps a reader over a delete-delta file and primes it with its first batch.
 *
 * Batch flavor is chosen from {@code HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED}:
 * "decimal_64" selects a V2 batch, anything else a standard batch. The first
 * {@code nextBatch} is issued eagerly; if the file yields no batch at all,
 * {@code batch} is set to null (later code presumably treats a null batch as
 * "no events" — confirm against the consumer). When a batch was read and the
 * table is bucketed, {@code checkBucketId()} validates it immediately.
 *
 * NOTE(review): statement order matters here — nextBatch must precede
 * checkBucketId, and indexPtrInBatch/numEvents are initialized after the read.
 *
 * @param deleteDeltaReader reader over the delete-delta ORC file
 * @param deleteDeltaFile   path of that file (kept for diagnostics)
 * @param readerOptions     options passed to {@code rowsOptions}
 * @param bucket            bucket id this split belongs to
 * @param validWriteIdList  write-id filter applied by the caller's merge logic
 * @param isBucketedTable   whether bucket-id validation of batches applies
 * @param conf              job configuration (must be non-null; getVar is called on it)
 * @param keyInterval       key range of interest for the enclosing merger
 * @param orcSplit          originating split (kept for diagnostics)
 * @throws IOException from opening the row reader or reading the first batch
 */
DeleteReaderValue(Reader deleteDeltaReader, Path deleteDeltaFile, Reader.Options readerOptions, int bucket, ValidWriteIdList validWriteIdList, boolean isBucketedTable, final JobConf conf, OrcRawRecordMerger.KeyInterval keyInterval, OrcSplit orcSplit) throws IOException { this.reader = deleteDeltaReader; this.deleteDeltaFile = deleteDeltaFile; this.recordReader = deleteDeltaReader.rowsOptions(readerOptions, conf); this.bucketForSplit = bucket; final boolean useDecimal64ColumnVector = HiveConf.getVar(conf, ConfVars .HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED).equalsIgnoreCase("decimal_64"); if (useDecimal64ColumnVector) { this.batch = deleteDeltaReader.getSchema().createRowBatchV2(); } else { this.batch = deleteDeltaReader.getSchema().createRowBatch(); } if (!recordReader.nextBatch(batch)) { // Read the first batch. this.batch = null; // Oh! the first batch itself was null. Close the reader. } this.indexPtrInBatch = 0; this.validWriteIdList = validWriteIdList; this.isBucketedTable = isBucketedTable; if(batch != null) { checkBucketId();//check 1st batch } this.keyInterval = keyInterval; this.orcSplit = orcSplit; this.numEvents = deleteDeltaReader.getNumberOfRows(); LOG.debug("Num events stats({},x,x)", numEvents); }
// NOTE(review): mid-method fragment — the enclosing method signature is not visible here,
// so this cannot be safely restructured. It opens two record readers (vectorized and
// row-by-row, presumably over the same data for comparison — confirm against the caller)
// and allocates a V2 (Decimal64ColumnVector-backed) batch from the reader's schema.
RecordReaderImpl vrr = (RecordReaderImpl) vreader.rows(); RecordReaderImpl rr = (RecordReaderImpl) reader.rows(); VectorizedRowBatch batch = reader.getSchema().createRowBatchV2(); OrcStruct row = null;
// NOTE(review): mid-method fragment — enclosing method not visible; left byte-identical.
// Allocates a V2 batch and marks it as having 1 data column and 0 partition columns
// (per the inline comment); previousPayload is seeded at Long.MIN_VALUE, presumably
// to track monotonicity of a payload field — confirm against the surrounding loop.
VectorizedRowBatch vectorizedRowBatch = schema.createRowBatchV2(); vectorizedRowBatch.setPartitionInfo(1, 0); // set data column count as 1. long previousPayload = Long.MIN_VALUE;
// NOTE(review): this fragment is syntactically incomplete as shown — the second
// `.schema(readerSchema) .include(...)` chain has no visible receiver, so lines are
// missing between the two halves (likely a second `reader.rowsOptions(new Reader.Options()`
// call). Left byte-identical; do not edit without the full method in view.
// Both halves build a V2 batch from readerSchema and then pull out column 1's struct
// fields 0 and 1 as LongColumnVectors; the second half additionally restricts the read
// to a subset of columns via include[].
RecordReader rows = reader.rowsOptions(new Reader.Options() .schema(readerSchema)); batch = readerSchema.createRowBatchV2(); lcv = ((LongColumnVector) ((StructColumnVector) batch.cols[1]).fields[0]); LongColumnVector future1 = ((LongColumnVector) ((StructColumnVector) batch.cols[1]).fields[1]); .schema(readerSchema) .include(new boolean[]{false, true, true, true, false, false, true})); batch = readerSchema.createRowBatchV2(); lcv = ((LongColumnVector) ((StructColumnVector) batch.cols[1]).fields[0]); future1 = ((LongColumnVector) ((StructColumnVector) batch.cols[1]).fields[1]);