/**
 * Creates a reader for the byte range of {@code location} described by {@code task},
 * producing rows shaped by {@code readSchema}.
 *
 * @param location the input file to read
 * @param task the scan task supplying the split's start offset and length
 * @param readSchema the Iceberg schema to project while reading
 */
public SparkOrcReader(InputFile location,
                      FileScanTask task,
                      Schema readSchema) {
  // Translate the Iceberg schema into its ORC equivalent, recording the
  // Iceberg field-id mapping as a side effect of the conversion.
  ColumnIdMap idMapping = new ColumnIdMap();
  orcSchema = TypeConversion.toOrc(readSchema, idMapping);

  // Open the ORC file, restricted to this task's byte range.
  reader = ORC.read(location)
      .split(task.start(), task.length())
      .schema(readSchema)
      .build();

  // Set up the reusable UnsafeRow and its backing buffer/writer.
  int fieldCount = readSchema.columns().size();
  row = new UnsafeRow(fieldCount);
  holder = new BufferHolder(row, INITIAL_SIZE);
  writer = new UnsafeRowWriter(holder, fieldCount);

  // Build one value converter per top-level column of the ORC schema.
  converter = new Converter[fieldCount];
  for (int i = 0; i < fieldCount; i += 1) {
    converter[i] = buildConverter(holder, orcSchema.getChildren().get(i));
  }
}