/**
 * Custom serialization hook. Writes, in this exact order: the batch size, the
 * Hadoop configuration, the ORC schema rendered as its string form, the
 * projected field indexes (count followed by each index), and the pushed-down
 * conjunct predicates (count followed by each predicate object).
 *
 * NOTE(review): the matching readObject (not visible in this view) must read
 * these fields back in the same order — keep the two methods in sync.
 *
 * @param out stream to serialize into
 * @throws IOException if any write fails
 */
private void writeObject(ObjectOutputStream out) throws IOException {
    out.writeInt(batchSize);
    this.conf.write(out);
    out.writeUTF(schema.toString());
    final int[] projectedFields = selectedFields;
    out.writeInt(projectedFields.length);
    for (int i = 0; i < projectedFields.length; i++) {
        out.writeInt(projectedFields[i]);
    }
    out.writeInt(conjunctPredicates.size());
    for (Predicate predicate : conjunctPredicates) {
        out.writeObject(predicate);
    }
}
/** Returns the ORC schema rendered as its type-description string (exposed for tests only). */
@VisibleForTesting
String getSchema() {
    return this.schema.toString();
}
// NOTE(review): fragment of a larger log/exception message (the statement starts
// outside this view). Appends the reader schema, or "NULL" when absent, plus the
// ACID-table-scan flag.
" reader schema " + (readerSchema == null ? "NULL" : readerSchema.toString()) + " ACID scan property " + isAcidTableScan);
// Debug-only trace for a child column that carries no data; the isDebugEnabled()
// guard avoids building the concatenated message when debug logging is off.
// NOTE(review): the closing brace of this if-block lies outside this view.
if (LOG.isDebugEnabled()) { LOG.debug("Column at " + childIx + " " + children.get(childIx).getId() + ":" + children.get(childIx).toString() + " has no data");
// NOTE(review): fragment of a larger log/exception message (the statement starts
// outside this view). Appends the reader schema, or "NULL" when absent, plus the
// transactional-table-scan flag. Near-duplicate of the ACID-scan variant above —
// presumably from a different file/version; confirm against full sources.
" reader schema " + (readerSchema == null ? "NULL" : readerSchema.toString()) + " transactional scan property " + isTransactionalTableScan);
// Fallback arm of an enclosing switch (not visible here): any unhandled schema
// category is rejected, with the rendered schema string included for diagnosis.
default: throw new IllegalArgumentException("Unknown type " + schema.toString());
// Test fragment: opens the ORC file, takes child #5 of the top-level schema, and
// asserts its string form matches the expected nested struct layout.
// NOTE(review): this three-statement sequence appears twice verbatim on this line —
// if both copies share one scope, `r` and `rowSchema` are redeclared and this will
// not compile; looks like a copy/paste or extraction artifact — verify against the
// full test method.
Reader r = OrcFile.createReader(fileStatus.getPath(), readerOptions); TypeDescription rowSchema = r.getSchema().getChildren().get(5); Assert.assertEquals("struct<a:int,b:string,s:struct<c:int,si:struct<d:double,e:float>>>", rowSchema.toString()); Reader r = OrcFile.createReader(fileStatus.getPath(), readerOptions); TypeDescription rowSchema = r.getSchema().getChildren().get(5); Assert.assertEquals("struct<a:int,b:string,s:struct<c:int,si:struct<d:double,e:float>>>", rowSchema.toString());
// NOTE(review): mangled fragment — several else/catch arms repeating the same throw,
// with dangling text at the end; not syntactically valid as shown. Real bug worth
// fixing when the enclosing method is edited: only the first arm spells the message
// "Field X not found in ..."; the later arms concatenate "Field " + first + "not found"
// with no space before "not". Also note the catch arm correctly preserves the
// NumberFormatException as the cause.
if (posn == -1) { throw new IllegalArgumentException("Field " + first + " not found in " + current.toString()); } else { throw new IllegalArgumentException("Field " + first + "not found in " + current.toString()); } else { throw new IllegalArgumentException("Field " + first + "not found in " + current.toString()); } catch (NumberFormatException e) { throw new IllegalArgumentException("Field " + first + "not found in " + current.toString(), e); "not found in " + current.toString());
// Publishes writer settings into the job configuration: the output schema (as a
// TypeDescription string built from the field types/names), the compression kind,
// and the row-index stride. NOTE(review): the attribute-key constants are declared
// outside this view.
jobConf.set(MAPRED_OUTPUT_SCHEMA.getAttribute(), OrcSchemaConverter.convert(fieldTypes, fieldNames).toString()); jobConf.set(COMPRESS.getAttribute(), compression.name()); jobConf.set(ROW_INDEX_STRIDE.getAttribute(), String.valueOf(rowIndexStride));
// For ORC inputs: opens a reader on the last source file and logs its schema string.
// NOTE(review): the closing brace of this if-block lies outside this view, and the
// reader is not visibly closed here — check the full method for a close/try-with-resources.
if (ORC == fileType) { final Reader reader = OrcFile.createReader(fileManager.getLastSourceFilePath(sourceFilePaths), OrcFile.readerOptions(configuration)); final String schema = reader.getSchema().toString(); log.info("ORC input file Schema " + schema);
/**
 * Creates a row-oriented ORC output format.
 *
 * @param fieldTypes      internal types of the output columns; must be non-null and
 *                        the same length as {@code fieldNames}
 * @param fieldNames      output column names; must be non-null and non-empty
 * @param dir             target directory for the written files
 * @param compression     ORC compression kind to apply
 * @param filePrefixName  prefix used for generated file names
 * @param rowIndexStride  number of rows between row-index entries
 * @throws IllegalArgumentException if the name/type arrays are missing or inconsistent
 */
public RowOrcOutputFormat(
        InternalType[] fieldTypes,
        String[] fieldNames,
        String dir,
        CompressionKind compression,
        String filePrefixName,
        int rowIndexStride) {
    // Fix: the original checkArgument calls had no message, so a failed precondition
    // surfaced as an IllegalArgumentException with a null message.
    Preconditions.checkArgument(
            fieldNames != null && fieldNames.length > 0,
            "fieldNames must be non-null and non-empty");
    Preconditions.checkArgument(
            fieldTypes != null && fieldTypes.length == fieldNames.length,
            "fieldTypes must be non-null and have the same length as fieldNames");
    this.fieldTypes = fieldTypes;
    this.fieldNames = fieldNames;
    this.dir = dir;
    this.compression = compression;
    this.filePrefixName = filePrefixName;
    this.rowIndexStride = rowIndexStride;
    this.serializer = new OrcSerializer(fieldTypes, fieldNames);
    // Render the schema once; the reusable OrcStruct value is rebuilt from this string.
    this.typeDescription = OrcSchemaConverter.convert(fieldTypes, fieldNames).toString();
    this.struct = (OrcStruct) OrcStruct.createValue(TypeDescription.fromString(this.typeDescription));
}
// Debug trace emitted on the schema-evolution path: the reader (requested) schema
// differs from the schema stored in the file, so a type conversion will apply.
LOG.debug("ORC file " + fileReader.path.toString() + " has data type conversion --\n" + "reader schema: " + options.getSchema().toString() + "\n" + "file schema: " + fileReader.getSchema());