return nextString(vector, row, previous); case CHAR: return nextChar(vector, row, schema.getMaxLength(), previous); case VARCHAR: return nextVarchar(vector, row, schema.getMaxLength(), previous); case BINARY: return nextBinary(vector, row, previous);
return nextString(vector, row, previous); case CHAR: return nextChar(vector, row, schema.getMaxLength(), previous); case VARCHAR: return nextVarchar(vector, row, schema.getMaxLength(), previous); case BINARY: return nextBinary(vector, row, previous);
case CHAR: type.setKind(OrcProto.Type.Kind.CHAR); type.setMaximumLength(schema.getMaxLength()); break; case VARCHAR: type.setKind(OrcProto.Type.Kind.VARCHAR); type.setMaximumLength(schema.getMaxLength()); break; case BINARY:
/**
 * Creates a tree writer for CHAR columns.
 *
 * @param columnId the column's id in the file schema
 * @param schema   the CHAR type description; its max length bounds the padded width
 * @param writer   the writer context providing the output streams
 * @param nullable whether this column may contain nulls
 * @throws IOException if the underlying streams cannot be created
 */
CharTreeWriter(int columnId, TypeDescription schema, WriterContext writer, boolean nullable) throws IOException {
  super(columnId, schema, writer, nullable);
  maxLength = schema.getMaxLength();
  // UTF-8 currently encodes a code point in at most 4 bytes (RFC 3629), but the
  // original spec allowed up to 6; size the padding buffer for the worst case so
  // padding a value out to maxLength characters can never overflow it.
  // utf-8 is currently 4 bytes long, but it could be upto 6
  padding = new byte[6*maxLength];
}
/**
 * Creates a tree writer for VARCHAR columns.
 *
 * @param columnId the column's id in the file schema
 * @param schema   the VARCHAR type description; values longer than its max length
 *                 are truncated by the write path (truncation itself happens elsewhere)
 * @param writer   the writer context providing the output streams
 * @param nullable whether this column may contain nulls
 * @throws IOException if the underlying streams cannot be created
 */
VarcharTreeWriter(int columnId, TypeDescription schema, WriterContext writer, boolean nullable) throws IOException {
  super(columnId, schema, writer, nullable);
  // Cache the declared maximum character length for use when writing values.
  maxLength = schema.getMaxLength();
}
/**
 * Creates a tree writer for CHAR columns.
 *
 * @param columnId the column's id in the file schema
 * @param schema   the CHAR type description; its max length is the fixed item width
 * @param writer   the stream factory providing the output streams
 * @param nullable whether this column may contain nulls
 * @throws IOException if the underlying streams cannot be created
 */
CharTreeWriter(int columnId, TypeDescription schema, StreamFactory writer, boolean nullable) throws IOException {
  super(columnId, schema, writer, nullable);
  itemLength = schema.getMaxLength();
  // Reusable buffer of blanks for right-padding values shorter than itemLength.
  // NOTE(review): sized in characters, not bytes — presumably padding is ASCII
  // spaces so one byte per character suffices; confirm against the pad logic.
  padding = new byte[itemLength];
}
/**
 * Creates a tree writer for CHAR columns.
 *
 * @param columnId the column's id in the file schema
 * @param schema   the CHAR type description; its max length is the fixed item width
 * @param writer   the stream factory providing the output streams
 * @param nullable whether this column may contain nulls
 * @throws IOException if the underlying streams cannot be created
 */
CharTreeWriter(int columnId, TypeDescription schema, StreamFactory writer, boolean nullable) throws IOException {
  super(columnId, schema, writer, nullable);
  itemLength = schema.getMaxLength();
  // Reusable buffer of blanks for right-padding values shorter than itemLength.
  // NOTE(review): sized in characters, not bytes — presumably padding is ASCII
  // spaces so one byte per character suffices; confirm against the pad logic.
  padding = new byte[itemLength];
}
return CharStreamReader.builder() .setColumnIndex(columnIndex) .setMaxLength(columnType.getMaxLength()) .setPresentStream(present) .setDataStream(data) return VarcharStreamReader.builder() .setColumnIndex(columnIndex) .setMaxLength(columnType.getMaxLength()) .setPresentStream(present) .setDataStream(data)
return CharStreamReader.builder() .setColumnIndex(columnIndex) .setMaxLength(columnType.getMaxLength()) .setPresentStream(present) .setDataStream(data) return VarcharStreamReader.builder() .setColumnIndex(columnIndex) .setMaxLength(columnType.getMaxLength()) .setPresentStream(present) .setDataStream(data)
if (schema.getMaxLength() < hash.length) { targetLength = schema.getMaxLength(); targetLength = schema.getMaxLength(); break;
/**
 * Builds the tree reader for a string-family column based on the file's type.
 * STRING gets a plain string reader; CHAR and VARCHAR get readers that carry
 * the type's declared maximum length so values can be padded/truncated.
 *
 * @param columnId the column's id in the file schema
 * @param fileType the column's type as written in the file
 * @param context  reader context passed through to the STRING reader
 * @return a TreeReader for the given string-family type
 * @throws IOException if the reader cannot be constructed
 * @throws RuntimeException if {@code fileType} is not a string-family type
 */
protected TreeReader getStringGroupTreeReader(int columnId, TypeDescription fileType, Context context) throws IOException {
  TypeDescription.Category kind = fileType.getCategory();
  if (kind == TypeDescription.Category.STRING) {
    return new StringTreeReader(columnId, context);
  }
  if (kind == TypeDescription.Category.CHAR) {
    return new CharTreeReader(columnId, fileType.getMaxLength());
  }
  if (kind == TypeDescription.Category.VARCHAR) {
    return new VarcharTreeReader(columnId, fileType.getMaxLength());
  }
  throw new RuntimeException("Unexpected type kind " + fileType.getCategory().name());
}
case VARCHAR: output.print('('); output.print(type.getMaxLength()); output.print(')'); break;
.rightTrimAndTruncate(bytesColVector.vector[elementNum], bytesColVector.start[elementNum], length, readerType.getMaxLength()); if (adjustedDownLen < length) { bytesColVector.length[elementNum] = adjustedDownLen; .truncate(bytesColVector.vector[elementNum], bytesColVector.start[elementNum], length, readerType.getMaxLength()); if (adjustedDownLen < length) { bytesColVector.length[elementNum] = adjustedDownLen;
/**
 * Stores a byte-range value into a string-family column vector entry, applying
 * the reader type's length semantics first: STRING is stored as-is, CHAR is
 * right-trimmed and truncated to the type's max length, VARCHAR is truncated
 * to the type's max length.
 *
 * @param bytesColVector destination vector
 * @param elementNum     row index within the vector
 * @param readerType     the reader's declared type for this column
 * @param bytes          source buffer
 * @param start          offset of the value within {@code bytes}
 * @param length         length of the value in bytes
 * @throws RuntimeException if {@code readerType} is not a string-family type
 */
protected void assignStringGroupVectorEntry(BytesColumnVector bytesColVector, int elementNum, TypeDescription readerType, byte[] bytes, int start, int length) {
  // Compute the (possibly shortened) length per the type's semantics, then do
  // a single setVal with the adjusted length.
  final int storedLength;
  switch (readerType.getCategory()) {
    case STRING:
      storedLength = length;
      break;
    case CHAR:
      storedLength = StringExpr.rightTrimAndTruncate(bytes, start, length, readerType.getMaxLength());
      break;
    case VARCHAR:
      storedLength = StringExpr.truncate(bytes, start, length, readerType.getMaxLength());
      break;
    default:
      throw new RuntimeException("Unexpected type kind " + readerType.getCategory().name());
  }
  bytesColVector.setVal(elementNum, bytes, start, storedLength);
}
case CHAR: setCharValue((BytesColumnVector) vector, row, (Text) value, schema.getMaxLength()); break; case VARCHAR: setBinaryValue(vector, row, (Text) value, schema.getMaxLength()); break; case BINARY:
return (fileType.getMaxLength() <= readerType.getMaxLength());
case CHAR: type.setKind(OrcProto.Type.Kind.CHAR); type.setMaximumLength(schema.getMaxLength()); break; case VARCHAR: type.setKind(OrcProto.Type.Kind.VARCHAR); type.setMaximumLength(schema.getMaxLength()); break; case BINARY:
case CHAR: type.setKind(OrcProto.Type.Kind.CHAR); type.setMaximumLength(schema.getMaxLength()); break; case VARCHAR: type.setKind(OrcProto.Type.Kind.VARCHAR); type.setMaximumLength(schema.getMaxLength()); break; case BINARY:
case CHAR: type.setKind(OrcProto.Type.Kind.CHAR); type.setMaximumLength(schema.getMaxLength()); break; case VARCHAR: type.setKind(OrcProto.Type.Kind.VARCHAR); type.setMaximumLength(schema.getMaxLength()); break; case BINARY:
return new StringTreeReader(fileType.getId(), context); case CHAR: return new CharTreeReader(fileType.getId(), readerType.getMaxLength()); case VARCHAR: return new VarcharTreeReader(fileType.getId(), readerType.getMaxLength()); case BINARY: return new BinaryTreeReader(fileType.getId(), context);