        jc, hiveOutputFormat, outputClass, isCompressed, tableInfo.getProperties(), path, reporter);
writer.close(false);
LOG.info("created empty bucket for enforcing bucketing at " + path);
    rowOutWriters[findWriterOffset(row)].write(recordValue);
} else if (conf.getWriteType() == AcidUtils.Operation.INSERT) {
    fpaths.updaters[findWriterOffset(row)].insert(conf.getTableWriteId(), row);
public void appendRow(Row row) {
    List<Object> columns = row.getColumns();
    checkArgument(columns.size() == columnTypes.size());
    for (int channel = 0; channel < columns.size(); channel++) {
        tableInspector.setStructFieldData(orcRow, structFields.get(channel), columns.get(channel));
    }
    try {
        recordWriter.write(serializer.serialize(orcRow, tableInspector));
    } catch (IOException e) {
        throw new PrestoException(RAPTOR_ERROR, "Failed to write record", e);
    }
    rowCount++;
    uncompressedSize += row.getSizeInBytes();
}
private static void createSequentialFile(File file, int count) throws IOException, ReflectiveOperationException, SerDeException {
    FileSinkOperator.RecordWriter writer = createOrcRecordWriter(file, ORC_12, OrcTester.Compression.NONE, javaLongObjectInspector);
    @SuppressWarnings("deprecation")
    Serializer serde = new OrcSerde();
    SettableStructObjectInspector objectInspector = createSettableStructObjectInspector("test", javaLongObjectInspector);
    Object row = objectInspector.create();
    StructField field = objectInspector.getAllStructFieldRefs().get(0);
    for (int i = 0; i < count; i++) {
        objectInspector.setStructFieldData(row, field, (long) i);
        Writable record = serde.serialize(row, objectInspector);
        writer.write(record);
    }
    writer.close(false);
}
public void abortWriters(FileSystem fs, boolean abort, boolean delete) throws HiveException {
    for (int idx = 0; idx < outWriters.length; idx++) {
        if (outWriters[idx] != null) {
            try {
                outWriters[idx].close(abort);
                if (delete) {
                    fs.delete(outPaths[idx], true);
                }
                updateProgress();
            } catch (IOException e) {
                throw new HiveException(e);
            }
        }
    }
}
} // class FSPaths
private static void createSequentialFile(File file, int count) throws IOException, SerDeException {
    FileSinkOperator.RecordWriter writer = createOrcRecordWriter(file, ORC_12, CompressionKind.NONE, BIGINT);
    Serializer serde = new OrcSerde();
    SettableStructObjectInspector objectInspector = createSettableStructObjectInspector("test", BIGINT);
    Object row = objectInspector.create();
    StructField field = objectInspector.getAllStructFieldRefs().get(0);
    for (int i = 0; i < count; i++) {
        objectInspector.setStructFieldData(row, field, (long) i);
        Writable record = serde.serialize(row, objectInspector);
        writer.write(record);
    }
    writer.close(false);
}
@Override
public void close() {
    if (closed) {
        return;
    }
    closed = true;
    try {
        recordWriter.close(false);
    } catch (IOException e) {
        throw new PrestoException(RAPTOR_ERROR, "Failed to close writer", e);
    }
}
private static void createSequentialFile(File file, int count) throws IOException, SerDeException {
    FileSinkOperator.RecordWriter writer = createOrcRecordWriter(file, ORC_12, CompressionKind.NONE, BIGINT);
    @SuppressWarnings("deprecation")
    Serializer serde = new OrcSerde();
    SettableStructObjectInspector objectInspector = createSettableStructObjectInspector("test", BIGINT);
    Object row = objectInspector.create();
    StructField field = objectInspector.getAllStructFieldRefs().get(0);
    for (int i = 0; i < count; i++) {
        objectInspector.setStructFieldData(row, field, (long) i);
        Writable record = serde.serialize(row, objectInspector);
        writer.write(record);
    }
    writer.close(false);
}
private static void writeEmptyFile(ConnectorSession session, Path target, JobConf conf, Properties properties, String serDe, String outputFormatName) {
    // Some serializers such as Avro set a property in the schema.
    initializeSerializer(conf, properties, serDe);

    // The code below is not a try-with-resources because RecordWriter is not Closeable.
    FileSinkOperator.RecordWriter recordWriter = HiveWriteUtils.createRecordWriter(target, conf, properties, outputFormatName, session);
    try {
        recordWriter.close(false);
    } catch (IOException e) {
        throw new PrestoException(HIVE_WRITER_CLOSE_ERROR, "Error writing empty file to Hive", e);
    }
}
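The comment above explains why the explicit try/catch is needed: FileSinkOperator.RecordWriter does not implement Closeable. A minimal sketch, assuming only the RecordWriter interface itself, of a hypothetical adapter (the ClosableRecordWriter name is illustrative and not part of any of the projects above) that would let such a writer participate in try-with-resources:

import java.io.IOException;

import org.apache.hadoop.hive.ql.exec.FileSinkOperator;

// Hypothetical adapter (illustrative name): wraps a RecordWriter so it can be
// used in a try-with-resources block; close() performs a normal, non-abort close.
final class ClosableRecordWriter implements AutoCloseable {
    private final FileSinkOperator.RecordWriter delegate;

    ClosableRecordWriter(FileSinkOperator.RecordWriter delegate) {
        this.delegate = delegate;
    }

    FileSinkOperator.RecordWriter delegate() {
        return delegate;
    }

    @Override
    public void close() throws IOException {
        // false = finalize the file rather than abort it
        delegate.close(false);
    }
}

With a wrapper along these lines, methods like writeEmptyFile could use a try-with-resources block instead of the manual try/catch.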
@Override
public void close(boolean abort) throws IOException {
    writer.close(abort);
}
@Override
public void write(K key, V value) throws IOException {
    getHiveWriter().write(value);
}
@Override
public void commit() {
    try {
        recordWriter.close(false);
        committed = true;
    } catch (IOException e) {
        throw new PrestoException(HIVE_WRITER_CLOSE_ERROR, "Error committing write to Hive", e);
    }
}
@Override
public void close(boolean abort) throws IOException {
    recordWriter.close(abort);
    if (!abort) {
        length = target.getFileSystem(conf).getFileStatus(target).getLen();
    }
}
};
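All of the close(boolean abort) implementations above follow the same contract: abort == false finalizes the output file, abort == true discards partial output. A minimal sketch of that calling pattern, assuming only the RecordWriter interface (the writeAll helper is hypothetical, not from the source):

import java.io.IOException;

import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
import org.apache.hadoop.io.Writable;

final class RecordWriterUsage {
    // Hypothetical helper: writes all records, closing with abort=true if any
    // write fails (discarding partial output) and abort=false on success.
    static void writeAll(FileSinkOperator.RecordWriter writer, Iterable<? extends Writable> records) throws IOException {
        boolean abort = true;
        try {
            for (Writable record : records) {
                writer.write(record);
            }
            abort = false;
        } finally {
            writer.close(abort);
        }
    }
}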