@Override
public void sync() throws IOException {
  flush();
  // hsync is invoked through a reflective method handle so the call still
  // binds on Hadoop versions where FSDataOutputStream#hsync may not exist
  Hadoop.FSDataOutputStream.hsync.invoke(outgoing);
}
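// For context, a minimal sketch of that reflective-fallback pattern, using a
// hypothetical HsyncCompat helper (this is not Kite's actual compat layer):
// resolve hsync() once, call it when present, else fall back to older sync().

import java.io.IOException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FSDataOutputStream;

final class HsyncCompat {
  // resolved once; null means the running Hadoop version lacks hsync()
  private static final Method HSYNC = lookupHsync();

  private static Method lookupHsync() {
    try {
      return FSDataOutputStream.class.getMethod("hsync");
    } catch (NoSuchMethodException e) {
      return null;
    }
  }

  static void hsyncOrSync(FSDataOutputStream out) throws IOException {
    try {
      if (HSYNC != null) {
        HSYNC.invoke(out);
      } else {
        out.sync(); // pre-hsync durability call, deprecated on newer Hadoop
      }
    } catch (ReflectiveOperationException e) {
      throw new IOException("hsync failed", e);
    }
  }
}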
@Override
public void append(E entity) throws IOException {
  // shred the entity into one String per column, then write it as a CSV line
  writer.writeNext(shred(entity));
}
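// writer here appears to be opencsv's CSVWriter; writeNext() takes one record
// already shredded into a String[] and emits it as a single CSV line. A
// standalone sketch of that API (class name and data are illustrative):

import java.io.IOException;
import java.io.StringWriter;
import au.com.bytecode.opencsv.CSVWriter;

public class WriteNextDemo {
  public static void main(String[] args) throws IOException {
    StringWriter out = new StringWriter();
    CSVWriter writer = new CSVWriter(out, ',');
    // one shredded entity -> one CSV line
    writer.writeNext(new String[] { "1", "1@example.com" });
    writer.close();
    System.out.print(out); // "1","1@example.com"
  }
}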
private String[] shred(E entity) {
  // IndexedRecord (generic/specific Avro records) can be read positionally;
  // anything else is shredded via reflection against the schema
  if (entity instanceof IndexedRecord) {
    return shredIndexed((IndexedRecord) entity, schema);
  } else {
    return shredReflect(entity, schema);
  }
}
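// shredIndexed and shredReflect are not shown in this excerpt. A plausible
// sketch of the indexed case (my reconstruction, not Kite's exact code):
// walk the schema's fields in declaration order and stringify each value.

import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;

final class ShredSketch {
  static String[] shredIndexed(IndexedRecord record, Schema schema) {
    List<Schema.Field> fields = schema.getFields();
    String[] values = new String[fields.size()];
    for (int i = 0; i < fields.size(); i += 1) {
      Object value = record.get(i); // positional access by field index
      values[i] = (value == null) ? null : value.toString();
    }
    return values;
  }
}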
DatasetDescriptor descriptor = new DatasetDescriptor.Builder()
    .schema(schema)
    .build();
CSVAppender<GenericRecord> appender =
    new CSVAppender<GenericRecord>(fs, path, descriptor);
GenericRecord record = new GenericData.Record(schema);

appender.open();
for (int i = 0; i < 10; i += 1) {
  record.put("id", i);
  record.put("email", Integer.toString(i) + "@example.com");
  appender.append(record);
}

// nothing is visible to readers until the appender flushes and syncs
Assert.assertEquals(0, count(fs, path, descriptor));

appender.flush();
appender.sync();

// after sync, all 10 appended records are durable and visible
Assert.assertEquals(10, count(fs, path, descriptor));

appender.close();
appender.cleanup();
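// The count(fs, path, descriptor) helper is not shown. A naive sketch with
// the same assumed signature (the real helper presumably reads the file back
// with a proper CSV reader): counting lines approximates the record count,
// assuming no header row is written to the file.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.kitesdk.data.DatasetDescriptor;

static long count(FileSystem fs, Path path, DatasetDescriptor descriptor)
    throws IOException {
  long records = 0;
  BufferedReader reader = new BufferedReader(
      new InputStreamReader(fs.open(path), "UTF-8"));
  try {
    while (reader.readLine() != null) {
      records += 1;
    }
  } finally {
    reader.close();
  }
  return records;
}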
@VisibleForTesting
@SuppressWarnings("unchecked")
<E> FileAppender<E> newAppender(Path temp) {
  Format format = descriptor.getFormat();
  if (Formats.PARQUET.equals(format)) {
    // by default, Parquet is not durable
    if (DescriptorUtil.isDisabled(
        FileSystemProperties.NON_DURABLE_PARQUET_PROP, descriptor)) {
      return (FileAppender<E>) new DurableParquetAppender(
          fs, temp, schema, conf, descriptor.getCompressionType());
    } else {
      return (FileAppender<E>) new ParquetAppender(
          fs, temp, schema, conf, descriptor.getCompressionType());
    }
  } else if (Formats.AVRO.equals(format)) {
    return new AvroAppender<E>(fs, temp, schema, descriptor.getCompressionType());
  } else if (Formats.CSV.equals(format) &&
      DescriptorUtil.isEnabled(FileSystemProperties.ALLOW_CSV_PROP, descriptor)) {
    return new CSVAppender<E>(fs, temp, descriptor);
  } else {
    this.state = ReaderWriterState.ERROR;
    throw new UnknownFormatException("Unknown format " + descriptor);
  }
}
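// DescriptorUtil.isEnabled/isDisabled test boolean descriptor properties. A
// sketch of the semantics the dispatch above relies on (my reconstruction,
// not Kite's exact source): a property is "enabled" only when explicitly
// "true" and "disabled" only when explicitly "false", so an unset property
// falls through to the default branch. That is why the Parquet branch is
// non-durable by default: an unset NON_DURABLE_PARQUET_PROP is neither.

import org.kitesdk.data.DatasetDescriptor;

final class DescriptorUtilSketch {
  static boolean isEnabled(String prop, DatasetDescriptor descriptor) {
    return "true".equals(descriptor.getProperty(prop));
  }

  static boolean isDisabled(String prop, DatasetDescriptor descriptor) {
    return "false".equals(descriptor.getProperty(prop));
  }
}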