@Override
public void close() throws IOException {
  // Finish any in-progress row group, release the column write store,
  // then write the file footer and close the underlying file.
  flushRowGroup(true);
  writeStore.close();
  writer.end(metadata);
}
}
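/**
 * Flushes the current writer state to the open file: starts a row group for
 * the buffered records, flushes the record consumer, column store and page
 * store into the file writer, then ends the block and writes the footer.
 * Exactly one block is written per file, so the file writer is finished and
 * discarded here; the stores are closed and the file index is advanced even
 * if the flush fails.
 */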
private void flush() throws IOException {
  try {
    if (recordCount > 0) {
      parquetFileWriter.startBlock(recordCount);
      consumer.flush();
      store.flush();
      pageStore.flushToFileWriter(parquetFileWriter);
      recordCount = 0;
      parquetFileWriter.endBlock();
      // we are writing one single block per file
      parquetFileWriter.end(extraMetaData);
      parquetFileWriter = null;
    }
  } finally {
    store.close();
    pageStore.close();
    store = null;
    pageStore = null;
    index++;
  }
}
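/**
 * Like {@link #flush()}, but also reports what was written: after ending the
 * single block and writing the footer, the listener is notified with the
 * record count, file size, path, optional converter metadata and bucket
 * number, and memory statistics are updated before the counters are reset.
 * Safe to call when no file writer is open.
 */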
private void flushAndClose() throws IOException {
  if (parquetFileWriter == null) {
    return;
  }
  if (recordCount > 0) {
    long memSize = store.getBufferedSize();
    parquetFileWriter.startBlock(recordCount);
    consumer.flush();
    store.flush();
    ColumnChunkPageWriteStoreExposer.flushPageStore(pageStore, parquetFileWriter);
    parquetFileWriter.endBlock();
    long recordsWritten = recordCount;

    // we are writing one single block per file
    parquetFileWriter.end(extraMetaData);
    byte[] metadata = this.trackingConverter == null ? null : trackingConverter.getMetadata();
    final long fileSize = parquetFileWriter.getPos();
    listener.recordsWritten(recordsWritten, fileSize, path.toString(),
        metadata /* TODO: add parquet footer */, partition.getBucketNumber());
    parquetFileWriter = null;
    updateStats(memSize, recordCount);
    recordCount = 0;
  }
  if (store != null) {
    store.close();
  }
  store = null;
  pageStore = null;
  index++;
}
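// A minimal sketch of how the "one single block per file" invariant above can
// be observed from the outside: read back the footer that end(extraMetaData)
// wrote and inspect its row groups. This uses parquet-mr's classic
// footer-reading API (deprecated in recent releases, but present); the file
// path argument is hypothetical and only for illustration.
//
// import org.apache.hadoop.conf.Configuration;
// import org.apache.hadoop.fs.Path;
// import org.apache.parquet.hadoop.ParquetFileReader;
// import org.apache.parquet.hadoop.metadata.BlockMetaData;
// import org.apache.parquet.hadoop.metadata.ParquetMetadata;
//
// public class FooterCheck {
//   public static void main(String[] args) throws Exception {
//     ParquetMetadata footer = ParquetFileReader.readFooter(
//         new Configuration(), new Path(args[0]));
//     // Because flush() ends the file after one block, we expect exactly
//     // one row group in the footer of each file it produced.
//     for (BlockMetaData block : footer.getBlocks()) {
//       System.out.println("rows=" + block.getRowCount()
//           + " bytes=" + block.getTotalByteSize());
//     }
//   }
// }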