// record the starting offset of the chunk just written
metadataWriter.addOffset(chunkOffset);
chunkCount++;
public void doPrepare()
{
    assert chunkCount == count;

    // finalize the size of memory used if it won't now change;
    // unnecessary if already correct size
    if (offsets.size() != count * 8L)
    {
        SafeMemory tmp = offsets;
        offsets = offsets.copy(count * 8L);
        tmp.free();
    }

    // flush the data to disk
    try (FileOutputStream fos = new FileOutputStream(filePath);
         DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos)))
    {
        writeHeader(out, dataLength, count);
        for (int i = 0; i < count; i++)
            out.writeLong(offsets.getLong(i * 8L));
        out.flush();
        fos.getFD().sync();
    }
    catch (IOException e)
    {
        throw Throwables.propagate(e);
    }
}
// compute the on-disk size of the chunk being rewound into: the gap between
// this chunk's offset and the next chunk's offset, minus the 4-byte checksum
// stored after each compressed chunk
int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
if (compressed.capacity() < chunkSize)
    compressed = compressor.preferredBufferType().allocate(chunkSize);
metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
// same rewind logic in the byte[]-based variant, where the scratch buffer is
// an ICompressor.WrappedArray (see the constructor below) rather than a ByteBuffer
int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
if (compressed.buffer.length < chunkSize)
    compressed.buffer = new byte[chunkSize];
metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
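The two rewind fragments above differ only in how the compressed scratch buffer is represented: a ByteBuffer obtained from compressor.preferredBufferType().allocate(...) versus a plain byte[] wrapped in ICompressor.WrappedArray. A minimal sketch of the shared grow-only pattern, using a hypothetical helper class that is not part of the Cassandra API:

import java.nio.ByteBuffer;

// Hypothetical illustration of the grow-only scratch buffer: reallocate only
// when the next chunk cannot fit; the old contents are disposable because the
// chunk is recompressed in full after a rewind.
final class CompressedScratch
{
    private ByteBuffer compressed = ByteBuffer.allocate(0);

    ByteBuffer ensureCapacity(int chunkSize)
    {
        if (compressed.capacity() < chunkSize)
            compressed = ByteBuffer.allocate(chunkSize);
        compressed.clear(); // reset position/limit for reuse
        return compressed;
    }
}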
metadataWriter = CompressionMetadata.Writer.open(parameters, offsetsPath);
// doPrepare() variant adapted to write through a Hadoop filesystem channel;
// the FileOutputStream fsync from the original is left commented out below
public void doPrepare()
{
    assert chunkCount == count;

    // finalize the size of memory used if it won't now change;
    // unnecessary if already correct size
    if (offsets.size() != count * 8L)
    {
        SafeMemory tmp = offsets;
        offsets = offsets.copy(count * 8L);
        tmp.free();
    }

    // flush the data to disk
    try (HadoopFileUtils.HadoopFileChannel hos = HadoopFileUtils.newFilesystemChannel(filePath);
         DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(hos))
    {
        writeHeader(out, dataLength, count);
        for (int i = 0; i < count; i++)
            out.writeLong(offsets.getLong(i * 8L));
        out.flush();
        //fos.getFD().sync();
    }
    catch (IOException e)
    {
        throw Throwables.propagate(e);
    }
}
// earlier shape of the same flush logic, exposed as close() with manual
// stream management instead of try-with-resources
public void close(long dataLength, int chunks) throws IOException
{
    FileOutputStream fos = null;
    DataOutputStream out = null;
    try
    {
        fos = new FileOutputStream(filePath);
        out = new DataOutputStream(new BufferedOutputStream(fos));
        assert chunks == count;
        writeHeader(out, dataLength, chunks);
        for (int i = 0; i < count; i++)
            out.writeLong(offsets.getLong(i * 8L));
        out.flush();
        fos.getFD().sync();
    }
    finally
    {
        // closing 'out' also closes the wrapped BufferedOutputStream and fos
        FileUtils.closeQuietly(out);
    }
}
public CompressedSequentialWriter(File file,
                                  String offsetsPath,
                                  CompressionParameters parameters,
                                  MetadataCollector sstableMetadataCollector)
{
    super(file, parameters.chunkLength());
    this.compressor = parameters.sstableCompressor;

    // buffer for compression should be the same size as buffer itself
    compressed = new ICompressor.WrappedArray(new byte[compressor.initialCompressedBufferLength(buffer.length)]);

    /* Index file (-CompressionInfo.db component) and its header */
    metadataWriter = CompressionMetadata.Writer.open(parameters, offsetsPath);

    this.sstableMetadataCollector = sstableMetadataCollector;
    crcMetadata = new DataIntegrityMetadata.ChecksumWriter(out);
}
public static Writer open(CompressionParameters parameters, String path)
{
    return new Writer(parameters, path);
}
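Read together, these fragments trace the CompressionMetadata.Writer lifecycle: open(...) creates the writer for the -CompressionInfo.db component, addOffset(...) records one entry per compressed chunk, and doPrepare() trims the off-heap offsets and flushes the header plus offsets. A sketch of that flow follows; the method name and the compressedChunks parameter are illustrative assumptions, not Cassandra code:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;

// Hypothetical driver showing how the fragments above fit together.
static void writeCompressionInfo(CompressionParameters parameters,
                                 String offsetsPath,
                                 List<ByteBuffer> compressedChunks) throws IOException
{
    CompressionMetadata.Writer metadataWriter = CompressionMetadata.Writer.open(parameters, offsetsPath);

    long chunkOffset = 0;
    for (ByteBuffer chunk : compressedChunks)
    {
        // ... the compressed chunk would be written to the data file here ...
        metadataWriter.addOffset(chunkOffset);    // one offset entry per chunk
        chunkOffset += chunk.remaining() + 4;     // +4 for the per-chunk checksum,
                                                  // mirroring the "- 4" in the
                                                  // chunkSize computation above
    }
    // doPrepare() (invoked through the writer's commit path) then writes the
    // header followed by one long offset per chunk and syncs the file.
}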