/**
 * Static factory: builds a {@link CompressedSequentialWriter} over the given data file.
 *
 * @param dataFilePath path of the compressed data file to write
 * @param offsetsPath path of the chunk-offsets (compression metadata) component
 * @param parameters compression parameters to apply to each chunk
 * @param sstableMetadataCollector collector updated as data is written
 * @return a new writer targeting {@code dataFilePath}
 */
public static CompressedSequentialWriter open(String dataFilePath, String offsetsPath, CompressionParameters parameters, MetadataCollector sstableMetadataCollector)
{
    File dataFile = new File(dataFilePath);
    return new CompressedSequentialWriter(dataFile, offsetsPath, parameters, sstableMetadataCollector);
}
/**
 * Positions the underlying channel at {@code chunkOffset}, where the next
 * compressed data chunk is to be stored. No-op when the channel is already
 * at that offset.
 */
private void seekToChunkStart()
{
    if (getOnDiskFilePointer() == chunkOffset)
        return; // already at the chunk boundary, nothing to do

    try
    {
        fchannel.position(chunkOffset);
    }
    catch (IOException e)
    {
        // NOTE(review): FSReadError for a positioning-before-write failure looks odd — confirm intended.
        throw new FSReadError(e, getPath());
    }
}
syncInternal(); throw new CorruptBlockException(getPath(), chunkOffset, chunkSize); throw new CorruptBlockException(getPath(), chunkOffset, chunkSize); throw new CorruptSSTableException(e, getPath()); throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath()); throw new FSReadError(e, getPath()); truncate(chunkOffset); metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
@Override protected void flushData() seekToChunkStart(); // why is this necessary? seems like it should always be at chunk start in normal operation throw new FSWriteError(e, getPath());
/**
 * Reports the physical position in the data file, i.e. where the next
 * byte handed to the channel would land.
 *
 * @return the current position of the underlying file channel
 * @throws FSReadError if querying the channel position fails
 */
@Override
public long getOnDiskFilePointer()
{
    long position;
    try
    {
        position = fchannel.position();
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }
    return position;
}
/**
 * Obtains compression metadata for {@code path}: read back from the on-disk
 * component when no live writer exists, otherwise snapshotted from the
 * in-progress writer.
 *
 * @param path path of the compression metadata component
 * @param overrideLength data length to report when snapshotting the writer
 * @param isFinal whether this snapshot represents the finished file
 * @return the compression metadata for the data file
 */
protected CompressionMetadata metadata(String path, long overrideLength, boolean isFinal)
{
    return (writer == null)
           ? CompressionMetadata.create(path)
           : writer.open(overrideLength, isFinal);
}
syncInternal(); throw new CorruptBlockException(getPath(), chunkOffset, chunkSize, e); crcCheckBuffer.flip(); if (crcCheckBuffer.getInt() != (int) checksum.getValue()) throw new CorruptBlockException(getPath(), chunkOffset, chunkSize); throw new CorruptSSTableException(e, getPath()); throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath()); throw new FSReadError(e, getPath()); truncate(chunkOffset, bufferOffset); metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
@Override protected void flushData() seekToChunkStart(); // why is this necessary? seems like it should always be at chunk start in normal operation throw new FSWriteError(e, getPath());
/**
 * Shrinks the data file to {@code toFileSize} bytes and records
 * {@code toBufferOffset} as the last flushed offset.
 *
 * @param toFileSize new physical length of the file
 * @param toBufferOffset logical offset to remember as last flushed
 * @throws FSWriteError if the underlying truncate fails (the flush offset
 *         is left unchanged in that case)
 */
private void truncate(long toFileSize, long toBufferOffset)
{
    try
    {
        fchannel.truncate(toFileSize);
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, getPath());
    }
    // Only reached when the truncate succeeded, matching the original ordering.
    lastFlushOffset = toBufferOffset;
}
FileHandle ifile = iwriter.builder.bufferSize(indexBufferSize).complete(); if (compression) dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(0)); FileHandle dfile = dbuilder.bufferSize(dataBufferSize).complete(); invalidateCacheAtBoundary(dfile);
syncInternal(); throw new CorruptBlockException(getPath(), chunkOffset, chunkSize, e); crcCheckBuffer.flip(); if (crcCheckBuffer.getInt() != (int) checksum.getValue()) throw new CorruptBlockException(getPath(), chunkOffset, chunkSize); throw new CorruptSSTableException(e, getPath()); throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath()); throw new FSReadError(e, getPath()); truncate(chunkOffset); metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
@Override protected void flushData() seekToChunkStart(); // why is this necessary? seems like it should always be at chunk start in normal operation throw new FSWriteError(e, getPath());
/**
 * Moves the file channel to {@code chunkOffset} so the next compressed
 * chunk is written at its expected start; does nothing when the channel
 * is already positioned there.
 */
private void seekToChunkStart()
{
    boolean alreadyAtChunkStart = getOnDiskFilePointer() == chunkOffset;
    if (!alreadyAtChunkStart)
    {
        try
        {
            fchannel.position(chunkOffset);
        }
        catch (IOException e)
        {
            throw new FSReadError(e, getPath());
        }
    }
}
/**
 * Exposes the channel's current position as the on-disk file pointer.
 *
 * @return the offset at which the next write would physically occur
 * @throws FSReadError if the channel position cannot be read
 */
@Override
public long getOnDiskFilePointer()
{
    try
    {
        return fchannel.position();
    }
    catch (IOException ioe)
    {
        throw new FSReadError(ioe, getPath());
    }
}
dataFile = new CompressedSequentialWriter(new File(getFilename()), descriptor.filenameFor(Component.COMPRESSION_INFO), new File(descriptor.filenameFor(descriptor.digestComponent)),
FileHandle ifile = iwriter.builder.bufferSize(indexBufferSize).complete(); if (compression) dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(0)); FileHandle dfile = dbuilder.bufferSize(dataBufferSize).complete(); invalidateCacheAtBoundary(dfile);
syncInternal(); throw new CorruptBlockException(getPath(), chunkOffset, chunkSize, e); crcCheckBuffer.flip(); if (crcCheckBuffer.getInt() != (int) checksum.getValue()) throw new CorruptBlockException(getPath(), chunkOffset, chunkSize); throw new CorruptSSTableException(e, getPath()); throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath()); throw new FSReadError(e, getPath()); truncate(chunkOffset, bufferOffset); metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
@Override protected void flushData() seekToChunkStart(); // why is this necessary? seems like it should always be at chunk start in normal operation throw new FSWriteError(e, getPath());