ReplicaOutputStreams

How to use ReplicaOutputStreams in org.apache.hadoop.hdfs.server.datanode.fsdataset

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams (Showing top 20 results out of 315)
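Taken together, the snippets below walk through one lifecycle: create the streams from a replica in the write pipeline, write block data, flush and sync, then close. A minimal sketch of that lifecycle, assuming an existing ReplicaInPipelineInterface for the block being written (the method and variable names here are illustrative, not taken from any snippet):

// A minimal sketch, assuming `replica` is an existing
// ReplicaInPipelineInterface for a block being written.
void writeReplica(ReplicaInPipelineInterface replica, byte[] buffer)
    throws IOException {
  ReplicaOutputStreams streams = replica.createStreams(true,
      DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
  try {
    OutputStream dataOut = streams.getDataOut();
    dataOut.write(buffer);    // raw block data
    streams.flushDataOut();   // push buffered bytes to the OS
    streams.syncDataOut();    // force them to the device, if supported
  } finally {
    streams.close();          // closes both the data and checksum streams
  }
}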

origin: org.apache.hadoop/hadoop-hdfs

try {
  crcOut.getChannel().position(crcDiskSize);
  return new ReplicaOutputStreams(blockOut, crcOut, checksum,
      getVolume(), fileIoProvider);
} catch (IOException e) {
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Sets the offset in the meta file so that the
 * last checksum will be overwritten.
 */
@Override // FsDatasetSpi
public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams streams, 
  int checksumSize) throws IOException {
 FileOutputStream file = (FileOutputStream)streams.getChecksumOut();
 FileChannel channel = file.getChannel();
 long oldPos = channel.position();
 long newPos = oldPos - checksumSize;
 if (LOG.isDebugEnabled()) {
  LOG.debug("Changing meta file offset of block " + b + " from " +
    oldPos + " to " + newPos);
 }
 channel.position(newPos);
}
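For the default CRC32 checksum, the rewind above moves the channel back by exactly one 4-byte entry, so the checksum of a reopened partial chunk can be rewritten. A hedged caller sketch (the fsdataset, block, and streams variables are assumed to exist already):

// Sketch only: rewind the meta file by one checksum entry.
// DataChecksum.getChecksumSize() returns 4 for CRC32.
DataChecksum checksum = DataChecksum.newDataChecksum(
    DataChecksum.Type.CRC32, 512);
fsdataset.adjustCrcChannelPosition(block, streams, checksum.getChecksumSize());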
origin: org.apache.hadoop/hadoop-hdfs

if (syncOnClose && (streams.getDataOut() != null || checksumOut != null)) {
  datanode.metrics.incrFsyncCount();
}
// Flush and, when syncOnClose is set, fsync the checksum stream.
if (checksumOut != null) {
  long flushStartNanos = System.nanoTime();
  checksumOut.flush();
  long flushEndNanos = System.nanoTime();
  if (syncOnClose) {
    long fsyncStartNanos = flushEndNanos;
    streams.syncChecksumOut();
    datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
  }
}
// Flush and, when syncOnClose is set, fsync the data stream, then close it.
if (streams.getDataOut() != null) {
  long flushStartNanos = System.nanoTime();
  streams.flushDataOut();
  long flushEndNanos = System.nanoTime();
  if (syncOnClose) {
    long fsyncStartNanos = flushEndNanos;
    streams.syncDataOut();
    datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
  }
  streams.closeDataStream();
}
streams.close();
origin: org.apache.hadoop/hadoop-hdfs

if (checksumOut != null) {
  long flushStartNanos = System.nanoTime();
  checksumOut.flush();
  long flushEndNanos = System.nanoTime();
  if (isSync) {
    long fsyncStartNanos = flushEndNanos;
    streams.syncChecksumOut();
    datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
  }
  flushTotalNanos += flushEndNanos - flushStartNanos;
}
if (streams.getDataOut() != null) {
  long flushStartNanos = System.nanoTime();
  streams.flushDataOut();
  long flushEndNanos = System.nanoTime();
  if (isSync) {
    long fsyncStartNanos = flushEndNanos;
    streams.syncDataOut();
    datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
  }
  flushTotalNanos += flushEndNanos - flushStartNanos;
}
if (isSync) {
  dirSyncOnHSyncDone = true;
}
if (checksumOut != null || streams.getDataOut() != null) {
  datanode.metrics.addFlushNanos(flushTotalNanos);
  if (isSync) {
    datanode.metrics.incrFsyncCount();
  }
}
origin: org.apache.hadoop/hadoop-hdfs

if (checksumReceivedLen == 0 && !streams.isTransientStorage()) {
  // no checksum was received; it must be recomputed before the write
}
final boolean shouldNotWriteChecksum = checksumReceivedLen == 0
    && streams.isTransientStorage();
try {
  long onDiskLen = replicaInfo.getBytesOnDisk();
  long begin = Time.monotonicNow();
  streams.writeDataToDisk(dataBuf.array(),
      startByteToDisk, numBytesToDisk);
  long duration = Time.monotonicNow() - begin;
origin: ch.cern.hadoop/hadoop-hdfs

this.diskChecksum = streams.getChecksum();
this.needsChecksumTranslation = !clientChecksum.equals(diskChecksum);
this.bytesPerChecksum = diskChecksum.getBytesPerChecksum();
this.checksumSize = diskChecksum.getChecksumSize();
this.out = streams.getDataOut();
if (out instanceof FileOutputStream) {
  this.outFd = ((FileOutputStream) out).getFD();
}
this.checksumOut = new DataOutputStream(new BufferedOutputStream(
    streams.getChecksumOut(), HdfsConstants.SMALL_BUFFER_SIZE));
origin: ch.cern.hadoop/hadoop-hdfs

if (isSync) {
  long fsyncStartNanos = flushEndNanos;
  streams.syncChecksumOut();
  datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
}
if (isSync) {
  long fsyncStartNanos = flushEndNanos;
  streams.syncDataOut();
  datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
}
origin: org.apache.hadoop/hadoop-hdfs

this.diskChecksum = streams.getChecksum();
this.needsChecksumTranslation = !clientChecksum.equals(diskChecksum);
this.bytesPerChecksum = diskChecksum.getBytesPerChecksum();
this.checksumSize = diskChecksum.getChecksumSize();
this.checksumOut = new DataOutputStream(new BufferedOutputStream(
    streams.getChecksumOut(), DFSUtilClient.getSmallBufferSize(
    datanode.getConf())));
origin: ch.cern.hadoop/hadoop-hdfs

streams = replicaInfo.createStreams(true,
    DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
streams.getChecksumOut().write('a');
dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID + 1));
try {
  // ... Mockito verification elided in the original snippet:
  //     anyBoolean(), any(DatanodeID[].class), any(String[].class));
} finally {
  streams.close();
}
origin: ch.cern.hadoop/hadoop-hdfs

int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
  throws IOException {
 int bytesAdded = 0;
 for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
  ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
  // We pass the expected length as zero; fsdataset should use the size
  // of the data actually written.
  ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
    StorageType.DEFAULT, b, false).getReplica();
  ReplicaOutputStreams out = bInfo.createStreams(true,
    DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
  try {
   OutputStream dataOut  = out.getDataOut();
   assertEquals(0, fsdataset.getLength(b));
   for (int j=1; j <= blockIdToLen(i); ++j) {
    dataOut.write(j);
    assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
    bytesAdded++;
   }
  } finally {
   out.close();
  }
  b.setNumBytes(blockIdToLen(i));
  fsdataset.finalizeBlock(b, false);
  assertEquals(blockIdToLen(i), fsdataset.getLength(b));
 }
 return bytesAdded;  
}
int addSomeBlocks(SimulatedFSDataset fsdataset) throws IOException {
  return addSomeBlocks(fsdataset, 1);
}
origin: org.apache.hadoop/hadoop-hdfs

IOUtils.closeStream(streams.getDataOut());
origin: ch.cern.hadoop/hadoop-hdfs

ReplicaOutputStreams outputStreams =
  rbw.createStreams(false, DEFAULT_CHECKSUM);
OutputStream dataOutput = outputStreams.getDataOut();
origin: ch.cern.hadoop/hadoop-hdfs

@Override
public ReplicaOutputStreams createStreams(boolean isCreate,
  DataChecksum requestedChecksum) throws IOException {
 return new ReplicaOutputStreams(null, null, requestedChecksum, false);
}
origin: ch.cern.hadoop/hadoop-hdfs

if (syncOnClose) {
  long fsyncStartNanos = flushEndNanos;
  streams.syncChecksumOut();
  datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
}
if (syncOnClose) {
  long fsyncStartNanos = flushEndNanos;
  streams.syncDataOut();
  datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
}
origin: linkedin/dynamometer

@Override
synchronized public ReplicaOutputStreams createStreams(boolean isCreate,
  DataChecksum requestedChecksum) throws IOException {
 if (finalized) {
  throw new IOException("Trying to write to a finalized replica "
    + theBlock);
 } else {
  SimulatedOutputStream crcStream = new SimulatedOutputStream();
  return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum,
    getStorage(theBlock).getVolume().isTransientStorage());
 }
}
org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams

Javadoc

Contains the output streams for the data and checksum of a replica.
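A minimal construction sketch using the four-argument constructor that appears in the snippets above (blockFile and metaFile are assumed java.io.File handles for the replica's data and meta files):

// Sketch only: pair a data stream and a checksum stream for one replica.
OutputStream blockOut = new FileOutputStream(blockFile); // replica data file
OutputStream crcOut = new FileOutputStream(metaFile);    // replica meta file
ReplicaOutputStreams streams = new ReplicaOutputStreams(
    blockOut, crcOut,
    DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512),
    false /* not on transient storage */);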

Most used methods

  • <init>
    Create an object with a data output stream, a checksum output stream and a checksum.
  • getChecksumOut
  • getDataOut
  • getChecksum
  • isTransientStorage
  • syncChecksumOut
    Sync the checksum stream if it supports it.
  • syncDataOut
    Sync the data stream if it supports it.
  • close
  • closeDataStream
  • dropCacheBehindWrites
  • flushDataOut
    Flush the data stream if it supports it.
  • getOutFd
  • syncFileRangeIfPossible
  • writeDataToDisk
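A short sketch touching the most common of these methods, assuming streams is an open ReplicaOutputStreams and packet is a byte[] of received data (both names are illustrative):

streams.writeDataToDisk(packet, 0, packet.length); // append the packet bytes
streams.flushDataOut();                            // flush the data stream
streams.syncDataOut();                             // fsync the data stream
streams.syncChecksumOut();                         // fsync the checksum stream
streams.closeDataStream();                         // close only the data stream
streams.close();                                   // close whatever remains open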
