EditsDoubleBuffer.flushTo

How to use the flushTo method in org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer.flushTo (Showing top 11 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Flush ready buffer to persistent store. currentBuffer is not flushed as it
 * accumulates new log records while readyBuffer will be flushed and synced.
 */
@Override
public void flushAndSync(boolean durable) throws IOException {
 if (fp == null) {
  throw new IOException("Trying to use aborted output stream");
 }
 if (doubleBuf.isFlushed()) {
  LOG.info("Nothing to flush");
  return;
 }
 preallocate(); // preallocate file if necessary
 doubleBuf.flushTo(fp);
 if (durable && !shouldSkipFsyncForTests && !shouldSyncWritesAndSkipFsync) {
  fc.force(false); // metadata updates not needed
 }
}
origin: org.apache.hadoop/hadoop-hdfs

@Override // EditLogOutputStream
protected void flushAndSync(boolean durable) throws IOException {
 assert out.getLength() == 0 : "Output buffer is not empty";
 
 if (doubleBuf.isFlushed()) {
  LOG.info("Nothing to flush");
  return;
 }
 int numReadyTxns = doubleBuf.countReadyTxns();
 long firstTxToFlush = doubleBuf.getFirstReadyTxId();
 
 doubleBuf.flushTo(out);
 if (out.getLength() > 0) {
  assert numReadyTxns > 0;
  
  byte[] data = Arrays.copyOf(out.getData(), out.getLength());
  out.reset();
  assert out.getLength() == 0 : "Output buffer is not empty";
  backupNode.journal(journalInfo, 0, firstTxToFlush, numReadyTxns, data);
 }
}
origin: org.apache.hadoop/hadoop-hdfs

buf.flushTo(bufToSend);
assert bufToSend.getLength() == numReadyBytes;
byte[] data = bufToSend.getData();
origin: com.facebook.hadoop/hadoop-core

/**
 * Flush ready buffer to persistent store. currentBuffer is not flushed as it
 * accumulates new log records while readyBuffer will be flushed and synced.
 */
@Override
protected void flushAndSync() throws IOException {
 if (fp == null) {
  throw new IOException("Trying to use aborted output stream");
 }
 preallocate(); // preallocate file if necessary
 if (doubleBuf.isFlushed()) {
  return;
 }
 doubleBuf.flushTo(fp);
 fc.force(false); // metadata updates not needed
 fc.position(fc.position() - 1); // skip back the end-of-file marker
}

origin: ch.cern.hadoop/hadoop-hdfs, io.prestosql.hadoop/hadoop-apache

(Identical flushAndSync implementation to the org.apache.hadoop/hadoop-hdfs snippet above.)
origin: ch.cern.hadoop/hadoop-hdfs, io.prestosql.hadoop/hadoop-apache

(Identical to the org.apache.hadoop/hadoop-hdfs EditLogOutputStream snippet above.)
origin: io.prestosql.hadoop/hadoop-apache

@Override
protected void flushAndSync(boolean durable) throws IOException {
 int numReadyBytes = buf.countReadyBytes();
 if (numReadyBytes > 0) {
  int numReadyTxns = buf.countReadyTxns();
  long firstTxToFlush = buf.getFirstReadyTxId();
  assert numReadyTxns > 0;
  // Copy from our double-buffer into a new byte array. This is for
  // two reasons:
  // 1) The IPC code has no way of specifying to send only a slice of
  //    a larger array.
  // 2) because the calls to the underlying nodes are asynchronous, we
  //    need a defensive copy to avoid accidentally mutating the buffer
  //    before it is sent.
  DataOutputBuffer bufToSend = new DataOutputBuffer(numReadyBytes);
  buf.flushTo(bufToSend);
  assert bufToSend.getLength() == numReadyBytes;
  byte[] data = bufToSend.getData();
  assert data.length == bufToSend.getLength();
  QuorumCall<AsyncLogger, Void> qcall = loggers.sendEdits(
    segmentTxId, firstTxToFlush,
    numReadyTxns, data);
  loggers.waitForWriteQuorum(qcall, writeTimeoutMs, "sendEdits");
  
  // Since we successfully wrote this batch, let the loggers know. Any future
  // RPCs will thus let the loggers know of the most recent transaction, even
  // if a logger has fallen behind.
  loggers.setCommittedTxId(firstTxToFlush + numReadyTxns - 1);
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

(Identical to the io.prestosql.hadoop/hadoop-apache quorum snippet above.)
origin: ch.cern.hadoop/hadoop-hdfs

buf.flushTo(outBuf);
assertEquals(data.length, outBuf.getLength());
assertTrue(buf.isFlushed());

buf.writeRaw(data, 0, data.length);
assertEquals(data.length, buf.countBufferedBytes());
buf.setReadyToFlush();
buf.flushTo(outBuf);
org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer.flushTo

Javadoc

Writes the content of the "ready" buffer to the given output stream, and resets it. Does not swap any buffers.
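Putting the snippets together, the sketch below shows the full flushTo lifecycle against the public EditsDoubleBuffer API. It is a minimal stand-alone illustration, not NameNode code: the buffer size and payload are invented here, and real callers are EditLogOutputStream implementations like the ones above.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class EditsDoubleBufferFlushToExample {
  public static void main(String[] args) throws IOException {
    // Buffer size is arbitrary for this sketch.
    EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);

    byte[] data = new byte[100]; // stand-in for serialized edit-log records
    buf.writeRaw(data, 0, data.length); // lands in the "current" buffer

    buf.setReadyToFlush(); // swap buffers: "current" becomes "ready"

    DataOutputBuffer out = new DataOutputBuffer();
    buf.flushTo(out); // writes and resets only the "ready" buffer

    assert buf.isFlushed();
    assert out.getLength() == data.length;
    buf.close(); // throws if any bytes remain unflushed
  }
}

Note that flushTo performs no fsync of its own; durability is the caller's responsibility, which is why the file-based flushAndSync above follows it with fc.force(false) and the quorum version waits for a write quorum.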

Popular methods of EditsDoubleBuffer

  • <init>
  • close
  • countBufferedBytes
  • isFlushed
  • setReadyToFlush
  • getCurrentBuf
  • shouldForceSync
  • writeOp
  • writeRaw
  • countReadyBytes
  • countReadyTxns
  • getFirstReadyTxId
  • getReadyBuf
