
How to use EditsDoubleBuffer in org.apache.hadoop.hdfs.server.namenode

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

/**
 * There is no persistent storage. Just clear the buffers.
 */
@Override // EditLogOutputStream
public void create(int layoutVersion) throws IOException {
 assert doubleBuf.isFlushed() : "previous data is not flushed yet";
 this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
}
origin: org.apache.hadoop/hadoop-hdfs

@Override // EditLogOutputStream
public void close() throws IOException {
 // close should have been called after all pending transactions 
 // have been flushed & synced.
 int size = doubleBuf.countBufferedBytes();
 if (size != 0) {
  throw new IOException("BackupEditStream has " + size +
            " records still to be flushed and cannot be closed.");
 } 
 RPC.stopProxy(backupNode); // stop the RPC threads
 doubleBuf.close();
 doubleBuf = null;
}
origin: org.apache.hadoop/hadoop-hdfs

@Override // EditLogOutputStream
protected void flushAndSync(boolean durable) throws IOException {
 assert out.getLength() == 0 : "Output buffer is not empty";
 
 if (doubleBuf.isFlushed()) {
  LOG.info("Nothing to flush");
  return;
 }
 int numReadyTxns = doubleBuf.countReadyTxns();
 long firstTxToFlush = doubleBuf.getFirstReadyTxId();
 
 doubleBuf.flushTo(out);
 if (out.getLength() > 0) {
  assert numReadyTxns > 0;
  
  byte[] data = Arrays.copyOf(out.getData(), out.getLength());
  out.reset();
  assert out.getLength() == 0 : "Output buffer is not empty";
  backupNode.journal(journalInfo, 0, firstTxToFlush, numReadyTxns, data);
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Flush ready buffer to persistent store. currentBuffer is not flushed as it
 * accumulates new log records while readyBuffer will be flushed and synced.
 */
@Override
public void flushAndSync(boolean durable) throws IOException {
 if (fp == null) {
  throw new IOException("Trying to use aborted output stream");
 }
 if (doubleBuf.isFlushed()) {
  LOG.info("Nothing to flush");
  return;
 }
 preallocate(); // preallocate file if necessary
 doubleBuf.flushTo(fp);
 if (durable && !shouldSkipFsyncForTests && !shouldSyncWritesAndSkipFsync) {
  fc.force(false); // metadata updates not needed
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

@Test
public void testDoubleBuffer() throws IOException {
 EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
 assertTrue(buf.isFlushed());
 byte[] data = new byte[100];
 buf.writeRaw(data, 0, data.length);
 assertEquals("Should count new data correctly",
   data.length, buf.countBufferedBytes());
   buf.isFlushed());
 buf.setReadyToFlush();
 assertEquals("Swapping buffers should still count buffered bytes",
   data.length, buf.countBufferedBytes());
 assertFalse(buf.isFlushed());
 DataOutputBuffer outBuf = new DataOutputBuffer();
 buf.flushTo(outBuf);
 assertEquals(data.length, outBuf.getLength());
 assertTrue(buf.isFlushed());
 assertEquals(0, buf.countBufferedBytes());
 buf.writeRaw(data, 0, data.length);
 assertEquals("Should count new data correctly",
   data.length, buf.countBufferedBytes());
 buf.setReadyToFlush();
 buf.flushTo(outBuf);
 assertEquals(0, buf.countBufferedBytes());
 assertEquals(data.length * 2, outBuf.getLength());
 assertTrue(buf.isFlushed());
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
protected void flushAndSync(boolean durable) throws IOException {
 int numReadyBytes = buf.countReadyBytes();
 if (numReadyBytes > 0) {
  int numReadyTxns = buf.countReadyTxns();
  long firstTxToFlush = buf.getFirstReadyTxId();
  buf.flushTo(bufToSend);
  assert bufToSend.getLength() == numReadyBytes;
  byte[] data = bufToSend.getData();
origin: ch.cern.hadoop/hadoop-hdfs

 @Test
 public void shouldFailToCloseWhenUnflushed() throws IOException {
  EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
  buf.writeRaw(new byte[1], 0, 1);
  try {
   buf.close();
   fail("Did not fail to close with unflushed data");
  } catch (IOException ioe) {
   if (!ioe.toString().contains("still to be flushed")) {
    throw ioe;
   }
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

public QuorumOutputStream(AsyncLoggerSet loggers,
  long txId, int outputBufferCapacity,
  int writeTimeoutMs, boolean updateCommittedTxId) throws IOException {
 super();
 this.buf = new EditsDoubleBuffer(outputBufferCapacity);
 this.loggers = loggers;
 this.segmentTxId = txId;
 this.writeTimeoutMs = writeTimeoutMs;
 this.updateCommittedTxId = updateCommittedTxId;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * All data that has been written to the stream so far will be flushed. New
 * data can still be written to the stream while flushing is performed.
 */
@Override
public void setReadyToFlush() throws IOException {
 doubleBuf.setReadyToFlush();
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public void close() throws IOException {
 if (buf != null) {
  buf.close();
  buf = null;
 }
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public void write(FSEditLogOp op) throws IOException {
 buf.writeOp(op);
}
origin: com.facebook.hadoop/hadoop-core

/**
 * All data that has been written to the stream so far will be flushed. New
 * data can still be written to the stream while flushing is performed.
 */
@Override
public void setReadyToFlush() throws IOException {
 doubleBuf.getCurrentBuf().write(FSEditLogOpCodes.OP_INVALID.getOpCode()); // insert eof marker
 doubleBuf.setReadyToFlush();
}
origin: org.apache.hadoop/hadoop-hdfs

public void setReadyToFlush() {
 assert isFlushed() : "previous data not flushed yet";
 TxnBuffer tmp = bufReady;
 bufReady = bufCurrent;
 bufCurrent = tmp;
}

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Write a transaction to the stream. The serialization format is:
 * <ul>
 *   <li>the opcode (byte)</li>
 *   <li>the transaction id (long)</li>
 *   <li>the actual Writables for the transaction</li>
 * </ul>
 * */
@Override
public void writeRaw(byte[] bytes, int offset, int length) throws IOException {
 doubleBuf.writeRaw(bytes, offset, length);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Create empty edits logs file.
 */
@Override
public void create(int layoutVersion) throws IOException {
 fc.truncate(0);
 fc.position(0);
 writeHeader(layoutVersion, doubleBuf.getCurrentBuf());
 setReadyToFlush();
 flush();
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * @return true if the amount of buffered data exceeds the initial buffer size
 */
@Override
public boolean shouldForceSync() {
 return doubleBuf.shouldForceSync();
}
origin: com.facebook.hadoop/hadoop-core

/**
 * Return the size of the current edit log including buffered data.
 */
@Override
long length() throws IOException {
 // file size + size of both buffers
 return fc.size() + doubleBuf.countBufferedBytes();
}
origin: io.prestosql.hadoop/hadoop-apache

@Override
protected void flushAndSync(boolean durable) throws IOException {
 int numReadyBytes = buf.countReadyBytes();
 if (numReadyBytes > 0) {
  int numReadyTxns = buf.countReadyTxns();
  long firstTxToFlush = buf.getFirstReadyTxId();
  assert numReadyTxns > 0;
  // Copy from our double-buffer into a new byte array. This is for
  // two reasons:
  // 1) The IPC code has no way of specifying to send only a slice of
  //    a larger array.
  // 2) because the calls to the underlying nodes are asynchronous, we
  //    need a defensive copy to avoid accidentally mutating the buffer
  //    before it is sent.
  DataOutputBuffer bufToSend = new DataOutputBuffer(numReadyBytes);
  buf.flushTo(bufToSend);
  assert bufToSend.getLength() == numReadyBytes;
  byte[] data = bufToSend.getData();
  assert data.length == bufToSend.getLength();
  QuorumCall<AsyncLogger, Void> qcall = loggers.sendEdits(
    segmentTxId, firstTxToFlush,
    numReadyTxns, data);
  loggers.waitForWriteQuorum(qcall, writeTimeoutMs, "sendEdits");
  
  // Since we successfully wrote this batch, let the loggers know. Any future
  // RPCs will thus let the loggers know of the most recent transaction, even
  // if a logger has fallen behind.
  loggers.setCommittedTxId(firstTxToFlush + numReadyTxns - 1);
 }
}
origin: com.facebook.hadoop/hadoop-core

/**
 * Flush ready buffer to persistent store. currentBuffer is not flushed as it
 * accumulates new log records while readyBuffer will be flushed and synced.
 */
@Override
protected void flushAndSync() throws IOException {
 if (fp == null) {
  throw new IOException("Trying to use aborted output stream");
 }
 preallocate(); // preallocate file if necessary
 if (doubleBuf.isFlushed()) {
  return;
 }
 doubleBuf.flushTo(fp);
 fc.force(false); // metadata updates not needed
 fc.position(fc.position() - 1); // skip back the end-of-file marker         
}

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Creates output buffers and file object.
 * 
 * @param conf
 *          Configuration object
 * @param name
 *          File name to store edit log
 * @param size
 *          Size of flush buffer
 * @throws IOException
 */
public EditLogFileOutputStream(Configuration conf, File name, int size)
  throws IOException {
 super();
 shouldSyncWritesAndSkipFsync = conf.getBoolean(
     DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH,
     DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH_DEFAULT);
 file = name;
 doubleBuf = new EditsDoubleBuffer(size);
 RandomAccessFile rp;
 if (shouldSyncWritesAndSkipFsync) {
  rp = new RandomAccessFile(name, "rws");
 } else {
  rp = new RandomAccessFile(name, "rw");
 }
 fp = new FileOutputStream(rp.getFD()); // open for append
 fc = rp.getChannel();
 fc.position(fc.size());
}
org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer

Javadoc

A double-buffer for edits. New edits are written into the first buffer while the second is available to be flushed. Each time the double-buffer is flushed, the two internal buffers are swapped. This allows edits to progress concurrently to flushes without allocating new buffers each time.
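
Taken together, the snippets above all follow the same cycle: write edits into the current buffer, swap buffers with setReadyToFlush(), then drain the ready buffer with flushTo(). Below is a minimal sketch of that cycle, modeled on the testDoubleBuffer snippet; the DataOutputBuffer sink and the local variable names are illustrative only, not part of the class.

EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);  // initial size of each internal buffer
DataOutputBuffer sink = new DataOutputBuffer();       // stand-in for the real target (file, backup node RPC, quorum)

byte[] record = new byte[100];
buf.writeRaw(record, 0, record.length);  // lands in the "current" buffer
// isFlushed() checks only the "ready" buffer, so it is still true here,
// while countBufferedBytes() now reports 100.

buf.setReadyToFlush();  // swap: the current buffer becomes the ready buffer
buf.flushTo(sink);      // drain the ready buffer into the sink; does not swap again
// isFlushed() is true again and countBufferedBytes() is back to 0.

buf.close();            // throws IOException if unflushed data remains (see shouldFailToCloseWhenUnflushed)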

Most used methods

  • <init>
  • close
  • countBufferedBytes
  • flushTo
    Writes the content of the "ready" buffer to the given output stream, and resets it. Does not swap any buffers.
  • isFlushed
  • setReadyToFlush
  • getCurrentBuf
  • shouldForceSync
  • writeOp
  • writeRaw
  • countReadyBytes
  • countReadyTxns
  • getFirstReadyTxId
  • getReadyBuf
