Tabnine Logo
EditsDoubleBuffer.isFlushed
Code IndexAdd Tabnine to your IDE (free)

How to use
isFlushed
method
in
org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer.isFlushed (Showing top 15 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

public void setReadyToFlush() {
 assert isFlushed() : "previous data not flushed yet";
 // Exchange the two buffers: the buffer that has been accumulating edits
 // becomes the one ready to flush, while the (empty, already-flushed)
 // ready buffer becomes the new accumulation target.
 TxnBuffer swapped = bufReady;
 bufReady = bufCurrent;
 bufCurrent = swapped;
}

origin: org.apache.hadoop/hadoop-hdfs

/**
 * There is no persistent storage. Just clear the buffers.
 */
@Override // EditLogOutputStream
public void create(int layoutVersion) throws IOException {
 assert doubleBuf.isFlushed() : "previous data is not flushed yet";
 // Discard the old pair of buffers outright and start from a fresh one.
 EditsDoubleBuffer freshBuffers = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
 this.doubleBuf = freshBuffers;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Flush ready buffer to persistent store. currentBuffer is not flushed as it
 * accumulates new log records while readyBuffer will be flushed and synced.
 */
@Override
public void flushAndSync(boolean durable) throws IOException {
 // The file pointer is nulled out when the stream is aborted.
 if (fp == null) {
  throw new IOException("Trying to use aborted output stream");
 }
 if (doubleBuf.isFlushed()) {
  LOG.info("Nothing to flush");
  return;
 }
 preallocate(); // preallocate file if necessary
 doubleBuf.flushTo(fp);
 // fsync only when the caller demands durability and no test/config
 // flag has asked us to skip it.
 boolean syncToDisk =
   durable && !shouldSkipFsyncForTests && !shouldSyncWritesAndSkipFsync;
 if (syncToDisk) {
  fc.force(false); // metadata updates not needed
 }
}
origin: org.apache.hadoop/hadoop-hdfs

@Override // EditLogOutputStream
protected void flushAndSync(boolean durable) throws IOException {
 assert out.getLength() == 0 : "Output buffer is not empty";

 if (doubleBuf.isFlushed()) {
  LOG.info("Nothing to flush");
  return;
 }
 // Capture the ready-transaction metadata before flushTo() drains the buffer.
 int readyTxnCount = doubleBuf.countReadyTxns();
 long firstReadyTxId = doubleBuf.getFirstReadyTxId();

 doubleBuf.flushTo(out);
 if (out.getLength() > 0) {
  assert readyTxnCount > 0;

  // Snapshot the serialized edits, then reset the scratch buffer so the
  // next flush starts from an empty output buffer.
  byte[] serialized = Arrays.copyOf(out.getData(), out.getLength());
  out.reset();
  assert out.getLength() == 0 : "Output buffer is not empty";
  // Ship the edits to the backup node over the journal protocol.
  backupNode.journal(journalInfo, 0, firstReadyTxId, readyTxnCount, serialized);
 }
}
origin: com.facebook.hadoop/hadoop-core

void setReadyToFlush() {
 assert isFlushed() : "previous data not flushed yet";
 // Exchange the two buffers: the accumulating buffer becomes the one
 // ready to flush; the already-flushed one starts collecting new edits.
 TxnBuffer swapped = bufReady;
 bufReady = bufCurrent;
 bufCurrent = swapped;
}

origin: io.prestosql.hadoop/hadoop-apache

public void setReadyToFlush() {
 assert isFlushed() : "previous data not flushed yet";
 // Swap roles: current buffer (holding pending edits) becomes the ready
 // buffer; the drained ready buffer becomes the new current buffer.
 TxnBuffer swapped = bufReady;
 bufReady = bufCurrent;
 bufCurrent = swapped;
}

origin: ch.cern.hadoop/hadoop-hdfs

public void setReadyToFlush() {
 assert isFlushed() : "previous data not flushed yet";
 // Rotate the buffer pair so the accumulated edits are staged for
 // flushing and a clean buffer receives subsequent writes.
 TxnBuffer staged = bufReady;
 bufReady = bufCurrent;
 bufCurrent = staged;
}

origin: io.prestosql.hadoop/hadoop-apache

/**
 * There is no persistent storage. Just clear the buffers.
 */
@Override // EditLogOutputStream
public void create(int layoutVersion) throws IOException {
 assert doubleBuf.isFlushed() : "previous data is not flushed yet";
 // Replace the buffer pair with a brand-new, empty one.
 EditsDoubleBuffer replacement = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
 this.doubleBuf = replacement;
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * There is no persistent storage. Just clear the buffers.
 */
@Override // EditLogOutputStream
public void create(int layoutVersion) throws IOException {
 assert doubleBuf.isFlushed() : "previous data is not flushed yet";
 // Reset state by allocating a fresh double buffer of the default size.
 EditsDoubleBuffer emptyPair = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
 this.doubleBuf = emptyPair;
}
origin: com.facebook.hadoop/hadoop-core

/**
 * Flush ready buffer to persistent store. currentBuffer is not flushed as it
 * accumulates new log records while readyBuffer will be flushed and synced.
 */
@Override
protected void flushAndSync() throws IOException {
 // The file pointer is nulled out when the stream is aborted.
 if (fp == null) {
  throw new IOException("Trying to use aborted output stream");
 }
 preallocate(); // preallocate file if necessary
 if (doubleBuf.isFlushed()) {
  return;
 }
 doubleBuf.flushTo(fp);
 fc.force(false); // metadata updates not needed
 // Rewind one byte so the end-of-file marker gets overwritten by the
 // next batch of edits.
 long beforeEofMarker = fc.position() - 1;
 fc.position(beforeEofMarker); // skip back the end-of-file marker
}

origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Flush ready buffer to persistent store. currentBuffer is not flushed as it
 * accumulates new log records while readyBuffer will be flushed and synced.
 */
@Override
public void flushAndSync(boolean durable) throws IOException {
 // Aborted streams null out the file pointer; fail fast on reuse.
 if (fp == null) {
  throw new IOException("Trying to use aborted output stream");
 }
 if (doubleBuf.isFlushed()) {
  LOG.info("Nothing to flush");
  return;
 }
 preallocate(); // preallocate file if necessary
 doubleBuf.flushTo(fp);
 // Force to disk only for durable flushes not suppressed by test/config flags.
 boolean wantFsync =
   durable && !shouldSkipFsyncForTests && !shouldSyncWritesAndSkipFsync;
 if (wantFsync) {
  fc.force(false); // metadata updates not needed
 }
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Flush ready buffer to persistent store. currentBuffer is not flushed as it
 * accumulates new log records while readyBuffer will be flushed and synced.
 */
@Override
public void flushAndSync(boolean durable) throws IOException {
 // Guard against use after abort (abort clears the file pointer).
 if (fp == null) {
  throw new IOException("Trying to use aborted output stream");
 }
 if (doubleBuf.isFlushed()) {
  LOG.info("Nothing to flush");
  return;
 }
 preallocate(); // preallocate file if necessary
 doubleBuf.flushTo(fp);
 // Only fsync when durability is requested and no skip flag is set.
 boolean performSync =
   durable && !shouldSkipFsyncForTests && !shouldSyncWritesAndSkipFsync;
 if (performSync) {
  fc.force(false); // metadata updates not needed
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

@Override // EditLogOutputStream
protected void flushAndSync(boolean durable) throws IOException {
 assert out.getLength() == 0 : "Output buffer is not empty";

 if (doubleBuf.isFlushed()) {
  LOG.info("Nothing to flush");
  return;
 }
 // Record the ready-transaction count and first txid before the flush
 // consumes them.
 int txnsToSend = doubleBuf.countReadyTxns();
 long startTxId = doubleBuf.getFirstReadyTxId();

 doubleBuf.flushTo(out);
 if (out.getLength() > 0) {
  assert txnsToSend > 0;

  // Copy the serialized edits out, then clear the scratch buffer for reuse.
  byte[] payload = Arrays.copyOf(out.getData(), out.getLength());
  out.reset();
  assert out.getLength() == 0 : "Output buffer is not empty";
  // Push the batch to the backup node.
  backupNode.journal(journalInfo, 0, startTxId, txnsToSend, payload);
 }
}
origin: io.prestosql.hadoop/hadoop-apache

@Override // EditLogOutputStream
protected void flushAndSync(boolean durable) throws IOException {
 assert out.getLength() == 0 : "Output buffer is not empty";

 if (doubleBuf.isFlushed()) {
  LOG.info("Nothing to flush");
  return;
 }
 // Snapshot txn metadata up front; flushTo() will drain the ready buffer.
 int batchSize = doubleBuf.countReadyTxns();
 long batchFirstTxId = doubleBuf.getFirstReadyTxId();

 doubleBuf.flushTo(out);
 if (out.getLength() > 0) {
  assert batchSize > 0;

  // Extract the bytes and reset the buffer so the next flush starts clean.
  byte[] edits = Arrays.copyOf(out.getData(), out.getLength());
  out.reset();
  assert out.getLength() == 0 : "Output buffer is not empty";
  // Forward the edit batch to the backup node's journal.
  backupNode.journal(journalInfo, 0, batchFirstTxId, batchSize, edits);
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

// Unit-test fragment for EditsDoubleBuffer: a freshly constructed buffer
// reports isFlushed(); buffering raw bytes makes it unflushed; flushTo()
// drains the ready buffer and restores the flushed state.
// NOTE(review): this snippet was truncated by the code-index scrape —
// the statement ending at "buf.isFlushed());" below has lost its opening
// assert call (and buf.setReadyToFlush() likely preceded it); consult the
// original TestEditsDoubleBuffer source before reusing this fragment.
EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
assertTrue(buf.isFlushed());
byte[] data = new byte[100];
buf.writeRaw(data, 0, data.length);
  buf.isFlushed());
assertEquals("Swapping buffers should still count buffered bytes",
  data.length, buf.countBufferedBytes());
assertFalse(buf.isFlushed());
buf.flushTo(outBuf);
assertEquals(data.length, outBuf.getLength());
assertTrue(buf.isFlushed());
assertEquals(0, buf.countBufferedBytes());
org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer.isFlushed

Popular methods of EditsDoubleBuffer

  • <init>
  • close
  • countBufferedBytes
  • flushTo
    Writes the content of the "ready" buffer to the given output stream, and resets it. Does not swap any buffers.
  • setReadyToFlush
  • getCurrentBuf
  • shouldForceSync
  • writeOp
  • writeRaw
  • countReadyBytes
  • countReadyTxns
  • getFirstReadyTxId
  • countReadyTxns,
  • getFirstReadyTxId,
  • getReadyBuf

Popular in Java

  • Creating JSON documents from java classes using gson
  • getApplicationContext (Context)
  • getSupportFragmentManager (FragmentActivity)
  • getExternalFilesDir (Context)
  • Window (java.awt)
    A Window object is a top-level window with no borders and no menubar. The default layout for a window is BorderLayout.
  • BufferedWriter (java.io)
    Wraps an existing Writer and buffers the output. Expensive interaction with the underlying Writer is minimized, since most (smaller) requests can be satisfied by accessing the buffer alone.
  • TimeZone (java.util)
    TimeZone represents a time zone offset, and also figures out daylight savings. Typically, you get a TimeZone using TimeZone.getDefault().
  • Timer (java.util)
    Timers schedule one-shot or recurring TimerTask for execution. Prefer java.util.concurrent.ScheduledThreadPoolExecutor for new code.
  • Executor (java.util.concurrent)
    An object that executes submitted Runnable tasks. This interface provides a way of decoupling task submission from the mechanics of how each task will be run.
  • Pattern (java.util.regex)
    Patterns are compiled regular expressions. In many cases, convenience methods such as String#matches can be used instead.
  • Top Sublime Text plugins
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now