EditsDoubleBuffer.<init>

How to use the org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer constructor

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer.<init> (Showing top 15 results out of 315)
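
Before the indexed results, a minimal hypothetical sketch of the constructor and the write/swap/flush lifecycle may help. It assumes only the methods listed under "Popular methods" further down this page; the 1 KB capacity and the ByteArrayOutputStream sink are illustrative choices, not values taken from HDFS.

import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer;

public class EditsDoubleBufferLifecycle {
 public static void main(String[] args) throws IOException {
  // The constructor takes the initial capacity, in bytes, of each of
  // the two internal buffers ("current" and "ready").
  EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);

  // Writes always land in the "current" buffer.
  buf.writeRaw(new byte[] {1, 2, 3}, 0, 3);

  // Swap: "current" becomes "ready", and new writes can keep arriving
  // while the ready side is drained.
  buf.setReadyToFlush();

  // Drain the ready buffer to any OutputStream; this does not swap again.
  ByteArrayOutputStream sink = new ByteArrayOutputStream();
  buf.flushTo(sink);

  // close() only succeeds once no buffered data remains.
  buf.close();
  System.out.println("flushed " + sink.size() + " bytes");
 }
}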

origin: org.apache.hadoop/hadoop-hdfs

public QuorumOutputStream(AsyncLoggerSet loggers,
  long txId, int outputBufferCapacity,
  int writeTimeoutMs, boolean updateCommittedTxId) throws IOException {
 super();
 this.buf = new EditsDoubleBuffer(outputBufferCapacity);
 this.loggers = loggers;
 this.segmentTxId = txId;
 this.writeTimeoutMs = writeTimeoutMs;
 this.updateCommittedTxId = updateCommittedTxId;
}
origin: org.apache.hadoop/hadoop-hdfs (identical snippet also indexed from ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache)

/**
 * There is no persistent storage. Just clear the buffers.
 */
@Override // EditLogOutputStream
public void create(int layoutVersion) throws IOException {
 assert doubleBuf.isFlushed() : "previous data is not flushed yet";
 this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
}
origin: org.apache.hadoop/hadoop-hdfs (identical snippet also indexed from ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache)

/**
 * Creates output buffers and file object.
 * 
 * @param conf
 *          Configuration object
 * @param name
 *          File name to store edit log
 * @param size
 *          Size of flush buffer
 * @throws IOException
 */
public EditLogFileOutputStream(Configuration conf, File name, int size)
  throws IOException {
 super();
 shouldSyncWritesAndSkipFsync = conf.getBoolean(
     DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH,
     DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH_DEFAULT);
 file = name;
 doubleBuf = new EditsDoubleBuffer(size);
 RandomAccessFile rp;
 if (shouldSyncWritesAndSkipFsync) {
  rp = new RandomAccessFile(name, "rws");
 } else {
  rp = new RandomAccessFile(name, "rw");
 }
 fp = new FileOutputStream(rp.getFD()); // open for append
 fc = rp.getChannel();
 fc.position(fc.size());
}
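
A side note on the mode strings above: per the java.io.RandomAccessFile contract, "rws" writes every update to the file's content and metadata synchronously to the underlying storage, which is why the stream can then skip explicit channel flushes, while plain "rw" leaves durability to an explicit FileChannel.force() (fsync). A small hypothetical sketch (the /tmp/edits path is made up for illustration):

import java.io.IOException;
import java.io.RandomAccessFile;

public class SyncModeSketch {
 public static void main(String[] args) throws IOException {
  // "rws": each write is pushed synchronously to storage (content + metadata).
  try (RandomAccessFile synced = new RandomAccessFile("/tmp/edits", "rws")) {
   synced.write(1); // durable once this call returns
  }
  // "rw": writes may sit in OS caches until an explicit fsync.
  try (RandomAccessFile plain = new RandomAccessFile("/tmp/edits", "rw")) {
   plain.write(2);
   plain.getChannel().force(true); // fsync data and metadata now
  }
 }
}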
origin: org.apache.hadoop/hadoop-hdfs (identical snippet also indexed from ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache)

EditLogBackupOutputStream(NamenodeRegistration bnReg, // backup node
             JournalInfo journalInfo) // active name-node
throws IOException {
 super();
 this.bnRegistration = bnReg;
 this.journalInfo = journalInfo;
 InetSocketAddress bnAddress =
  NetUtils.createSocketAddr(bnRegistration.getAddress());
 try {
  this.backupNode = NameNodeProxies.createNonHAProxy(new HdfsConfiguration(),
    bnAddress, JournalProtocol.class, UserGroupInformation.getCurrentUser(),
    true).getProxy();
 } catch(IOException e) {
  Storage.LOG.error("Error connecting to: " + bnAddress, e);
  throw e;
 }
 this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
 this.out = new DataOutputBuffer(DEFAULT_BUFFER_SIZE);
}

origin: ch.cern.hadoop/hadoop-hdfs (identical snippet also indexed from io.prestosql.hadoop/hadoop-apache)

public QuorumOutputStream(AsyncLoggerSet loggers,
  long txId, int outputBufferCapacity,
  int writeTimeoutMs) throws IOException {
 super();
 this.buf = new EditsDoubleBuffer(outputBufferCapacity);
 this.loggers = loggers;
 this.segmentTxId = txId;
 this.writeTimeoutMs = writeTimeoutMs;
}
origin: com.facebook.hadoop/hadoop-core

// ... (snippet truncated in the index; an earlier line references
//      FSEditLog.maxBufferedTransactions)
file = name;
doubleBuf = new EditsDoubleBuffer(FSEditLog.sizeFlushBuffer);
RandomAccessFile rp = new RandomAccessFile(name, "rw");
fp = new FileOutputStream(rp.getFD()); // open for append
origin: ch.cern.hadoop/hadoop-hdfs

 @Test
 public void shouldFailToCloseWhenUnflushed() throws IOException {
  EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
  buf.writeRaw(new byte[1], 0, 1);
  try {
   buf.close();
   fail("Did not fail to close with unflushed data");
  } catch (IOException ioe) {
   if (!ioe.toString().contains("still to be flushed")) {
    throw ioe;
   }
  }
 }
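
For contrast with the failure above, a hypothetical sketch of the shutdown order that close() expects: swap, drain, then close.

import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer;

public class SafeCloseSketch {
 public static void main(String[] args) throws IOException {
  EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
  buf.writeRaw(new byte[1], 0, 1);
  buf.setReadyToFlush();                    // swap current -> ready
  buf.flushTo(new ByteArrayOutputStream()); // drain the ready buffer
  buf.close();                              // succeeds: nothing left to flush
 }
}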
origin: ch.cern.hadoop/hadoop-hdfs

@Test
public void testDoubleBuffer() throws IOException {
 EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
 // ... (snippet truncated in the index)
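
The index truncates this test, so the sketch below is a hedged reconstruction of the state transitions such a test would plausibly assert, using only methods listed under "Popular methods" below (run with -ea to enable assertions):

import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer;

public class DoubleBufferStateSketch {
 public static void main(String[] args) throws IOException {
  EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
  assert buf.isFlushed();                  // nothing pending yet

  byte[] data = new byte[100];
  buf.writeRaw(data, 0, data.length);
  assert buf.countBufferedBytes() == data.length;
  assert buf.isFlushed();                  // ready side is still empty

  buf.setReadyToFlush();                   // swap current -> ready
  assert !buf.isFlushed();                 // ready side now holds data

  ByteArrayOutputStream out = new ByteArrayOutputStream();
  buf.flushTo(out);                        // drain; does not swap again
  assert buf.isFlushed();
  assert out.size() == data.length;

  buf.close();
 }
}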

Popular methods of EditsDoubleBuffer

  • close
  • countBufferedBytes
  • flushTo
    Writes the content of the "ready" buffer to the given output stream, and resets it. Does not swap any buffers.
  • isFlushed
  • setReadyToFlush
  • getCurrentBuf
  • shouldForceSync
  • writeOp
  • writeRaw
  • countReadyBytes
  • countReadyTxns
  • getFirstReadyTxId
  • getReadyBuf
