FSEditLogLoader$PositionTrackingInputStream.<init>

How to use the org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader$PositionTrackingInputStream constructor

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader$PositionTrackingInputStream.<init> (showing 10 representative results out of 315)
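
Before the indexed snippets, here is a minimal, hedged sketch of typical construction. The class wraps any InputStream and counts bytes as they are consumed, so getPos() reports the current offset. The file name below is a placeholder, and the example assumes the nested class is reachable from your code (it is HDFS-internal API).

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.PositionTrackingInputStream;

public class TrackerSketch {
 public static void main(String[] args) throws IOException {
  // "edits.bin" is a hypothetical file name used only for illustration.
  PositionTrackingInputStream tracker = new PositionTrackingInputStream(
    new BufferedInputStream(new FileInputStream("edits.bin")));
  try (DataInputStream in = new DataInputStream(tracker)) {
   in.readInt();                         // consumes four bytes
   System.out.println(tracker.getPos()); // prints 4
  }
 }
}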

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Process image file.
 */
public void go() throws IOException  {
 DataInputStream in = null;
 PositionTrackingInputStream tracker = null;
 ImageLoader fsip = null;
 boolean done = false;
 try {
  tracker = new PositionTrackingInputStream(new BufferedInputStream(
       new FileInputStream(new File(inputFile))));
  in = new DataInputStream(tracker);
  int imageVersionFile = findImageVersion(in);
  fsip = ImageLoader.LoaderFactory.getLoader(imageVersionFile);
  if(fsip == null) 
   throw new IOException("No image processor to read version " +
     imageVersionFile + " is available.");
  fsip.loadImage(in, processor, skipBlocks);
  done = true;
 } finally {
  if (!done) {
   if (tracker != null) {
    LOG.error("image loading failed at offset " + tracker.getPos());
   } else {
    LOG.error("Failed to load image file.");
   }
  }
  IOUtils.cleanupWithLogger(LOG, in, tracker);
 }
}
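
In this loader, the tracker exists purely for diagnostics: if loading fails before done is set, tracker.getPos() reports how many bytes were successfully consumed, which localizes the corrupt region of the image file.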
origin: org.apache.hadoop/hadoop-hdfs

 private void dumpRemainingEditLogs() {
  byte[] buf = this.getData();
  byte[] remainingRawEdits = Arrays.copyOfRange(buf, 0, this.size());
  ByteArrayInputStream bis = new ByteArrayInputStream(remainingRawEdits);
  DataInputStream dis = new DataInputStream(bis);
  FSEditLogLoader.PositionTrackingInputStream tracker =
    new FSEditLogLoader.PositionTrackingInputStream(bis);
  FSEditLogOp.Reader reader = FSEditLogOp.Reader.create(dis, tracker,
    NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  FSEditLogOp op;
  LOG.warn("The edits buffer is " + size() + " bytes long with " + numTxns +
    " unflushed transactions. " +
    "Below is the list of unflushed transactions:");
  int numTransactions = 0;
  try {
   while ((op = reader.readOp(false)) != null) {
    LOG.warn("Unflushed op [" + numTransactions + "]: " + op);
    numTransactions++;
   }
  } catch (IOException ioe) {
   // If any exceptions, print raw bytes and stop.
   LOG.warn("Unable to dump remaining ops. Remaining raw bytes: " +
     Hex.encodeHexString(remainingRawEdits), ioe);
  }
 }
origin: org.apache.hadoop/hadoop-hdfs

void setBytes(byte[] newBytes, int version) throws IOException {
 inner.setData(newBytes);
 tracker = new FSEditLogLoader.PositionTrackingInputStream(inner);
 in = new DataInputStream(tracker);
 this.version = version;
 reader = FSEditLogOp.Reader.create(in, tracker, version);
}
origin: ch.cern.hadoop/hadoop-hdfs

BufferedInputStream bin = new BufferedInputStream(fin);
FSEditLogLoader.PositionTrackingInputStream tracker = 
  new FSEditLogLoader.PositionTrackingInputStream(bin);
try {
 tracker.setLimit(2);
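
The snippet above stops just after setLimit(2). As a hedged sketch of the limit mechanism (assuming the FSEditLogLoader semantics, in which setLimit(n) permits at most n further bytes, reading past that throws an IOException, and clearLimit() lifts the cap):

// Hypothetical fragment, not taken from the indexed sources.
FSEditLogLoader.PositionTrackingInputStream tracker =
  new FSEditLogLoader.PositionTrackingInputStream(
    new java.io.ByteArrayInputStream(new byte[] {1, 2, 3}));
tracker.setLimit(2);  // allow at most two more bytes
tracker.read();       // OK: first byte
tracker.read();       // OK: second byte
try {
 tracker.read();      // a third byte would exceed the limit
} catch (java.io.IOException expected) {
 // the stream refuses to read past the limit (exact message may vary)
}
tracker.clearLimit(); // lift the cap; subsequent reads proceed normally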
origin: com.facebook.hadoop/hadoop-core

/**
 * Open an EditLogInputStream for the given file.
 * @param name filename to open
 * @param firstTxId first transaction found in file
 * @param lastTxId last transaction id found in file
 * @throws LogHeaderCorruptException if the header is either missing or
 *         appears to be corrupt/truncated
 * @throws IOException if an actual IO error occurs while reading the
 *         header
 */
EditLogFileInputStream(File name, long firstTxId, long lastTxId)
  throws LogHeaderCorruptException, IOException {
 file = name;
 rp = new RandomAccessFile(file, "r");    
 fStream = new FileInputStream(rp.getFD());
 fc = rp.getChannel();
 BufferedInputStream bin = new BufferedInputStream(fStream);  
 tracker = new FSEditLogLoader.PositionTrackingInputStream(bin);  
 DataInputStream in = new DataInputStream(tracker);
 try {
  logVersion = readLogVersion(in);
 } catch (EOFException eofe) {
  throw new LogHeaderCorruptException("No header found in log");
 }
 reader = new FSEditLogOp.Reader(in, logVersion);
 this.firstTxId = firstTxId;
 this.lastTxId = lastTxId;
}
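
Note that the tracker is installed underneath the DataInputStream before the version header is read, so later getPos() calls report absolute offsets within the edit log file, header bytes included.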
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Process image file.
 */
public void go() throws IOException  {
 DataInputStream in = null;
 PositionTrackingInputStream tracker = null;
 ImageLoader fsip = null;
 boolean done = false;
 try {
  tracker = new PositionTrackingInputStream(new BufferedInputStream(
       new FileInputStream(new File(inputFile))));
  in = new DataInputStream(tracker);
  int imageVersionFile = findImageVersion(in);
  fsip = ImageLoader.LoaderFactory.getLoader(imageVersionFile);
  if(fsip == null) 
   throw new IOException("No image processor to read version " +
     imageVersionFile + " is available.");
  fsip.loadImage(in, processor, skipBlocks);
  done = true;
 } finally {
  if (!done) {
   LOG.error("image loading failed at offset " + tracker.getPos());
  }
  IOUtils.cleanup(LOG, in, tracker);
 }
}
origin: io.prestosql.hadoop/hadoop-apache

void setBytes(byte[] newBytes, int version) throws IOException {
 inner.setData(newBytes);
 tracker = new FSEditLogLoader.PositionTrackingInputStream(inner);
 in = new DataInputStream(tracker);
 this.version = version;
 reader = new FSEditLogOp.Reader(in, tracker, version);
}
origin: com.facebook.hadoop/hadoop-core

@Override
public void refresh(long position) throws IOException {
 fc.position(position);
 BufferedInputStream bin = new BufferedInputStream(fStream);
 tracker = new FSEditLogLoader.PositionTrackingInputStream(bin, position);    
 DataInputStream in = new DataInputStream(tracker); 
 reader = new FSEditLogOp.Reader(in, logVersion);
}
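
Note the two-argument constructor in this fork: after the file channel is repositioned with fc.position(position), the second argument evidently seeds the tracker's starting offset so that getPos() keeps reporting absolute file positions instead of restarting at zero. The other snippets on this page use only the single-argument form.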

origin: ch.cern.hadoop/hadoop-hdfs

public EditLogByteInputStream(byte[] data) throws IOException {
 len = data.length;
 input = new ByteArrayInputStream(data);
 BufferedInputStream bin = new BufferedInputStream(input);
 DataInputStream in = new DataInputStream(bin);
 version = EditLogFileInputStream.readLogVersion(in, true);
 tracker = new FSEditLogLoader.PositionTrackingInputStream(in);
 in = new DataInputStream(tracker);
    
 reader = new FSEditLogOp.Reader(in, tracker, version);
}
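
Here the ordering is reversed: the version is read through a plain DataInputStream first, and the tracker is wrapped around it afterwards, so tracked positions start at zero just past the header.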
origin: org.apache.hadoop/hadoop-hdfs

fStream = log.getInputStream();
bin = new BufferedInputStream(fStream);
tracker = new FSEditLogLoader.PositionTrackingInputStream(bin);
dataIn = new DataInputStream(tracker);
try {

Popular methods of FSEditLogLoader$PositionTrackingInputStream (a brief sketch exercising several of these follows the list):

  • getPos
  • checkLimit
  • clearLimit
  • close
  • mark
  • read
  • reset
  • setLimit
