FSEditLogLoader$PositionTrackingInputStream.<init>

How to use the org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader$PositionTrackingInputStream constructor
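Before the indexed results, a minimal sketch of the constructor in use: it wraps any InputStream and counts bytes as they are read, which the snippets below rely on to report offsets into fsimage and edit-log files. This sketch is not from the index; because PositionTrackingInputStream is a package-private inner class of FSEditLogLoader, it assumes code living in the org.apache.hadoop.hdfs.server.namenode package, and the PositionTrackingSketch class, readHeader method, and file path are hypothetical.

// Minimal sketch, assuming access from inside the namenode package,
// since PositionTrackingInputStream is package-private.
package org.apache.hadoop.hdfs.server.namenode;

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

class PositionTrackingSketch {
  static void readHeader(String path) throws IOException {
    // The tracker counts every byte read through it.
    FSEditLogLoader.PositionTrackingInputStream tracker =
        new FSEditLogLoader.PositionTrackingInputStream(
            new BufferedInputStream(new FileInputStream(path)));
    try (DataInputStream in = new DataInputStream(tracker)) {
      int version = in.readInt(); // consumes 4 bytes
      System.out.println("version " + version
          + ", stream offset " + tracker.getPos()); // prints offset 4
    }
  }
}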

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader$PositionTrackingInputStream.<init> (top results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Process image file.
 */
public void go() throws IOException  {
 DataInputStream in = null;
 PositionTrackingInputStream tracker = null;
 ImageLoader fsip = null;
 boolean done = false;
 try {
  tracker = new PositionTrackingInputStream(new BufferedInputStream(
       new FileInputStream(new File(inputFile))));
  in = new DataInputStream(tracker);
  int imageVersionFile = findImageVersion(in);
  fsip = ImageLoader.LoaderFactory.getLoader(imageVersionFile);
  if(fsip == null) 
   throw new IOException("No image processor to read version " +
     imageVersionFile + " is available.");
  fsip.loadImage(in, processor, skipBlocks);
  done = true;
 } finally {
  if (!done) {
   if (tracker != null) {
    LOG.error("image loading failed at offset " + tracker.getPos());
   } else {
    LOG.error("Failed to load image file.");
   }
  }
  IOUtils.cleanupWithLogger(LOG, in, tracker);
 }
}
origin: org.apache.hadoop/hadoop-hdfs

 private void dumpRemainingEditLogs() {
  byte[] buf = this.getData();
  byte[] remainingRawEdits = Arrays.copyOfRange(buf, 0, this.size());
  ByteArrayInputStream bis = new ByteArrayInputStream(remainingRawEdits);
  DataInputStream dis = new DataInputStream(bis);
  FSEditLogLoader.PositionTrackingInputStream tracker =
    new FSEditLogLoader.PositionTrackingInputStream(bis);
  FSEditLogOp.Reader reader = FSEditLogOp.Reader.create(dis, tracker,
    NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  FSEditLogOp op;
  LOG.warn("The edits buffer is " + size() + " bytes long with " + numTxns +
    " unflushed transactions. " +
    "Below is the list of unflushed transactions:");
  int numTransactions = 0;
  try {
   while ((op = reader.readOp(false)) != null) {
    LOG.warn("Unflushed op [" + numTransactions + "]: " + op);
    numTransactions++;
   }
  } catch (IOException ioe) {
   // If any exceptions, print raw bytes and stop.
   LOG.warn("Unable to dump remaining ops. Remaining raw bytes: " +
     Hex.encodeHexString(remainingRawEdits), ioe);
  }
 }
origin: org.apache.hadoop/hadoop-hdfs

void setBytes(byte[] newBytes, int version) throws IOException {
 inner.setData(newBytes);
 tracker = new FSEditLogLoader.PositionTrackingInputStream(inner);
 in = new DataInputStream(tracker);
 this.version = version;
 reader = FSEditLogOp.Reader.create(in, tracker, version);
}
origin: ch.cern.hadoop/hadoop-hdfs

BufferedInputStream bin = new BufferedInputStream(fin);
FSEditLogLoader.PositionTrackingInputStream tracker = 
  new FSEditLogLoader.PositionTrackingInputStream(bin);
try {
 tracker.setLimit(2);
origin: com.facebook.hadoop/hadoop-core

/**
 * Open an EditLogInputStream for the given file.
 * @param name filename to open
 * @param firstTxId first transaction found in file
 * @param lastTxId last transaction id found in file
 * @throws LogHeaderCorruptException if the header is either missing or
 *         appears to be corrupt/truncated
 * @throws IOException if an actual IO error occurs while reading the
 *         header
 */
EditLogFileInputStream(File name, long firstTxId, long lastTxId)
  throws LogHeaderCorruptException, IOException {
 file = name;
 rp = new RandomAccessFile(file, "r");    
 fStream = new FileInputStream(rp.getFD());
 fc = rp.getChannel();
 BufferedInputStream bin = new BufferedInputStream(fStream);  
 tracker = new FSEditLogLoader.PositionTrackingInputStream(bin);  
 DataInputStream in = new DataInputStream(tracker);
 try {
  logVersion = readLogVersion(in);
 } catch (EOFException eofe) {
  throw new LogHeaderCorruptException("No header found in log");
 }
 reader = new FSEditLogOp.Reader(in, logVersion);
 this.firstTxId = firstTxId;
 this.lastTxId = lastTxId;
}
origin: io.prestosql.hadoop/hadoop-apache

fStream = log.getInputStream();
bin = new BufferedInputStream(fStream);
tracker = new FSEditLogLoader.PositionTrackingInputStream(bin);
dataIn = new DataInputStream(tracker);
try {
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Process image file.
 */
public void go() throws IOException  {
 DataInputStream in = null;
 PositionTrackingInputStream tracker = null;
 ImageLoader fsip = null;
 boolean done = false;
 try {
  tracker = new PositionTrackingInputStream(new BufferedInputStream(
       new FileInputStream(new File(inputFile))));
  in = new DataInputStream(tracker);
  int imageVersionFile = findImageVersion(in);
  fsip = ImageLoader.LoaderFactory.getLoader(imageVersionFile);
  if(fsip == null) 
   throw new IOException("No image processor to read version " +
     imageVersionFile + " is available.");
  fsip.loadImage(in, processor, skipBlocks);
  done = true;
 } finally {
  if (!done) {
   // tracker can still be null here if the FileInputStream constructor threw
   if (tracker != null) {
    LOG.error("image loading failed at offset " + tracker.getPos());
   } else {
    LOG.error("Failed to load image file.");
   }
  }
  IOUtils.cleanup(LOG, in, tracker);
 }
}
origin: io.prestosql.hadoop/hadoop-apache

void setBytes(byte[] newBytes, int version) throws IOException {
 inner.setData(newBytes);
 tracker = new FSEditLogLoader.PositionTrackingInputStream(inner);
 in = new DataInputStream(tracker);
 this.version = version;
 reader = new FSEditLogOp.Reader(in, tracker, version);
}
origin: com.facebook.hadoop/hadoop-core

@Override
public void refresh(long position) throws IOException {
 fc.position(position);
 BufferedInputStream bin = new BufferedInputStream(fStream);
 tracker = new FSEditLogLoader.PositionTrackingInputStream(bin, position);    
 DataInputStream in = new DataInputStream(tracker); 
 reader = new FSEditLogOp.Reader(in, logVersion);
}

origin: ch.cern.hadoop/hadoop-hdfs

public EditLogByteInputStream(byte[] data) throws IOException {
 len = data.length;
 input = new ByteArrayInputStream(data);
 BufferedInputStream bin = new BufferedInputStream(input);
 DataInputStream in = new DataInputStream(bin);
 version = EditLogFileInputStream.readLogVersion(in, true);
 tracker = new FSEditLogLoader.PositionTrackingInputStream(in);
 in = new DataInputStream(tracker);
    
 reader = new FSEditLogOp.Reader(in, tracker, version);
}

Popular methods of FSEditLogLoader$PositionTrackingInputStream

  • getPos
  • checkLimit
  • clearLimit
  • close
  • mark
  • read
  • reset
  • setLimit
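A hedged sketch tying these methods together, assuming the behavior the snippets above suggest: getPos() reports the current byte offset, setLimit(long) caps how far reads may advance before an IOException (enforced internally by checkLimit), clearLimit() removes the cap, and mark/reset follow the usual InputStream contract while keeping the tracked offset in sync. The TrackerMethodsSketch class is hypothetical, and as before the code must sit in the org.apache.hadoop.hdfs.server.namenode package.

package org.apache.hadoop.hdfs.server.namenode;

import java.io.ByteArrayInputStream;
import java.io.IOException;

class TrackerMethodsSketch {
  static void demo() throws IOException {
    byte[] data = new byte[16];
    FSEditLogLoader.PositionTrackingInputStream tracker =
        new FSEditLogLoader.PositionTrackingInputStream(
            new ByteArrayInputStream(data));

    tracker.setLimit(8);  // assumed: at most 8 more bytes may be read
    tracker.mark(8);      // remember the current offset (0)
    int n = tracker.read(new byte[4], 0, 4);
    System.out.println(n + " bytes read, offset " + tracker.getPos()); // 4
    tracker.reset();      // rewind to the mark; the tracked offset rewinds too
    tracker.clearLimit(); // drop the read cap
    tracker.close();
  }
}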
