
How to use SaveNamespaceContext in org.apache.hadoop.hdfs.server.namenode

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

if (canceler == null) {
  canceler = new Canceler();
}
SaveNamespaceContext ctx = new SaveNamespaceContext(
    source, txid, canceler);
// ... saver threads write the image to each storage directory (elided) ...
storage.reportErrorsOnDirectories(ctx.getErrorSDs());
if (canceler.isCancelled()) {
  ctx.checkCancelled(); // throws SaveNamespaceCancelledException
  assert false : "should have thrown above!";
}
ctx.markComplete();
ctx = null;
origin: org.apache.hadoop/hadoop-hdfs

void save(File newFile, FSImageCompression compression) throws IOException {
  checkNotSaved();
  final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
  final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
  final long numINodes = rootDir.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getNameSpace(); // continuation reconstructed
  // ... sdPath, blockIdManager and the fout/out output streams are set up
  // here, and the image header is written (elided in this snippet) ...
  try {
    out.writeLong(blockIdManager.getGenerationStampAtblockIdSwitch());
    out.writeLong(blockIdManager.getLastAllocatedContiguousBlockId());
    out.writeLong(context.getTxId());
    out.writeLong(sourceNamesystem.dir.getLastInodeId());
    // ... namespace tree serialization elided ...
    context.checkCancelled();
    sourceNamesystem.saveSecretManagerStateCompat(out, sdPath);
    context.checkCancelled();
    sourceNamesystem.getCacheManager().saveStateCompat(out, sdPath);
    context.checkCancelled();
    out.flush();
    context.checkCancelled();
    fout.getChannel().force(true);
  } finally {
    // stream cleanup (elided)
  }
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public void run() {
 try {
  saveFSImage(context, sd, nnf);
 } catch (SaveNamespaceCancelledException snce) {
  LOG.info("Cancelled image saving for " + sd.getRoot() +
    ": " + snce.getMessage());
  // don't report an error on the storage dir!
 } catch (Throwable t) {
  LOG.error("Unable to save image for " + sd.getRoot(), t);
  context.reportErrorOnStorageDirectory(sd);
 }
}

origin: org.apache.hadoop/hadoop-hdfs

FileSummary.Builder b = FileSummary.newBuilder() // receiver reconstructed
    .setOndiskVersion(FSImageUtil.FILE_VERSION)
    .setLayoutVersion(
        context.getSourceNamesystem().getEffectiveLayoutVersion());
context.checkCancelled();
origin: org.apache.hadoop/hadoop-hdfs

context.checkCancelled();
origin: org.apache.hadoop/hadoop-hdfs

private void saveNameSystemSection(FileSummary.Builder summary)
  throws IOException {
 final FSNamesystem fsn = context.getSourceNamesystem();
 OutputStream out = sectionOutputStream;
 BlockIdManager blockIdManager = fsn.getBlockManager().getBlockIdManager();
 NameSystemSection.Builder b = NameSystemSection.newBuilder()
   .setGenstampV1(blockIdManager.getLegacyGenerationStamp())
   .setGenstampV1Limit(blockIdManager.getLegacyGenerationStampLimit())
   .setGenstampV2(blockIdManager.getGenerationStamp())
   .setLastAllocatedBlockId(blockIdManager.getLastAllocatedContiguousBlockId())
   .setLastAllocatedStripedBlockId(blockIdManager.getLastAllocatedStripedBlockId())
   .setTransactionId(context.getTxId());
 // We use the non-locked version of getNamespaceInfo here since
 // the coordinating thread of saveNamespace already has read-locked
 // the namespace for us. If we attempt to take another readlock
 // from the actual saver thread, there's a potential of a
 // fairness-related deadlock. See the comments on HDFS-2223.
 b.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID());
 if (fsn.isRollingUpgrade()) {
  b.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime());
 }
 NameSystemSection s = b.build();
 s.writeDelimitedTo(out);
 commitSection(summary, SectionName.NS_INFO);
}
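
The lock-fairness comment in this snippet (see HDFS-2223) is worth unpacking: with a fair read-write lock, a new read-lock request queues behind any already-waiting writer, even while another thread still holds the read lock, so a saver thread that takes its own read lock can deadlock against its coordinator. A minimal, self-contained sketch of that failure mode using plain java.util.concurrent (illustrative only, not HDFS code; the class and thread names are made up):

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class FairnessDeadlockSketch {
  public static void main(String[] args) throws InterruptedException {
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true); // fair mode

    lock.readLock().lock(); // "coordinator" thread holds the namespace read lock

    // A writer queues up behind the held read lock.
    Thread writer = new Thread(() -> {
      lock.writeLock().lock();
      lock.writeLock().unlock();
    });
    writer.start();
    Thread.sleep(200); // give the writer time to enqueue

    // A "saver" thread now asks for a second read lock. Under fair
    // ordering it must wait behind the queued writer, so it blocks
    // even though a read lock is already held.
    Thread saver = new Thread(() -> {
      lock.readLock().lock();
      lock.readLock().unlock();
    });
    saver.start();
    saver.join(1000);

    // If the coordinator waited on the saver before releasing its
    // read lock, neither thread could ever make progress.
    System.out.println("saver still blocked: " + saver.isAlive()); // true

    lock.readLock().unlock(); // release: writer, then saver, can now proceed
    writer.join();
    saver.join();
  }
}

This is why the section savers piggyback on the read lock the coordinating thread already holds instead of acquiring their own.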
origin: org.apache.hadoop/hadoop-hdfs

Saver(FSImageFormatProtobuf.Saver parent, FileSummary.Builder summary) {
 this.parent = parent;
 this.summary = summary;
 this.context = parent.getContext();
 this.fsn = context.getSourceNamesystem();
 this.numImageErrors = 0;
}
origin: com.facebook.hadoop/hadoop-core

attemptRestoreRemovedStorage();
saveNamespaceContext.set(namesystem, editLog.getLastWrittenTxId());
try {
  // ... saver threads run to completion, then: ...
  savers.clear();
  if (saveNamespaceContext.isCancelled()) {
    processIOError(saveNamespaceContext.getErrorSDs());
    deleteCancelledCheckpoint();
    if (!editLog.isOpen()) editLog.open();
    saveNamespaceContext.checkCancelled(); // throws
  }
  processIOError(saveNamespaceContext.getErrorSDs());
  ckptState = CheckpointStates.UPLOAD_DONE;
} finally {
  saveNamespaceContext.clear();
}
origin: com.facebook.hadoop/hadoop-core

this.checkpointTime = FSNamesystem.now();
List<Thread> savers = new ArrayList<Thread>();
saveNamespaceContext.set(namesystem, editLog.getLastWrittenTxId());
// iterate the storage directories (loop header reconstructed)
for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
  StorageDirectory sd = it.next();
  try {
    // ... save the new image into sd (elided) ...
  } catch (Exception e) {
    LOG.error("Error upgrading " + sd.getRoot(), e);
    saveNamespaceContext.reportErrorOnStorageDirectory(sd);
    continue;
  }
  if (saveNamespaceContext.getErrorSDs().contains(sd)) continue;
  try {
    // ... finalize the upgrade of sd (elided) ...
  } catch (IOException ioe) {
    LOG.error("Error upgrading " + sd.getRoot(), ioe);
    saveNamespaceContext.reportErrorOnStorageDirectory(sd);
    continue;
  }
  LOG.info("Upgrade of " + sd.getRoot() + " is complete.");
}
saveNamespaceContext.clear();
processIOError(saveNamespaceContext.getErrorSDs());
initializeDistributedUpgrade();
editLog.open();
saveNamespaceContext.clear();
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Save FSimage in the legacy format. This is not for NN consumption,
 * but for tools like OIV.
 */
public void saveLegacyOIVImage(FSNamesystem source, String targetDir,
  Canceler canceler) throws IOException {
 FSImageCompression compression =
   FSImageCompression.createCompression(conf);
 long txid = getCorrectLastAppliedOrWrittenTxId();
 SaveNamespaceContext ctx = new SaveNamespaceContext(source, txid,
   canceler);
 FSImageFormat.Saver saver = new FSImageFormat.Saver(ctx);
 String imageFileName = NNStorage.getLegacyOIVImageFileName(txid);
 File imageFile = new File(targetDir, imageFileName);
 saver.save(imageFile, compression);
 archivalManager.purgeOldLegacyOIVImages(targetDir, txid);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Save the contents of the FS image to the file.
 */
void saveFSImage(SaveNamespaceContext context, StorageDirectory sd,
  NameNodeFile dstType) throws IOException {
 long txid = context.getTxId();
 File newFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
 File dstFile = NNStorage.getStorageFile(sd, dstType, txid);
 
 FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context);
 FSImageCompression compression = FSImageCompression.createCompression(conf);
 long numErrors = saver.save(newFile, compression);
 if (numErrors > 0) {
  // The image is likely corrupted.
  LOG.error("Detected " + numErrors + " errors while saving FsImage " +
    dstFile);
  exitAfterSave.set(true);
 }
 MD5FileUtils.saveMD5File(dstFile, saver.getSavedDigest());
 storage.setMostRecentCheckpointInfo(txid, Time.now());
}
origin: org.apache.hadoop/hadoop-hdfs

void serializeINodeSection(OutputStream out) throws IOException {
 INodeMap inodesMap = fsn.dir.getINodeMap();
 INodeSection.Builder b = INodeSection.newBuilder()
   .setLastInodeId(fsn.dir.getLastInodeId()).setNumInodes(inodesMap.size());
 INodeSection s = b.build();
 s.writeDelimitedTo(out);
 int i = 0;
 Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
 while (iter.hasNext()) {
  INodeWithAdditionalFields n = iter.next();
  save(out, n);
  ++i;
  if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
   context.checkCancelled();
  }
 }
 parent.commitSection(summary, FSImageFormatProtobuf.SectionName.INODE);
}
origin: com.facebook.hadoop/hadoop-core

checkNotSaved();
final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
FSDirectory fsDir = sourceNamesystem.dir;
long startTime = now();
// ... output stream setup and version header elided ...
out.writeLong(fsDir.rootDir.numItemsInTree());
out.writeLong(sourceNamesystem.getGenerationStamp());
out.writeLong(context.getTxId());
origin: org.apache.hadoop/hadoop-hdfs

/**
 * @return number of non-fatal errors detected while saving the image.
 * @throws IOException on fatal error.
 */
private long saveSnapshots(FileSummary.Builder summary) throws IOException {
 FSImageFormatPBSnapshot.Saver snapshotSaver = new FSImageFormatPBSnapshot.Saver(
   this, summary, context, context.getSourceNamesystem());
 snapshotSaver.serializeSnapshotSection(sectionOutputStream);
 // Skip snapshot-related sections when there is no snapshot.
 if (context.getSourceNamesystem().getSnapshotManager()
   .getNumSnapshots() > 0) {
  snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
 }
 snapshotSaver.serializeINodeReferenceSection(sectionOutputStream);
 return snapshotSaver.getNumImageErrors();
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Save the contents of the FS image to the file.
 */
void saveFSImage(SaveNamespaceContext context, StorageDirectory sd,
  NameNodeFile dstType) throws IOException {
 long txid = context.getTxId();
 File newFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
 File dstFile = NNStorage.getStorageFile(sd, dstType, txid);
 
 FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context);
 FSImageCompression compression = FSImageCompression.createCompression(conf);
 saver.save(newFile, compression);
 
 MD5FileUtils.saveMD5File(dstFile, saver.getSavedDigest());
 storage.setMostRecentCheckpointInfo(txid, Time.now());
}
origin: ch.cern.hadoop/hadoop-hdfs

void save(File newFile, FSImageCompression compression) throws IOException {
  checkNotSaved();
  final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
  final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
  final long numINodes = rootDir.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getNameSpace(); // continuation reconstructed
  // ... sdPath and the fout/out output streams are set up here, and the
  // image header is written (elided in this snippet) ...
  try {
    out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampAtblockIdSwitch());
    out.writeLong(sourceNamesystem.getBlockIdManager().getLastAllocatedBlockId());
    out.writeLong(context.getTxId());
    out.writeLong(sourceNamesystem.dir.getLastInodeId());
    context.checkCancelled();
    sourceNamesystem.saveSecretManagerStateCompat(out, sdPath);
    context.checkCancelled();
    sourceNamesystem.getCacheManager().saveStateCompat(out, sdPath);
    context.checkCancelled();
    out.flush();
    context.checkCancelled();
    fout.getChannel().force(true);
  } finally {
    // stream cleanup (elided)
  }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * save all the snapshot diff to fsimage
 */
public void serializeSnapshotDiffSection(OutputStream out)
  throws IOException {
 INodeMap inodesMap = fsn.getFSDirectory().getINodeMap();
 final List<INodeReference> refList = parent.getSaverContext()
   .getRefList();
 int i = 0;
 Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
 while (iter.hasNext()) {
  INodeWithAdditionalFields inode = iter.next();
  if (inode.isFile()) {
   serializeFileDiffList(inode.asFile(), out);
  } else if (inode.isDirectory()) {
   serializeDirDiffList(inode.asDirectory(), refList, out);
  }
  ++i;
  if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
   context.checkCancelled();
  }
 }
 parent.commitSection(headers,
   FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF);
}
origin: io.prestosql.hadoop/hadoop-apache

private void saveNameSystemSection(FileSummary.Builder summary)
  throws IOException {
 final FSNamesystem fsn = context.getSourceNamesystem();
 OutputStream out = sectionOutputStream;
 BlockIdManager blockIdManager = fsn.getBlockIdManager();
 NameSystemSection.Builder b = NameSystemSection.newBuilder()
   .setGenstampV1(blockIdManager.getGenerationStampV1())
   .setGenstampV1Limit(blockIdManager.getGenerationStampV1Limit())
   .setGenstampV2(blockIdManager.getGenerationStampV2())
   .setLastAllocatedBlockId(blockIdManager.getLastAllocatedBlockId())
   .setTransactionId(context.getTxId());
 // We use the non-locked version of getNamespaceInfo here since
 // the coordinating thread of saveNamespace already has read-locked
 // the namespace for us. If we attempt to take another readlock
 // from the actual saver thread, there's a potential of a
 // fairness-related deadlock. See the comments on HDFS-2223.
 b.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID());
 if (fsn.isRollingUpgrade()) {
  b.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime());
 }
 NameSystemSection s = b.build();
 s.writeDelimitedTo(out);
 commitSection(summary, SectionName.NS_INFO);
}
org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext

Javadoc

Context for an ongoing SaveNamespace operation. This class allows cancellation and is also responsible for accumulating the storage directories on which the save failed.
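
Taken together, the snippets above follow one lifecycle: construct the context with the source namesystem, a transaction id and a Canceler; poll checkCancelled() between expensive phases; report failed directories back into the context; and call markComplete() when done. A minimal sketch of that flow, assuming it is compiled into the org.apache.hadoop.hdfs.server.namenode package (the class and its constructor are package-private); SaveNamespaceSketch, saveOneDirectory and its parameters are hypothetical:

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.util.Canceler;

// Hypothetical helper mirroring the lifecycle shown in the snippets above.
class SaveNamespaceSketch {
  static void saveOneDirectory(FSNamesystem source, long txid,
      StorageDirectory sd) throws IOException {
    SaveNamespaceContext ctx =
        new SaveNamespaceContext(source, txid, new Canceler());
    try {
      ctx.checkCancelled();        // cheap poll; throws once cancel() was called
      // ... write the image file under sd (elided) ...
      ctx.checkCancelled();        // poll again between expensive phases
      ctx.markComplete();          // signal that the save finished
    } catch (SaveNamespaceCancelledException snce) {
      throw snce;                  // cancellation is not a directory failure
    } catch (IOException ioe) {
      // Failed directories accumulate inside the context; the coordinating
      // thread reads them back later via ctx.getErrorSDs().
      ctx.reportErrorOnStorageDirectory(sd);
      throw ioe;
    }
  }
}

Note the catch ordering: as in the run() snippet earlier, a SaveNamespaceCancelledException must not be reported as a storage-directory error.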

Most used methods

  • <init>
  • checkCancelled
  • getErrorSDs
  • getSourceNamesystem
  • getTxId
  • reportErrorOnStorageDirectory
  • markComplete
  • cancel
    Requests that the current saveNamespace operation be canceled if it is still running (see the sketch after this list).
  • clear
  • isCancelled
  • set
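
The cancel/checkCancelled pair is the handshake behind the methods listed above: one thread requests cancellation through the shared Canceler while the saver polls at section boundaries and unwinds via SaveNamespaceCancelledException. A rough two-thread sketch, under the same package-private caveat as the previous example; CancelHandshakeSketch, demo, the sleep interval and the reason string are all made up:

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.hdfs.util.Canceler;

// Hypothetical demo of requesting and observing cancellation.
class CancelHandshakeSketch {
  static void demo(FSNamesystem source, long txid) throws InterruptedException {
    final Canceler canceler = new Canceler();
    final SaveNamespaceContext ctx =
        new SaveNamespaceContext(source, txid, canceler);

    Thread saver = new Thread(() -> {
      try {
        while (true) {
          Thread.sleep(10);        // stands in for writing one image section
          ctx.checkCancelled();    // throws SaveNamespaceCancelledException
        }
      } catch (Exception expected) {
        // The cancellation reason surfaces in the exception message.
      }
    });
    saver.start();

    canceler.cancel("cancelled for the demo"); // a request, not a forced stop
    saver.join(); // the saver exits at its next checkCancelled() poll

    System.out.println(canceler.isCancelled()); // true
  }
}

Cancellation is cooperative: nothing interrupts the saver thread, it simply notices the flag at its next poll, which is why the snippets sprinkle checkCancelled() between every expensive phase.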
