// NOTE(review): collapsed excerpt, apparently from a saveNamespace
// cancellation test. In the one-line original the "// throws" comment
// swallowed the remainder of the line; reformatted here with tokens
// unchanged. Presumably the canceler is triggered before checkCancelled()
// is reached -- the triggering call is not visible in this chunk, confirm
// against the full test.
canceler = new Canceler();
SaveNamespaceContext ctx = new SaveNamespaceContext(
    source, txid, canceler);
// Surface any per-directory failures recorded in the context.
storage.reportErrorsOnDirectories(ctx.getErrorSDs());
ctx.checkCancelled(); // throws
// Only reachable if checkCancelled() failed to throw.
assert false : "should have thrown above!";
ctx.markComplete();
ctx = null;
// NOTE(review): elided excerpt of the legacy FSImageFormat.Saver#save.
// The numINodes initializer is truncated mid-expression and statements
// between it and the writeLong calls appear to be missing; the trailing
// "} finally {" has no matching try in this chunk. Tokens kept as-is.
void save(File newFile, FSImageCompression compression) throws IOException {
  checkNotSaved(); // guard: a Saver instance may only save once
  final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
  final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
  final long numINodes = rootDir.getDirectoryWithQuotaFeature()
  // Image header values: block-id bookkeeping, txid, last inode id.
  out.writeLong(blockIdManager.getGenerationStampAtblockIdSwitch());
  out.writeLong(blockIdManager.getLastAllocatedContiguousBlockId());
  out.writeLong(context.getTxId());
  out.writeLong(sourceNamesystem.dir.getLastInodeId());
  // Poll for cancellation between the expensive sub-saves so an aborted
  // saveNamespace stops promptly instead of finishing the whole image.
  context.checkCancelled();
  sourceNamesystem.saveSecretManagerStateCompat(out, sdPath);
  context.checkCancelled();
  sourceNamesystem.getCacheManager().saveStateCompat(out, sdPath);
  context.checkCancelled();
  out.flush();
  context.checkCancelled();
  // Force bytes to disk before the image can be treated as durable.
  fout.getChannel().force(true);
} finally {
@Override public void run() { try { saveFSImage(context, sd, nnf); } catch (SaveNamespaceCancelledException snce) { LOG.info("Cancelled image saving for " + sd.getRoot() + ": " + snce.getMessage()); // don't report an error on the storage dir! } catch (Throwable t) { LOG.error("Unable to save image for " + sd.getRoot(), t); context.reportErrorOnStorageDirectory(sd); } }
// Fragment: fills in the FileSummary header (on-disk file version plus the
// effective layout version of the namespace) and polls for cancellation.
// The front of the builder expression is truncated in this excerpt.
.setOndiskVersion(FSImageUtil.FILE_VERSION) .setLayoutVersion( context.getSourceNamesystem().getEffectiveLayoutVersion()); context.checkCancelled();
private void saveNameSystemSection(FileSummary.Builder summary) throws IOException { final FSNamesystem fsn = context.getSourceNamesystem(); OutputStream out = sectionOutputStream; BlockIdManager blockIdManager = fsn.getBlockManager().getBlockIdManager(); NameSystemSection.Builder b = NameSystemSection.newBuilder() .setGenstampV1(blockIdManager.getLegacyGenerationStamp()) .setGenstampV1Limit(blockIdManager.getLegacyGenerationStampLimit()) .setGenstampV2(blockIdManager.getGenerationStamp()) .setLastAllocatedBlockId(blockIdManager.getLastAllocatedContiguousBlockId()) .setLastAllocatedStripedBlockId(blockIdManager.getLastAllocatedStripedBlockId()) .setTransactionId(context.getTxId()); // We use the non-locked version of getNamespaceInfo here since // the coordinating thread of saveNamespace already has read-locked // the namespace for us. If we attempt to take another readlock // from the actual saver thread, there's a potential of a // fairness-related deadlock. See the comments on HDFS-2223. b.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID()); if (fsn.isRollingUpgrade()) { b.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime()); } NameSystemSection s = b.build(); s.writeDelimitedTo(out); commitSection(summary, SectionName.NS_INFO); }
/**
 * Wires this section saver to its parent image writer.
 *
 * @param parent  the protobuf image writer driving the save
 * @param summary image-wide FileSummary builder sections are committed to
 */
Saver(FSImageFormatProtobuf.Saver parent, FileSummary.Builder summary) {
  this.numImageErrors = 0; // no non-fatal errors observed yet
  this.parent = parent;
  this.summary = summary;
  this.context = parent.getContext();
  // Resolve the namesystem once up front; the serializers all read it.
  this.fsn = context.getSourceNamesystem();
}
// NOTE(review): elided excerpt of a pre-HA saveNamespace/checkpoint flow.
// The control flow is incomplete here: the "} finally {" has no visible
// matching try, so statements between the chunks were likely dropped.
// Tokens kept byte-identical; only comments added.
attemptRestoreRemovedStorage();
// Snapshot the namespace at the last written edit-log txid.
saveNamespaceContext.set(namesystem, editLog.getLastWrittenTxId());
savers.clear();
if (saveNamespaceContext.isCancelled()) {
  // Surface directory errors, discard the half-written checkpoint, and
  // make sure the edit log is usable again before rethrowing.
  processIOError(saveNamespaceContext.getErrorSDs());
  deleteCancelledCheckpoint();
  if (!editLog.isOpen()) editLog.open();
  saveNamespaceContext.checkCancelled();
  processIOError(saveNamespaceContext.getErrorSDs());
  ckptState = CheckpointStates.UPLOAD_DONE;
} finally {
  saveNamespaceContext.clear();
// NOTE(review): elided excerpt of a storage-directory upgrade loop.
// Several braces are unmatched (catch blocks without closing braces, a
// bare "try {"), so surrounding code was dropped when this chunk was
// extracted. Tokens kept byte-identical; only comments added.
this.checkpointTime = FSNamesystem.now();
List<Thread> savers = new ArrayList<Thread>();
saveNamespaceContext.set(namesystem, editLog.getLastWrittenTxId());
} catch (Exception e) {
  // A failing directory is recorded and skipped, not fatal to the upgrade.
  LOG.error("Error upgrading " + sd.getRoot(), e);
  saveNamespaceContext.reportErrorOnStorageDirectory(sd);
  continue;
// Skip directories already marked as errored earlier in the loop.
if (saveNamespaceContext.getErrorSDs().contains(sd)) continue;
try {
} catch (IOException ioe) {
  LOG.error("Error upgrading " + sd.getRoot(), ioe);
  saveNamespaceContext.reportErrorOnStorageDirectory(sd);
  continue;
LOG.info("Upgrade of " + sd.getRoot() + " is complete.");
saveNamespaceContext.clear();
processIOError(saveNamespaceContext.getErrorSDs());
initializeDistributedUpgrade();
editLog.open();
saveNamespaceContext.clear();
/**
 * Save FSimage in the legacy format. This is not for NN consumption,
 * but for tools like OIV.
 *
 * @param source    namesystem whose namespace is serialized
 * @param targetDir directory the legacy image file is written into
 * @param canceler  cooperative cancellation handle for the save
 * @throws IOException if writing the image or purging old images fails
 */
public void saveLegacyOIVImage(FSNamesystem source, String targetDir,
    Canceler canceler) throws IOException {
  final FSImageCompression codec =
      FSImageCompression.createCompression(conf);
  final long lastTxId = getCorrectLastAppliedOrWrittenTxId();
  FSImageFormat.Saver legacySaver = new FSImageFormat.Saver(
      new SaveNamespaceContext(source, lastTxId, canceler));
  // The file name encodes the txid the image is consistent with.
  final String fileName = NNStorage.getLegacyOIVImageFileName(lastTxId);
  legacySaver.save(new File(targetDir, fileName), codec);
  // Bound the number of retained legacy OIV images in the target dir.
  archivalManager.purgeOldLegacyOIVImages(targetDir, lastTxId);
}
/** * Save the contents of the FS image to the file. */ void saveFSImage(SaveNamespaceContext context, StorageDirectory sd, NameNodeFile dstType) throws IOException { long txid = context.getTxId(); File newFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid); File dstFile = NNStorage.getStorageFile(sd, dstType, txid); FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context); FSImageCompression compression = FSImageCompression.createCompression(conf); long numErrors = saver.save(newFile, compression); if (numErrors > 0) { // The image is likely corrupted. LOG.error("Detected " + numErrors + " errors while saving FsImage " + dstFile); exitAfterSave.set(true); } MD5FileUtils.saveMD5File(dstFile, saver.getSavedDigest()); storage.setMostRecentCheckpointInfo(txid, Time.now()); }
/**
 * Serializes the INODE section: a header with the last inode id and inode
 * count, followed by one record per inode in the inode map. Polls for
 * cancellation every CHECK_CANCEL_INTERVAL inodes.
 *
 * @param out stream the section is written to
 * @throws IOException if writing fails or the save was cancelled
 */
void serializeINodeSection(OutputStream out) throws IOException {
  final INodeMap inodeMap = fsn.dir.getINodeMap();

  INodeSection header = INodeSection.newBuilder()
      .setLastInodeId(fsn.dir.getLastInodeId())
      .setNumInodes(inodeMap.size())
      .build();
  header.writeDelimitedTo(out);

  int written = 0;
  for (Iterator<INodeWithAdditionalFields> it = inodeMap.getMapIterator();
       it.hasNext();) {
    save(out, it.next());
    // Check for a cancelled saveNamespace periodically, not per-inode.
    if (++written % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(summary, FSImageFormatProtobuf.SectionName.INODE);
}
// Fragment: header of the legacy (pre-protobuf) image writer. Records the
// namespace tree size, the generation stamp and the txid the image
// corresponds to. startTime is presumably used later for duration logging
// -- the rest of the method is not visible in this excerpt.
checkNotSaved(); // guard: a Saver instance may only save once
final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
FSDirectory fsDir = sourceNamesystem.dir;
long startTime = now();
out.writeLong(fsDir.rootDir.numItemsInTree());
out.writeLong(sourceNamesystem.getGenerationStamp());
out.writeLong(context.getTxId());
/** * @return number of non-fatal errors detected while saving the image. * @throws IOException on fatal error. */ private long saveSnapshots(FileSummary.Builder summary) throws IOException { FSImageFormatPBSnapshot.Saver snapshotSaver = new FSImageFormatPBSnapshot.Saver( this, summary, context, context.getSourceNamesystem()); snapshotSaver.serializeSnapshotSection(sectionOutputStream); // Skip snapshot-related sections when there is no snapshot. if (context.getSourceNamesystem().getSnapshotManager() .getNumSnapshots() > 0) { snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream); } snapshotSaver.serializeINodeReferenceSection(sectionOutputStream); return snapshotSaver.getNumImageErrors(); }
/**
 * Save FSimage in the legacy format. This is not for NN consumption,
 * but for tools like OIV.
 *
 * @param source    namesystem to serialize
 * @param targetDir destination directory for the legacy image
 * @param canceler  allows the caller to abort the save cooperatively
 * @throws IOException on write or purge failure
 */
public void saveLegacyOIVImage(FSNamesystem source, String targetDir,
    Canceler canceler) throws IOException {
  FSImageCompression compression =
      FSImageCompression.createCompression(conf);
  long imageTxId = getCorrectLastAppliedOrWrittenTxId();
  SaveNamespaceContext saveContext =
      new SaveNamespaceContext(source, imageTxId, canceler);
  FSImageFormat.Saver oivSaver = new FSImageFormat.Saver(saveContext);
  // Legacy OIV image files are named after the txid they represent.
  File outFile = new File(targetDir,
      NNStorage.getLegacyOIVImageFileName(imageTxId));
  oivSaver.save(outFile, compression);
  // Retain only a bounded number of old legacy OIV images.
  archivalManager.purgeOldLegacyOIVImages(targetDir, imageTxId);
}
/**
 * Save the contents of the FS image to the file.
 *
 * Writes the image into the IMAGE_NEW temporary file for this txid, then
 * records the image MD5 against the destination file name and updates the
 * most-recent-checkpoint bookkeeping.
 */
void saveFSImage(SaveNamespaceContext context, StorageDirectory sd,
    NameNodeFile dstType) throws IOException {
  final long txid = context.getTxId();
  final File workFile =
      NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
  final File targetFile = NNStorage.getStorageFile(sd, dstType, txid);

  FSImageFormatProtobuf.Saver writer =
      new FSImageFormatProtobuf.Saver(context);
  writer.save(workFile, FSImageCompression.createCompression(conf));

  MD5FileUtils.saveMD5File(targetFile, writer.getSavedDigest());
  storage.setMostRecentCheckpointInfo(txid, Time.now());
}
// NOTE(review): collapsed test-style excerpt. On the original single line
// the "// throws" comment commented out everything after it; reformatted
// with tokens unchanged so the trailing statements are live again. The
// cancellation that makes checkCancelled() throw is presumably triggered
// outside this chunk -- verify against the full test.
canceler = new Canceler();
SaveNamespaceContext ctx = new SaveNamespaceContext(
    source, txid, canceler);
// Report any directory errors accumulated in the context to storage.
storage.reportErrorsOnDirectories(ctx.getErrorSDs());
ctx.checkCancelled(); // throws
// Guard: execution must not reach here when cancellation was requested.
assert false : "should have thrown above!";
ctx.markComplete();
ctx = null;
// NOTE(review): truncated excerpt of an older FSImageFormat.Saver#save
// (block-id accessors differ from the other copy in this file). The
// numINodes initializer is cut mid-expression and the trailing
// "} finally {" lacks its try; intermediate statements were elided.
// Tokens kept byte-identical, only comments added.
void save(File newFile, FSImageCompression compression) throws IOException {
  checkNotSaved(); // a Saver may only be used for a single save
  final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
  final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
  final long numINodes = rootDir.getDirectoryWithQuotaFeature()
  // Header values: generation-stamp/block-id state, txid, last inode id.
  out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampAtblockIdSwitch());
  out.writeLong(sourceNamesystem.getBlockIdManager().getLastAllocatedBlockId());
  out.writeLong(context.getTxId());
  out.writeLong(sourceNamesystem.dir.getLastInodeId());
  // Cancellation checks between sub-saves keep aborts responsive.
  context.checkCancelled();
  sourceNamesystem.saveSecretManagerStateCompat(out, sdPath);
  context.checkCancelled();
  sourceNamesystem.getCacheManager().saveStateCompat(out, sdPath);
  context.checkCancelled();
  out.flush();
  context.checkCancelled();
  // Sync file contents to disk before declaring the image complete.
  fout.getChannel().force(true);
} finally {
/**
 * save all the snapshot diff to fsimage
 *
 * Walks every inode in the inode map, emitting file diff lists for files
 * and directory diff lists for directories, then commits the
 * SNAPSHOT_DIFF section. Polls for cancellation every
 * CHECK_CANCEL_INTERVAL inodes.
 *
 * @param out stream the section is written to
 * @throws IOException on write failure or cancelled save
 */
public void serializeSnapshotDiffSection(OutputStream out)
    throws IOException {
  final INodeMap inodeMap = fsn.getFSDirectory().getINodeMap();
  final List<INodeReference> refList =
      parent.getSaverContext().getRefList();

  int processed = 0;
  for (Iterator<INodeWithAdditionalFields> it = inodeMap.getMapIterator();
       it.hasNext();) {
    INodeWithAdditionalFields node = it.next();
    if (node.isFile()) {
      serializeFileDiffList(node.asFile(), out);
    } else if (node.isDirectory()) {
      serializeDirDiffList(node.asDirectory(), refList, out);
    }
    // Counting every inode (diff or not) keeps the cancellation cadence
    // proportional to traversal work, matching the original behavior.
    if (++processed % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(headers,
      FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF);
}
private void saveNameSystemSection(FileSummary.Builder summary) throws IOException { final FSNamesystem fsn = context.getSourceNamesystem(); OutputStream out = sectionOutputStream; BlockIdManager blockIdManager = fsn.getBlockIdManager(); NameSystemSection.Builder b = NameSystemSection.newBuilder() .setGenstampV1(blockIdManager.getGenerationStampV1()) .setGenstampV1Limit(blockIdManager.getGenerationStampV1Limit()) .setGenstampV2(blockIdManager.getGenerationStampV2()) .setLastAllocatedBlockId(blockIdManager.getLastAllocatedBlockId()) .setTransactionId(context.getTxId()); // We use the non-locked version of getNamespaceInfo here since // the coordinating thread of saveNamespace already has read-locked // the namespace for us. If we attempt to take another readlock // from the actual saver thread, there's a potential of a // fairness-related deadlock. See the comments on HDFS-2223. b.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID()); if (fsn.isRollingUpgrade()) { b.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime()); } NameSystemSection s = b.build(); s.writeDelimitedTo(out); commitSection(summary, SectionName.NS_INFO); }