Tabnine Logo
SaveNamespaceContext.getSourceNamesystem
Code IndexAdd Tabnine to your IDE (free)

How to use
getSourceNamesystem
method
in
org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext.getSourceNamesystem (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

Saver(FSImageFormatProtobuf.Saver parent, FileSummary.Builder summary) {
 // Keep handles on the coordinating saver and the summary being built.
 this.parent = parent;
 this.summary = summary;
 // The shared save context supplies the namesystem being imaged.
 this.context = parent.getContext();
 this.fsn = context.getSourceNamesystem();
 // Counter of non-fatal errors detected while saving; starts clean.
 this.numImageErrors = 0;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Serialize all snapshot-related sections of the image.
 *
 * @return number of non-fatal errors detected while saving the image.
 * @throws IOException on fatal error.
 */
private long saveSnapshots(FileSummary.Builder summary) throws IOException {
 final FSNamesystem sourceFsn = context.getSourceNamesystem();
 FSImageFormatPBSnapshot.Saver snapshotSaver =
   new FSImageFormatPBSnapshot.Saver(this, summary, context, sourceFsn);
 snapshotSaver.serializeSnapshotSection(sectionOutputStream);
 // The snapshot-diff section is only meaningful once at least one
 // snapshot exists; skip it otherwise.
 boolean haveSnapshots =
   sourceFsn.getSnapshotManager().getNumSnapshots() > 0;
 if (haveSnapshots) {
  snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
 }
 snapshotSaver.serializeINodeReferenceSection(sectionOutputStream);
 return snapshotSaver.getNumImageErrors();
}
origin: org.apache.hadoop/hadoop-hdfs

private void saveSecretManagerSection(FileSummary.Builder summary)
  throws IOException {
 // Persist delegation-token state: the header section first, then each
 // delegation key and each outstanding token, all length-delimited.
 final FSNamesystem fsn = context.getSourceNamesystem();
 final DelegationTokenSecretManager.SecretManagerState state =
   fsn.saveSecretManagerState();
 state.section.writeDelimitedTo(sectionOutputStream);
 for (SecretManagerSection.DelegationKey key : state.keys) {
  key.writeDelimitedTo(sectionOutputStream);
 }
 for (SecretManagerSection.PersistToken token : state.tokens) {
  token.writeDelimitedTo(sectionOutputStream);
 }
 commitSection(summary, SectionName.SECRET_MANAGER);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Write the erasure coding section: every configured EC policy, converted
 * to its protobuf form, emitted as a single length-delimited section.
 *
 * @param summary image summary builder the committed section is recorded in.
 * @throws IOException on fatal error while writing the section.
 */
private void saveErasureCodingSection(
  FileSummary.Builder summary) throws IOException {
 final FSNamesystem fsn = context.getSourceNamesystem();
 ErasureCodingPolicyInfo[] ecPolicies =
   fsn.getErasureCodingPolicyManager().getPolicies();
 // Presize to one proto per policy: avoids intermediate array growth.
 ArrayList<ErasureCodingPolicyProto> ecPolicyProtos =
   new ArrayList<ErasureCodingPolicyProto>(ecPolicies.length);
 for (ErasureCodingPolicyInfo p : ecPolicies) {
  ecPolicyProtos.add(PBHelperClient.convertErasureCodingPolicy(p));
 }
 ErasureCodingSection section = ErasureCodingSection.newBuilder().
   addAllPolicies(ecPolicyProtos).build();
 section.writeDelimitedTo(sectionOutputStream);
 commitSection(summary, SectionName.ERASURE_CODING);
}
origin: org.apache.hadoop/hadoop-hdfs

private void saveCacheManagerSection(FileSummary.Builder summary)
  throws IOException {
 // Snapshot the cache manager state, then emit the header section followed
 // by every cache pool and every cache directive, length-delimited.
 CacheManager.PersistState state =
   context.getSourceNamesystem().getCacheManager().saveState();
 state.section.writeDelimitedTo(sectionOutputStream);
 for (CachePoolInfoProto pool : state.pools) {
  pool.writeDelimitedTo(sectionOutputStream);
 }
 for (CacheDirectiveInfoProto directive : state.directives) {
  directive.writeDelimitedTo(sectionOutputStream);
 }
 commitSection(summary, SectionName.CACHE_MANAGER);
}
origin: org.apache.hadoop/hadoop-hdfs

void save(File newFile, FSImageCompression compression) throws IOException {
 checkNotSaved();
 final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
 final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
 final long numINodes = rootDir.getDirectoryWithQuotaFeature()
origin: org.apache.hadoop/hadoop-hdfs

.setOndiskVersion(FSImageUtil.FILE_VERSION)
.setLayoutVersion(
  context.getSourceNamesystem().getEffectiveLayoutVersion());
origin: ch.cern.hadoop/hadoop-hdfs

Saver(FSImageFormatProtobuf.Saver parent, FileSummary.Builder summary) {
 // Keep handles on the coordinating saver and the summary being built.
 this.parent = parent;
 this.summary = summary;
 // The shared save context supplies the namesystem being imaged.
 this.context = parent.getContext();
 this.fsn = context.getSourceNamesystem();
}
origin: io.prestosql.hadoop/hadoop-apache

Saver(FSImageFormatProtobuf.Saver parent, FileSummary.Builder summary) {
 // Keep handles on the coordinating saver and the summary being built.
 this.parent = parent;
 this.summary = summary;
 // The shared save context supplies the namesystem being imaged.
 this.context = parent.getContext();
 this.fsn = context.getSourceNamesystem();
}
origin: org.apache.hadoop/hadoop-hdfs

private void saveNameSystemSection(FileSummary.Builder summary)
  throws IOException {
 // Emit the NS_INFO section: block-id/generation-stamp state plus the
 // namespace id and the transaction id this image corresponds to.
 final FSNamesystem fsn = context.getSourceNamesystem();
 BlockIdManager blockIdManager = fsn.getBlockManager().getBlockIdManager();
 NameSystemSection.Builder builder = NameSystemSection.newBuilder()
   .setGenstampV1(blockIdManager.getLegacyGenerationStamp())
   .setGenstampV1Limit(blockIdManager.getLegacyGenerationStampLimit())
   .setGenstampV2(blockIdManager.getGenerationStamp())
   .setLastAllocatedBlockId(blockIdManager.getLastAllocatedContiguousBlockId())
   .setLastAllocatedStripedBlockId(blockIdManager.getLastAllocatedStripedBlockId())
   .setTransactionId(context.getTxId());
 // Use the non-locked getNamespaceInfo variant: the coordinating
 // saveNamespace thread already holds the namespace read lock, and taking
 // another read lock from this saver thread risks a fairness-related
 // deadlock (see HDFS-2223).
 builder.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID());
 if (fsn.isRollingUpgrade()) {
  builder.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime());
 }
 builder.build().writeDelimitedTo(sectionOutputStream);
 commitSection(summary, SectionName.NS_INFO);
}
origin: ch.cern.hadoop/hadoop-hdfs

private void saveSnapshots(FileSummary.Builder summary) throws IOException {
 // Write the snapshot, snapshot-diff (only when snapshots exist) and
 // inode-reference sections via the snapshot-format saver.
 final FSNamesystem sourceFsn = context.getSourceNamesystem();
 FSImageFormatPBSnapshot.Saver snapshotSaver =
   new FSImageFormatPBSnapshot.Saver(this, summary, context, sourceFsn);
 snapshotSaver.serializeSnapshotSection(sectionOutputStream);
 if (sourceFsn.getSnapshotManager().getNumSnapshots() > 0) {
  snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
 }
 snapshotSaver.serializeINodeReferenceSection(sectionOutputStream);
}
origin: io.prestosql.hadoop/hadoop-apache

private void saveSnapshots(FileSummary.Builder summary) throws IOException {
 // Write the snapshot, snapshot-diff (only when snapshots exist) and
 // inode-reference sections via the snapshot-format saver.
 final FSNamesystem sourceFsn = context.getSourceNamesystem();
 FSImageFormatPBSnapshot.Saver snapshotSaver =
   new FSImageFormatPBSnapshot.Saver(this, summary, context, sourceFsn);
 snapshotSaver.serializeSnapshotSection(sectionOutputStream);
 if (sourceFsn.getSnapshotManager().getNumSnapshots() > 0) {
  snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
 }
 snapshotSaver.serializeINodeReferenceSection(sectionOutputStream);
}
origin: ch.cern.hadoop/hadoop-hdfs

private void saveSecretManagerSection(FileSummary.Builder summary)
  throws IOException {
 // Persist delegation-token state: the header section first, then each
 // delegation key and each outstanding token, all length-delimited.
 final FSNamesystem fsn = context.getSourceNamesystem();
 final DelegationTokenSecretManager.SecretManagerState state =
   fsn.saveSecretManagerState();
 state.section.writeDelimitedTo(sectionOutputStream);
 for (SecretManagerSection.DelegationKey key : state.keys) {
  key.writeDelimitedTo(sectionOutputStream);
 }
 for (SecretManagerSection.PersistToken token : state.tokens) {
  token.writeDelimitedTo(sectionOutputStream);
 }
 commitSection(summary, SectionName.SECRET_MANAGER);
}
origin: io.prestosql.hadoop/hadoop-apache

private void saveSecretManagerSection(FileSummary.Builder summary)
  throws IOException {
 // Persist delegation-token state: the header section first, then each
 // delegation key and each outstanding token, all length-delimited.
 final FSNamesystem fsn = context.getSourceNamesystem();
 final DelegationTokenSecretManager.SecretManagerState state =
   fsn.saveSecretManagerState();
 state.section.writeDelimitedTo(sectionOutputStream);
 for (SecretManagerSection.DelegationKey key : state.keys) {
  key.writeDelimitedTo(sectionOutputStream);
 }
 for (SecretManagerSection.PersistToken token : state.tokens) {
  token.writeDelimitedTo(sectionOutputStream);
 }
 commitSection(summary, SectionName.SECRET_MANAGER);
}
origin: com.facebook.hadoop/hadoop-core

checkNotSaved();
final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
FSDirectory fsDir = sourceNamesystem.dir;
long startTime = now();
origin: ch.cern.hadoop/hadoop-hdfs

private void saveCacheManagerSection(FileSummary.Builder summary)
  throws IOException {
 // Snapshot the cache manager state, then emit the header section followed
 // by every cache pool and every cache directive, length-delimited.
 CacheManager.PersistState state =
   context.getSourceNamesystem().getCacheManager().saveState();
 state.section.writeDelimitedTo(sectionOutputStream);
 for (CachePoolInfoProto pool : state.pools) {
  pool.writeDelimitedTo(sectionOutputStream);
 }
 for (CacheDirectiveInfoProto directive : state.directives) {
  directive.writeDelimitedTo(sectionOutputStream);
 }
 commitSection(summary, SectionName.CACHE_MANAGER);
}
origin: io.prestosql.hadoop/hadoop-apache

private void saveCacheManagerSection(FileSummary.Builder summary)
  throws IOException {
 // Snapshot the cache manager state, then emit the header section followed
 // by every cache pool and every cache directive, length-delimited.
 CacheManager.PersistState state =
   context.getSourceNamesystem().getCacheManager().saveState();
 state.section.writeDelimitedTo(sectionOutputStream);
 for (CachePoolInfoProto pool : state.pools) {
  pool.writeDelimitedTo(sectionOutputStream);
 }
 for (CacheDirectiveInfoProto directive : state.directives) {
  directive.writeDelimitedTo(sectionOutputStream);
 }
 commitSection(summary, SectionName.CACHE_MANAGER);
}
origin: ch.cern.hadoop/hadoop-hdfs

void save(File newFile, FSImageCompression compression) throws IOException {
 checkNotSaved();
 final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
 final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
 final long numINodes = rootDir.getDirectoryWithQuotaFeature()
origin: io.prestosql.hadoop/hadoop-apache

private void saveNameSystemSection(FileSummary.Builder summary)
  throws IOException {
 // Emit the NS_INFO section: generation-stamp/block-id state plus the
 // namespace id and the transaction id this image corresponds to.
 final FSNamesystem fsn = context.getSourceNamesystem();
 BlockIdManager blockIdManager = fsn.getBlockIdManager();
 NameSystemSection.Builder builder = NameSystemSection.newBuilder()
   .setGenstampV1(blockIdManager.getGenerationStampV1())
   .setGenstampV1Limit(blockIdManager.getGenerationStampV1Limit())
   .setGenstampV2(blockIdManager.getGenerationStampV2())
   .setLastAllocatedBlockId(blockIdManager.getLastAllocatedBlockId())
   .setTransactionId(context.getTxId());
 // Use the non-locked getNamespaceInfo variant: the coordinating
 // saveNamespace thread already holds the namespace read lock, and taking
 // another read lock from this saver thread risks a fairness-related
 // deadlock (see HDFS-2223).
 builder.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID());
 if (fsn.isRollingUpgrade()) {
  builder.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime());
 }
 builder.build().writeDelimitedTo(sectionOutputStream);
 commitSection(summary, SectionName.NS_INFO);
}
origin: ch.cern.hadoop/hadoop-hdfs

private void saveNameSystemSection(FileSummary.Builder summary)
  throws IOException {
 // Emit the NS_INFO section: generation-stamp/block-id state plus the
 // namespace id and the transaction id this image corresponds to.
 final FSNamesystem fsn = context.getSourceNamesystem();
 BlockIdManager blockIdManager = fsn.getBlockIdManager();
 NameSystemSection.Builder builder = NameSystemSection.newBuilder()
   .setGenstampV1(blockIdManager.getGenerationStampV1())
   .setGenstampV1Limit(blockIdManager.getGenerationStampV1Limit())
   .setGenstampV2(blockIdManager.getGenerationStampV2())
   .setLastAllocatedBlockId(blockIdManager.getLastAllocatedBlockId())
   .setTransactionId(context.getTxId());
 // Use the non-locked getNamespaceInfo variant: the coordinating
 // saveNamespace thread already holds the namespace read lock, and taking
 // another read lock from this saver thread risks a fairness-related
 // deadlock (see HDFS-2223).
 builder.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID());
 if (fsn.isRollingUpgrade()) {
  builder.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime());
 }
 builder.build().writeDelimitedTo(sectionOutputStream);
 commitSection(summary, SectionName.NS_INFO);
}
org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext.getSourceNamesystem

Popular methods of SaveNamespaceContext

  • <init>
  • checkCancelled
  • getErrorSDs
  • getTxId
  • reportErrorOnStorageDirectory
  • markComplete
  • cancel
    Requests that the current saveNamespace operation be canceled if it is still running.
  • clear
  • isCancelled
  • set

Popular in Java

  • Parsing JSON documents to java classes using gson
  • startActivity (Activity)
  • notifyDataSetChanged (ArrayAdapter)
  • requestLocationUpdates (LocationManager)
  • BorderLayout (java.awt)
    A border layout lays out a container, arranging and resizing its components to fit in five regions: north, south, east, west, and center.
  • Menu (java.awt)
  • Charset (java.nio.charset)
    A charset is a named mapping between Unicode characters and byte sequences. Every Charset can decode byte sequences into characters, and some can also encode characters into bytes.
  • SimpleDateFormat (java.text)
    Formats and parses dates in a locale-sensitive manner. Formatting turns a Date into a String, and parsing turns a String into a Date.
  • StringTokenizer (java.util)
    Breaks a string into tokens; new code should probably use String#split instead.
  • Manifest (java.util.jar)
    The Manifest class is used to obtain attribute information for a JarFile and its entries.
  • Top Sublime Text plugins
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now