org.apache.hadoop.hdfs.server.namenode

How to use org.apache.hadoop.hdfs.server.namenode

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode (Showing top 20 results out of 315)
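
Most of the classes in this package are NameNode internals, so they are normally reached in tests through a MiniDFSCluster from the hadoop-hdfs test jar rather than instantiated directly. A minimal sketch of that setup (single DataNode, default configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
  cluster.waitActive();
  NameNode nn = cluster.getNameNode();          // namespace manager / "inode table"
  FSNamesystem fsn = cluster.getNamesystem();   // the NameNode's bookkeeping state
  DistributedFileSystem dfs = cluster.getFileSystem();
  // ... exercise the NameNode through dfs, nn, or fsn ...
} finally {
  cluster.shutdown();
}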

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Update the quota usage after deletion. The quota update is only necessary
 * when image/edits have been loaded and the file/dir to be deleted is not
 * contained in snapshots.
 */
void updateCountForDelete(final INode inode, final INodesInPath iip) {
 if (getFSNamesystem().isImageLoaded() &&
   !inode.isInLatestSnapshot(iip.getLatestSnapshotId())) {
  QuotaCounts counts = inode.computeQuotaUsage(getBlockStoragePolicySuite());
  unprotectedUpdateCount(iip, iip.length() - 1, counts.negation());
 }
}
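
The javadoc above describes the NameNode-side accounting; from a client, the effect shows up in the directory's content summary. A minimal sketch, assuming the dfs handle from the MiniDFSCluster setup near the top of this page (path and sizes are made up):

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;

Path dir = new Path("/quotaDir");
dfs.mkdirs(dir);
dfs.setQuota(dir, 100, 10L * 1024 * 1024);   // namespace quota, storage-space quota
try (FSDataOutputStream out = dfs.create(new Path(dir, "f"))) {
  out.write(new byte[4096]);
}
long before = dfs.getContentSummary(dir).getSpaceConsumed();
dfs.delete(new Path(dir, "f"), false);
long after = dfs.getContentSummary(dir).getSpaceConsumed();  // lower: the delete updated the counts
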
origin: org.apache.hadoop/hadoop-hdfs

static void modifyCachePool(
  FSNamesystem fsn, CacheManager cacheManager, CachePoolInfo req,
  boolean logRetryCache) throws IOException {
 final FSPermissionChecker pc = getFsPermissionChecker(fsn);
 if (pc != null) {
  pc.checkSuperuserPrivilege();
 }
 cacheManager.modifyCachePool(req);
 fsn.getEditLog().logModifyCachePool(req, logRetryCache);
}
origin: org.apache.hadoop/hadoop-hdfs

public void logAppendFile(String path, INodeFile file, boolean newBlock,
  boolean toLogRpcIds) {
 FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
 assert uc != null;
 AppendOp op = AppendOp.getInstance(cache.get()).setPath(path)
   .setClientName(uc.getClientName())
   .setClientMachine(uc.getClientMachine())
   .setNewBlock(newBlock);
 logRpcIds(op, toLogRpcIds);
 logEdit(op);
}
origin: org.apache.hadoop/hadoop-hdfs

 @Override
 public boolean metadataEquals(INodeDirectoryAttributes other) {
  return other != null
    && getQuotaCounts().equals(other.getQuotaCounts())
    && getPermissionLong() == other.getPermissionLong()
    && getAclFeature() == other.getAclFeature()
    && getXAttrFeature() == other.getXAttrFeature();
 }
}
origin: org.apache.hadoop/hadoop-hdfs

private void save(OutputStream out, INodeDirectory n) throws IOException {
 INodeSection.INodeDirectory.Builder b = buildINodeDirectory(n,
   parent.getSaverContext());
 INodeSection.INode r = buildINodeCommon(n)
   .setType(INodeSection.INode.Type.DIRECTORY).setDirectory(b).build();
 r.writeDelimitedTo(out);
}
origin: org.apache.hadoop/hadoop-hdfs

/** Update the root node's attributes */
private void updateRootAttr(INodeWithAdditionalFields root) {
 final QuotaCounts q = root.getQuotaCounts();
 final long nsQuota = q.getNameSpace();
 final long dsQuota = q.getStorageSpace();
 FSDirectory fsDir = namesystem.dir;
 if (nsQuota != -1 || dsQuota != -1) {
  fsDir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
 }
 fsDir.rootDir.cloneModificationTime(root);
 fsDir.rootDir.clonePermissionStatus(root);
}

origin: org.apache.hadoop/hadoop-hdfs

static CachePoolInfo addCachePool(
  FSNamesystem fsn, CacheManager cacheManager, CachePoolInfo req,
  boolean logRetryCache)
  throws IOException {
 final FSPermissionChecker pc = getFsPermissionChecker(fsn);
 if (pc != null) {
  pc.checkSuperuserPrivilege();
 }
 CachePoolInfo info = cacheManager.addCachePool(req);
 fsn.getEditLog().logAddCachePool(info, logRetryCache);
 return info;
}
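
These static helpers back the public cache-administration API; the matching client-side calls look roughly like this (pool name, owner, group, and limits are made up, and dfs is the handle from earlier):

import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

CachePoolInfo pool = new CachePoolInfo("testPool")
  .setOwnerName("hdfs")
  .setGroupName("hadoop")
  .setLimit(64L * 1024 * 1024);      // cache at most 64 MB for this pool
dfs.addCachePool(pool);
// later, raise the byte limit on the same pool
dfs.modifyCachePool(new CachePoolInfo("testPool").setLimit(128L * 1024 * 1024));
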
origin: org.apache.hadoop/hadoop-hdfs

private void loadStringTableSection(InputStream in) throws IOException {
 StringTableSection s = StringTableSection.parseDelimitedFrom(in);
 ctx.stringTable =
   SerialNumberManager.newStringTable(s.getNumEntry(), s.getMaskBits());
 for (int i = 0; i < s.getNumEntry(); ++i) {
  StringTableSection.Entry e = StringTableSection.Entry
    .parseDelimitedFrom(in);
  ctx.stringTable.put(e.getId(), e.getStr());
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/** Add set quota by storage type record to edit log */
void logSetQuotaByStorageType(String src, long dsQuota, StorageType type) {
 SetQuotaByStorageTypeOp op = SetQuotaByStorageTypeOp.getInstance(cache.get())
  .setSource(src)
  .setQuotaByStorageType(dsQuota, type);
 logEdit(op);
}
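
The client call that ultimately logs this op is DistributedFileSystem#setQuotaByStorageType; a sketch with a made-up path and quota, assuming the dfs handle from earlier:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;

// Limit SSD usage under /ssdData to 10 GB.
dfs.setQuotaByStorageType(new Path("/ssdData"), StorageType.SSD, 10L * 1024 * 1024 * 1024);
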
origin: org.apache.hadoop/hadoop-hdfs

void logAddCachePool(CachePoolInfo pool, boolean toLogRpcIds) {
 AddCachePoolOp op =
   AddCachePoolOp.getInstance(cache.get()).setPool(pool);
 logRpcIds(op, toLogRpcIds);
 logEdit(op);
}
origin: org.apache.hadoop/hadoop-hdfs

void logModifyCachePool(CachePoolInfo info, boolean toLogRpcIds) {
 ModifyCachePoolOp op =
   ModifyCachePoolOp.getInstance(cache.get()).setInfo(info);
 logRpcIds(op, toLogRpcIds);
 logEdit(op);
}
origin: org.apache.hadoop/hadoop-hdfs

Lease reassignLeaseInternal(Lease lease, String newHolder, INodeFile pendingFile) {
 assert hasWriteLock();
 pendingFile.getFileUnderConstructionFeature().setClientName(newHolder);
 return leaseManager.reassignLease(lease, pendingFile, newHolder);
}
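
Lease reassignment runs inside the NameNode; clients usually reach it indirectly by forcing lease recovery on a file whose writer died. A rough sketch (the WAL path is made up, and dfs is the handle from earlier):

import org.apache.hadoop.fs.Path;

Path wal = new Path("/logs/wal.1");
boolean closed = dfs.recoverLease(wal);
while (!closed) {
  Thread.sleep(1000L);               // give the NameNode time to finish recovery
  closed = dfs.recoverLease(wal);
}
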
origin: org.apache.hadoop/hadoop-hdfs

static void modifyCacheDirective(
  FSNamesystem fsn, CacheManager cacheManager, CacheDirectiveInfo directive,
  EnumSet<CacheFlag> flags, boolean logRetryCache) throws IOException {
 final FSPermissionChecker pc = getFsPermissionChecker(fsn);
 cacheManager.modifyDirective(directive, pc, flags);
 fsn.getEditLog().logModifyCacheDirectiveInfo(directive, logRetryCache);
}
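
modifyCacheDirective is the server half of the directive API; a rough client-side counterpart (path, pool, and replication values are illustrative):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
  .setPath(new Path("/hot/table"))
  .setPool("testPool")
  .setReplication((short) 2)
  .build());
// bump the cache replication of the existing directive
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
  .setId(id)
  .setReplication((short) 3)
  .build());
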
origin: apache/hive

KeyProviderCryptoExtension keyProvider = miniDFSCluster.getNameNode(0).getNamesystem().getProvider();
if (keyProvider != null) {
 try {
origin: org.apache.hadoop/hadoop-hdfs

public synchronized void doPreUpgradeOfSharedLog() throws IOException {
 for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
  if (jas.isShared()) {
   jas.getManager().doPreUpgrade();
  }
 }
}

origin: org.apache.hadoop/hadoop-hdfs

void logFinalizeRollingUpgrade(long finalizeTime) {
 RollingUpgradeOp op = RollingUpgradeFinalizeOp.getInstance(cache.get());
 op.setTime(finalizeTime);
 logEdit(op);
}
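
This op is written when a rolling upgrade is finalized; from the admin side that is typically triggered with `hdfs dfsadmin -rollingUpgrade finalize` or, in code, roughly like this (assuming the dfs handle from earlier):

import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;

dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);  // finalize an in-progress rolling upgrade
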
origin: org.apache.hadoop/hadoop-hdfs

private static AclFeatureProto.Builder buildAclEntries(AclFeature f) {
 AclFeatureProto.Builder b = AclFeatureProto.newBuilder();
 for (int pos = 0, e; pos < f.getEntriesSize(); pos++) {
  e = f.getEntryAt(pos);
  b.addEntries(e);
 }
 return b;
}
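
The AclFeature serialized here is populated by the public ACL API; a minimal client-side sketch (the path and principal are made up, and dfs is the handle from earlier):

import java.util.Collections;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

AclEntry entry = new AclEntry.Builder()
  .setScope(AclEntryScope.ACCESS)
  .setType(AclEntryType.USER)
  .setName("alice")
  .setPermission(FsAction.READ_EXECUTE)
  .build();
dfs.modifyAclEntries(new Path("/shared"), Collections.singletonList(entry));
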
origin: org.apache.hadoop/hadoop-hdfs

public synchronized void discardSegments(long markerTxid)
  throws IOException {
 for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
  jas.getManager().discardSegments(markerTxid);
 }
}
origin: apache/hbase

@Test
public void testMovedWALDuringRecovery() throws Exception {
 // This partial mock will throw LEE for every file simulating
 // files that were moved
 FileSystem spiedFs = Mockito.spy(fs);
 // The "File does not exist" part is very important,
 // that's how it comes out of HDFS
 Mockito.doThrow(new LeaseExpiredException("Injected: File does not exist")).
   when(spiedFs).append(Mockito.<Path>any());
 retryOverHdfsProblem(spiedFs);
}
origin: apache/hbase

public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
  throws Exception {
 createDirsAndSetProperties();
 EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
 // Error level to skip some warnings specific to the minicluster. See HBASE-4709
 org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
   setLevel(org.apache.log4j.Level.ERROR);
 org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
   setLevel(org.apache.log4j.Level.ERROR);
 TraceUtil.initTracer(conf);
 this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
   true, null, racks, hosts, null);
 // Set this just-started cluster as our filesystem.
 setFs();
 // Wait for the cluster to be totally up
 this.dfsCluster.waitClusterUp();
 //reset the test directory for test file system
 dataTestDirOnTestFS = null;
 String dataTestDir = getDataTestDir().toString();
 conf.set(HConstants.HBASE_DIR, dataTestDir);
 LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
 return this.dfsCluster;
}
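
On recent Hadoop releases the deprecated MiniDFSCluster constructor used above has a Builder equivalent; a rough translation of the same call might look like this:

this.dfsCluster = new MiniDFSCluster.Builder(conf)
  .numDataNodes(servers)
  .racks(racks)
  .hosts(hosts)
  .format(true)
  .build();
this.dfsCluster.waitClusterUp();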
org.apache.hadoop.hdfs.server.namenode

Most used classes

  • NameNode
    NameNode serves as both directory namespace manager and "inode table" for the Hadoop DFS. There is a single NameNode running in any DFS deployment.
  • FSNamesystem
    FSNamesystem does the actual bookkeeping work for the NameNode. It tracks several important tables.
  • LeaseExpiredException
    The lease that was being used to create this file has expired. A retry sketch using this exception appears after this list.
  • SecondaryNameNode
    The Secondary NameNode is a helper to the primary NameNode. The Secondary is responsible for supporting periodic checkpoints of the HDFS metadata.
  • EditLogFileOutputStream
    An implementation of the abstract class EditLogOutputStream, which stores edits in a local file.
  • FSDirectory
  • FSEditLog
  • FSImage
  • INode
  • INodeDirectory
  • INodeFile
  • LeaseManager
  • TransferFsImage
  • NameNodeMetrics
  • CheckpointSignature
  • EditLogFileInputStream
  • EditLogInputStream
  • FSEditLogLoader
  • FSImageCompression
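
A minimal sketch of how LeaseExpiredException typically surfaces to client code and is retried, as in the HBase test above. The appendWithRetry helper is hypothetical; it is not part of any Hadoop or HBase API:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;

// Hypothetical helper: retry an append while the NameNode still reports an
// expired lease on the file (e.g. during lease recovery of a dead writer).
static FSDataOutputStream appendWithRetry(FileSystem fs, Path path, int attempts)
    throws IOException, InterruptedException {
  for (int i = 0; ; i++) {
    try {
      return fs.append(path);
    } catch (LeaseExpiredException lee) {
      if (i + 1 >= attempts) {
        throw lee;         // give up after the configured number of attempts
      }
      Thread.sleep(1000L); // give the NameNode time to recover the lease
    }
  }
}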