FSPermissionChecker.isSuperUser

How to use the isSuperUser method in org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.isSuperUser (Showing top 20 results out of 315)
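
Before the individual results, here is a minimal sketch of the pattern most of them share, assuming calling code inside the org.apache.hadoop.hdfs.server.namenode package (FSPermissionChecker and FSDirectory are NameNode-internal classes); the helper name writeGatedOp and the choice of WRITE access are illustrative, not part of Hadoop:

 // Hypothetical helper, not taken from Hadoop source: resolve the path, then
 // enforce a fine-grained permission check only for non-superusers.
 static void writeGatedOp(FSDirectory fsd, String src) throws IOException {
  // Permission checker bound to the calling user for this operation.
  FSPermissionChecker pc = fsd.getPermissionChecker();
  INodesInPath iip = fsd.resolvePath(pc, src, DirOp.WRITE);
  if (fsd.isPermissionEnabled() && !pc.isSuperUser()) {
   // The superuser bypasses the path access check entirely.
   fsd.checkPathAccess(pc, iip, FsAction.WRITE);
  }
  // ... perform the actual operation on iip here ...
 }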

origin: org.apache.hadoop/hadoop-hdfs

 void checkPermission(INodeDirectory inode, int snapshotId, FsAction access)
   throws AccessControlException {
  if (dir != null && dir.isPermissionEnabled()
    && pc != null && !pc.isSuperUser()) {
   pc.checkPermission(inode, snapshotId, access);
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Verify if the caller has the required permission. This will result into 
 * an exception if the caller is not allowed to access the resource.
 */
public void checkSuperuserPrivilege()
  throws AccessControlException {
 if (!isSuperUser()) {
  throw new AccessControlException("Access denied for user " 
    + getUser() + ". Superuser privilege is required");
 }
}

origin: org.apache.hadoop/hadoop-hdfs

 static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
   List<XAttr> xAttrs, boolean isRawPath) {
  assert xAttrs != null : "xAttrs can not be null";
  if (xAttrs.isEmpty()) {
   return xAttrs;
  }
  
  List<XAttr> filteredXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
  final boolean isSuperUser = pc.isSuperUser();
  for (XAttr xAttr : xAttrs) {
   if (xAttr.getNameSpace() == XAttr.NameSpace.USER) {
    filteredXAttrs.add(xAttr);
   } else if (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && 
     isSuperUser) {
    filteredXAttrs.add(xAttr);
   } else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW && isRawPath) {
    filteredXAttrs.add(xAttr);
   } else if (XAttrHelper.getPrefixedName(xAttr).
     equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
    filteredXAttrs.add(xAttr);
   }
  }
  return filteredXAttrs;
 }
}
origin: org.apache.hadoop/hadoop-hdfs

void checkUnreadableBySuperuser(FSPermissionChecker pc, INodesInPath iip)
  throws IOException {
 if (pc.isSuperUser()) {
  if (FSDirXAttrOp.getXAttrByPrefixedName(this, iip,
    SECURITY_XATTR_UNREADABLE_BY_SUPERUSER) != null) {
   throw new AccessControlException(
     "Access is denied for " + pc.getUser() + " since the superuser "
     + "is not allowed to perform this operation.");
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

static SnapshottableDirectoryStatus[] getSnapshottableDirListing(
  FSDirectory fsd, FSPermissionChecker pc, SnapshotManager snapshotManager)
  throws IOException {
 fsd.readLock();
 try {
  final String user = pc.isSuperUser()? null : pc.getUser();
  return snapshotManager.getSnapshottableDirListing(user);
 } finally {
  fsd.readUnlock();
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Check whether current user have permissions to access the path. For more
 * details of the parameters, see
 * {@link FSPermissionChecker#checkPermission}.
 */
void checkPermission(FSPermissionChecker pc, INodesInPath iip,
  boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess,
  FsAction access, FsAction subAccess, boolean ignoreEmptyDir)
  throws AccessControlException {
 if (!pc.isSuperUser()) {
  readLock();
  try {
   pc.checkPermission(iip, doCheckOwner, ancestorAccess,
     parentAccess, access, subAccess, ignoreEmptyDir);
  } finally {
   readUnlock();
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

DirOp dirOp = resolveLink ? DirOp.READ : DirOp.READ_LINK;
final INodesInPath iip;
if (pc.isSuperUser()) {
origin: org.apache.hadoop/hadoop-hdfs

static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr,
  boolean isRawPath)
  throws AccessControlException {
 final boolean isSuperUser = pc.isSuperUser();
 if (xAttr.getNameSpace() == XAttr.NameSpace.USER || 
   (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && isSuperUser)) {
  return;
 }
 if (xAttr.getNameSpace() == XAttr.NameSpace.RAW && isRawPath) {
  return;
 }
 if (XAttrHelper.getPrefixedName(xAttr).
   equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
  if (xAttr.getValue() != null) {
   throw new AccessControlException("Attempt to set a value for '" +
     SECURITY_XATTR_UNREADABLE_BY_SUPERUSER +
     "'. Values are not allowed for this xattr.");
  }
  return;
 }
 throw new AccessControlException("User doesn't have permission for xattr: "
   + XAttrHelper.getPrefixedName(xAttr));
}
origin: org.apache.hadoop/hadoop-hdfs

private static void checkXAttrChangeAccess(
  FSDirectory fsd, INodesInPath iip, XAttr xAttr,
  FSPermissionChecker pc)
  throws AccessControlException, FileNotFoundException {
 if (fsd.isPermissionEnabled() && xAttr.getNameSpace() == XAttr.NameSpace
   .USER) {
  final INode inode = iip.getLastINode();
  if (inode != null &&
    inode.isDirectory() &&
    inode.getFsPermission().getStickyBit()) {
   if (!pc.isSuperUser()) {
    fsd.checkOwner(pc, iip);
   }
  } else {
   fsd.checkPathAccess(pc, iip, FsAction.WRITE);
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

   UnresolvedPathException, ParentNotDirectoryException {
try {
 if (pc == null || pc.isSuperUser()) {
  checkSimpleTraverse(iip);
 } else {
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Whether a cache pool can be accessed by the current context
 *
 * @param pool CachePool being accessed
 * @param access type of action being performed on the cache pool
 * @throws AccessControlException if pool cannot be accessed
 */
public void checkPermission(CachePool pool, FsAction access)
  throws AccessControlException {
 FsPermission mode = pool.getMode();
 if (isSuperUser()) {
  return;
 }
 if (getUser().equals(pool.getOwnerName())
   && mode.getUserAction().implies(access)) {
  return;
 }
 if (isMemberOfGroup(pool.getGroupName())
   && mode.getGroupAction().implies(access)) {
  return;
 }
 if (!getUser().equals(pool.getOwnerName())
   && !isMemberOfGroup(pool.getGroupName())
   && mode.getOtherAction().implies(access)) {
  return;
 }
 throw new AccessControlException("Permission denied while accessing pool "
   + pool.getPoolName() + ": user " + getUser() + " does not have "
   + access.toString() + " permissions.");
}
origin: org.apache.hadoop/hadoop-hdfs

static FileStatus setOwner(
  FSDirectory fsd, FSPermissionChecker pc, String src, String username,
  String group) throws IOException {
 if (FSDirectory.isExactReservedName(src)) {
  throw new InvalidPathException(src);
 }
 INodesInPath iip;
 fsd.writeLock();
 try {
  iip = fsd.resolvePath(pc, src, DirOp.WRITE);
  fsd.checkOwner(pc, iip);
  if (!pc.isSuperUser()) {
   if (username != null && !pc.getUser().equals(username)) {
    throw new AccessControlException("User " + pc.getUser()
      + " is not a super user (non-super user cannot change owner).");
   }
   if (group != null && !pc.isMemberOfGroup(group)) {
    throw new AccessControlException(
      "User " + pc.getUser() + " does not belong to " + group);
   }
  }
  unprotectedSetOwner(fsd, iip, username, group);
 } finally {
  fsd.writeUnlock();
 }
 fsd.getEditLog().logSetOwner(iip.getPath(), username, group);
 return fsd.getAuditFileInfo(iip);
}
origin: org.apache.hadoop/hadoop-hdfs

static DirectoryListing getListingInt(FSDirectory fsd, FSPermissionChecker pc,
  final String srcArg, byte[] startAfter, boolean needLocation)
  throws IOException {
 final INodesInPath iip = fsd.resolvePath(pc, srcArg, DirOp.READ);
 // Get file name when startAfter is an INodePath.  This is not the
 // common case so avoid any unnecessary processing unless required.
 if (startAfter.length > 0 && startAfter[0] == Path.SEPARATOR_CHAR) {
  final String startAfterString = DFSUtil.bytes2String(startAfter);
  if (FSDirectory.isReservedName(startAfterString)) {
   try {
    byte[][] components = INode.getPathComponents(startAfterString);
    components = FSDirectory.resolveComponents(components, fsd);
    startAfter = components[components.length - 1];
   } catch (IOException e) {
    // Possibly the inode is deleted
    throw new DirectoryListingStartAfterNotFoundException(
      "Can't find startAfter " + startAfterString);
   }
  }
 }
 boolean isSuperUser = true;
 if (fsd.isPermissionEnabled()) {
  if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
   fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
  }
  isSuperUser = pc.isSuperUser();
 }
 return getListing(fsd, iip, startAfter, needLocation, isSuperUser);
}
origin: io.prestosql.hadoop/hadoop-apache

private void checkUnreadableBySuperuser(FSPermissionChecker pc,
  INode inode, int snapshotId)
  throws IOException {
 if (pc.isSuperUser()) {
  for (XAttr xattr : FSDirXAttrOp.getXAttrs(dir, inode, snapshotId)) {
   if (XAttrHelper.getPrefixName(xattr).
     equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
    throw new AccessControlException("Access is denied for " +
      pc.getUser() + " since the superuser is not allowed to " +
      "perform this operation.");
   }
  }
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

static SnapshottableDirectoryStatus[] getSnapshottableDirListing(
  FSDirectory fsd, SnapshotManager snapshotManager) throws IOException {
 FSPermissionChecker pc = fsd.getPermissionChecker();
 fsd.readLock();
 try {
  final String user = pc.isSuperUser()? null : pc.getUser();
  return snapshotManager.getSnapshottableDirListing(user);
 } finally {
  fsd.readUnlock();
 }
}

Popular methods of FSPermissionChecker

  • checkPermission
    Check whether the current user has permissions to access the path. Traverse is always checked. Parent p…
  • <init>
  • check
    Guarded by FSNamesystem#readLock()
  • checkOwner
    Guarded by FSNamesystem#readLock()
  • checkSubAccess
    Guarded by FSNamesystem#readLock()
  • checkTraverse
    Guarded by FSNamesystem#readLock()
  • checkStickyBit
    Guarded by FSNamesystem#readLock()
  • checkSuperuserPrivilege
    Verify if the caller has the required permission. This will result in an exception if the caller is not allowed to access the resource (see the sketch after this list).
  • getAccessControlEnforcer
  • getAttributesProvider
  • getINodeAttrs
  • getUser
  • hasAclPermission
  • hasPermission
  • toAccessControlString
  • constructPath
  • containsGroup
  • isMemberOfGroup
  • checkIsDirectory
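
For orientation, a small sketch (hypothetical caller code, not taken from Hadoop) contrasting the two superuser entry points above: isSuperUser() returns a boolean that callers branch on, while checkSuperuserPrivilege() throws an AccessControlException outright when the caller is not the superuser:

 void restrictedAdminOp(FSPermissionChecker pc) throws AccessControlException {
  // Fail fast: everything below this line runs only for the HDFS superuser.
  pc.checkSuperuserPrivilege();
  // ... privileged work ...
 }

 void relaxedOp(FSPermissionChecker pc, INodesInPath iip)
   throws AccessControlException {
  // Branch instead of failing: the superuser skips the fine-grained check;
  // everyone else needs READ access on the path (null arguments skip the
  // corresponding ancestor/parent/sub-access checks).
  if (!pc.isSuperUser()) {
   pc.checkPermission(iip, false, null, null, FsAction.READ, null, true);
  }
 }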
