Tabnine Logo
FSPermissionChecker.getUser
Code IndexAdd Tabnine to your IDE (free)

How to use
getUser
method
in
org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.getUser (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

/** Return true when sticky bit is violated. */
/**
 * Decides whether a sticky-bit check fails for the caller.
 * The sticky bit is satisfied when the caller owns either the parent
 * directory or the inode itself; anything else is a violation.
 *
 * @return true when the sticky bit is violated
 */
private boolean isStickyBitViolated(INodeAttributes parent,
                  INodeAttributes inode) {
  final String user = getUser();
  return !parent.getUserName().equals(user)
      && !inode.getUserName().equals(user);
}
origin: org.apache.hadoop/hadoop-hdfs

/** Guarded by {@link FSNamesystem#readLock()} */
/**
 * Rejects the caller unless it owns the inode at position {@code i}.
 * Guarded by {@link FSNamesystem#readLock()}.
 *
 * @throws AccessControlException if the caller is not the inode owner
 */
private void checkOwner(INodeAttributes[] inodes, byte[][] components, int i)
    throws AccessControlException {
  final String user = getUser();
  if (!user.equals(inodes[i].getUserName())) {
    throw new AccessControlException(
        "Permission denied. user=" + user +
        " is not the owner of inode=" + getPath(components, 0, i));
  }
}
origin: org.apache.hadoop/hadoop-hdfs

/** @return a string for throwing {@link AccessControlException} */
private String toAccessControlString(INodeAttributes inodeAttrib,
  String path, FsAction access, boolean deniedFromAcl) {
 StringBuilder sb = new StringBuilder("Permission denied: ")
  .append("user=").append(getUser()).append(", ")
  .append("access=").append(access).append(", ")
  .append("inode=\"").append(path).append("\":")
  .append(inodeAttrib.getUserName()).append(':')
  .append(inodeAttrib.getGroupName()).append(':')
  .append(inodeAttrib.isDirectory() ? 'd' : '-')
  .append(inodeAttrib.getFsPermission());
 if (deniedFromAcl) {
  sb.append("+");
 }
 return sb.toString();
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Verify if the caller has the required permission. This will result into 
 * an exception if the caller is not allowed to access the resource.
 */
/**
 * Ensures the caller holds superuser privilege.
 *
 * @throws AccessControlException if the caller is not a superuser
 */
public void checkSuperuserPrivilege()
    throws AccessControlException {
  if (isSuperUser()) {
    return;
  }
  throw new AccessControlException("Access denied for user "
      + getUser() + ". Superuser privilege is required");
}

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Blocks a superuser from reading a path carrying the
 * unreadable-by-superuser xattr; non-superusers pass through untouched.
 *
 * @throws AccessControlException if the caller is a superuser and the
 *         xattr is present on the path
 */
void checkUnreadableBySuperuser(FSPermissionChecker pc, INodesInPath iip)
    throws IOException {
  if (!pc.isSuperUser()) {
    return;
  }
  if (FSDirXAttrOp.getXAttrByPrefixedName(this, iip,
      SECURITY_XATTR_UNREADABLE_BY_SUPERUSER) != null) {
    throw new AccessControlException(
        "Access is denied for " + pc.getUser() + " since the superuser "
        + "is not allowed to perform this operation.");
  }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Lists snapshottable directories under the FSDirectory read lock.
 * A superuser is passed as {@code null} to the snapshot manager, which
 * means no per-user filtering; others are filtered by their user name.
 */
static SnapshottableDirectoryStatus[] getSnapshottableDirListing(
    FSDirectory fsd, FSPermissionChecker pc, SnapshotManager snapshotManager)
    throws IOException {
  fsd.readLock();
  try {
    String user = null;
    if (!pc.isSuperUser()) {
      user = pc.getUser();
    }
    return snapshotManager.getSnapshottableDirListing(user);
  } finally {
    fsd.readUnlock();
  }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Changes the owner and/or group of {@code src} under the FSDirectory
 * write lock, then records the change in the edit log.
 *
 * <p>Non-superusers must already own the inode (enforced by
 * {@code fsd.checkOwner}), may not transfer ownership to another user,
 * and may only set a group they are a member of.
 *
 * @param fsd      the directory tree being modified
 * @param pc       permission checker for the calling user
 * @param src      path whose owner/group is being changed
 * @param username new owner, or null to leave the owner unchanged
 * @param group    new group, or null to leave the group unchanged
 * @return audit file status for the (possibly re-resolved) path
 * @throws InvalidPathException if {@code src} is an exact reserved name
 * @throws AccessControlException if the permission rules above fail
 */
static FileStatus setOwner(
  FSDirectory fsd, FSPermissionChecker pc, String src, String username,
  String group) throws IOException {
 if (FSDirectory.isExactReservedName(src)) {
  throw new InvalidPathException(src);
 }
 INodesInPath iip;
 fsd.writeLock();
 try {
  iip = fsd.resolvePath(pc, src, DirOp.WRITE);
  // Caller must own the inode even before the superuser short-circuit.
  fsd.checkOwner(pc, iip);
  if (!pc.isSuperUser()) {
   // A non-superuser can only "change" the owner to itself.
   if (username != null && !pc.getUser().equals(username)) {
    throw new AccessControlException("User " + pc.getUser()
      + " is not a super user (non-super user cannot change owner).");
   }
   // A non-superuser can only assign a group it belongs to.
   if (group != null && !pc.isMemberOfGroup(group)) {
    throw new AccessControlException(
      "User " + pc.getUser() + " does not belong to " + group);
   }
  }
  unprotectedSetOwner(fsd, iip, username, group);
 } finally {
  fsd.writeUnlock();
 }
 // Edit-log write happens outside the lock, after the in-memory change.
 fsd.getEditLog().logSetOwner(iip.getPath(), username, group);
 return fsd.getAuditFileInfo(iip);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Whether a cache pool can be accessed by the current context
 *
 * @param pool CachePool being accessed
 * @param access type of action being performed on the cache pool
 * @throws AccessControlException if pool cannot be accessed
 */
/**
 * Whether a cache pool can be accessed by the current context.
 *
 * <p>Unlike plain inode checks, a caller may be granted through any
 * class that applies: owner, group, or (when neither owner nor group
 * member) other. Superusers always pass.
 *
 * @param pool CachePool being accessed
 * @param access type of action being performed on the cache pool
 * @throws AccessControlException if pool cannot be accessed
 */
public void checkPermission(CachePool pool, FsAction access)
    throws AccessControlException {
  final FsPermission mode = pool.getMode();
  if (isSuperUser()) {
    return;
  }
  final boolean isOwner = getUser().equals(pool.getOwnerName());
  if (isOwner && mode.getUserAction().implies(access)) {
    return;
  }
  final boolean inGroup = isMemberOfGroup(pool.getGroupName());
  if (inGroup && mode.getGroupAction().implies(access)) {
    return;
  }
  if (!isOwner && !inGroup && mode.getOtherAction().implies(access)) {
    return;
  }
  throw new AccessControlException("Permission denied while accessing pool "
      + pool.getPoolName() + ": user " + getUser() + " does not have "
      + access.toString() + " permissions.");
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Checks whether the caller's applicable permission class grants
 * {@code access} on {@code inode}. A null inode is treated as allowed.
 * When the inode carries an access-scoped ACL, the ACL check is used
 * instead of the plain user/group/other mode bits.
 */
private boolean hasPermission(INodeAttributes inode, FsAction access) {
  if (inode == null) {
    return true;
  }
  final FsPermission mode = inode.getFsPermission();
  final AclFeature aclFeature = inode.getAclFeature();
  if (aclFeature != null && aclFeature.getEntriesSize() > 0) {
    // An inode may hold only default ACL entries; those start with a
    // DEFAULT-scoped entry, in which case the mode bits still apply.
    int firstEntry = aclFeature.getEntryAt(0);
    if (AclEntryStatusFormat.getScope(firstEntry) == AclEntryScope.ACCESS) {
      return hasAclPermission(inode, access, mode, aclFeature);
    }
  }
  // Exactly one class applies: owner wins over group, group over other.
  final FsAction checkAction = getUser().equals(inode.getUserName())
      ? mode.getUserAction()
      : (isMemberOfGroup(inode.getGroupName())
          ? mode.getGroupAction()
          : mode.getOtherAction());
  return checkAction.implies(access);
}
origin: org.apache.hadoop/hadoop-hdfs

if (getUser().equals(inode.getUserName())) {
 if (mode.getUserAction().implies(access)) {
  return true;
   if (getUser().equals(name)) {
    FsAction masked = AclEntryStatusFormat.getPermission(entry).and(
      mode.getGroupAction());
origin: ch.cern.hadoop/hadoop-hdfs

/** Guarded by {@link FSNamesystem#readLock()} */
private void checkOwner(INodeAttributes[] inodes, int i)
  throws AccessControlException {
 if (getUser().equals(inodes[i].getUserName())) {
  return;
 }
 throw new AccessControlException(
   "Permission denied. user=" + getUser() +
   " is not the owner of inode=" + constructPath(inodes, i));
}
origin: io.prestosql.hadoop/hadoop-apache

/** Guarded by {@link FSNamesystem#readLock()} */
private void checkOwner(INodeAttributes[] inodes, int i)
  throws AccessControlException {
 if (getUser().equals(inodes[i].getUserName())) {
  return;
 }
 throw new AccessControlException(
   "Permission denied. user=" + getUser() +
   " is not the owner of inode=" + constructPath(inodes, i));
}
origin: io.prestosql.hadoop/hadoop-apache

/** @return a string for throwing {@link AccessControlException} */
private String toAccessControlString(INodeAttributes inodeAttrib,
  String path, FsAction access, boolean deniedFromAcl) {
 StringBuilder sb = new StringBuilder("Permission denied: ")
  .append("user=").append(getUser()).append(", ")
  .append("access=").append(access).append(", ")
  .append("inode=\"").append(path).append("\":")
  .append(inodeAttrib.getUserName()).append(':')
  .append(inodeAttrib.getGroupName()).append(':')
  .append(inodeAttrib.isDirectory() ? 'd' : '-')
  .append(inodeAttrib.getFsPermission());
 if (deniedFromAcl) {
  sb.append("+");
 }
 return sb.toString();
}
origin: ch.cern.hadoop/hadoop-hdfs

/** @return a string for throwing {@link AccessControlException} */
private String toAccessControlString(INodeAttributes inodeAttrib,
  String path, FsAction access, boolean deniedFromAcl) {
 StringBuilder sb = new StringBuilder("Permission denied: ")
  .append("user=").append(getUser()).append(", ")
  .append("access=").append(access).append(", ")
  .append("inode=\"").append(path).append("\":")
  .append(inodeAttrib.getUserName()).append(':')
  .append(inodeAttrib.getGroupName()).append(':')
  .append(inodeAttrib.isDirectory() ? 'd' : '-')
  .append(inodeAttrib.getFsPermission());
 if (deniedFromAcl) {
  sb.append("+");
 }
 return sb.toString();
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Verify if the caller has the required permission. This will result into 
 * an exception if the caller is not allowed to access the resource.
 */
/**
 * Rejects callers that do not hold superuser privilege.
 *
 * @throws AccessControlException when the caller is not a superuser
 */
public void checkSuperuserPrivilege()
    throws AccessControlException {
  if (isSuperUser()) {
    return;
  }
  final String message = "Access denied for user " + getUser()
      + ". Superuser privilege is required";
  throw new AccessControlException(message);
}

origin: io.prestosql.hadoop/hadoop-apache

/**
 * Verify if the caller has the required permission. This will result into 
 * an exception if the caller is not allowed to access the resource.
 */
/**
 * Asserts that the calling user is a superuser.
 *
 * @throws AccessControlException if superuser privilege is missing
 */
public void checkSuperuserPrivilege()
    throws AccessControlException {
  final boolean privileged = isSuperUser();
  if (!privileged) {
    throw new AccessControlException("Access denied for user "
        + getUser() + ". Superuser privilege is required");
  }
}

origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Denies a superuser access to an inode whose xattrs include the
 * unreadable-by-superuser marker; non-superusers are never affected.
 *
 * @throws AccessControlException when the marker xattr is present and
 *         the caller is a superuser
 */
private void checkUnreadableBySuperuser(FSPermissionChecker pc,
    INode inode, int snapshotId)
    throws IOException {
  if (!pc.isSuperUser()) {
    return;
  }
  for (XAttr attr : FSDirXAttrOp.getXAttrs(dir, inode, snapshotId)) {
    final String prefixedName = XAttrHelper.getPrefixName(attr);
    if (prefixedName.equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
      throw new AccessControlException("Access is denied for " +
          pc.getUser() + " since the superuser is not allowed to " +
          "perform this operation.");
    }
  }
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Enforces the unreadable-by-superuser xattr: a superuser touching an
 * inode that carries the marker is rejected outright.
 */
private void checkUnreadableBySuperuser(FSPermissionChecker pc,
    INode inode, int snapshotId)
    throws IOException {
  if (pc.isSuperUser()) {
    for (XAttr candidate : FSDirXAttrOp.getXAttrs(dir, inode, snapshotId)) {
      boolean marked = XAttrHelper.getPrefixName(candidate)
          .equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER);
      if (marked) {
        throw new AccessControlException("Access is denied for " +
            pc.getUser() + " since the superuser is not allowed to " +
            "perform this operation.");
      }
    }
  }
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Returns the snapshottable directory listing under the read lock,
 * filtered to the caller's user name unless the caller is a superuser
 * (in which case null disables filtering).
 */
static SnapshottableDirectoryStatus[] getSnapshottableDirListing(
    FSDirectory fsd, SnapshotManager snapshotManager) throws IOException {
  final FSPermissionChecker checker = fsd.getPermissionChecker();
  fsd.readLock();
  try {
    String user = null;
    if (!checker.isSuperUser()) {
      user = checker.getUser();
    }
    return snapshotManager.getSnapshottableDirListing(user);
  } finally {
    fsd.readUnlock();
  }
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Lists snapshottable directories while holding the FSDirectory read
 * lock. Superusers get the unfiltered listing (null user); everyone
 * else sees only directories they can list as themselves.
 */
static SnapshottableDirectoryStatus[] getSnapshottableDirListing(
    FSDirectory fsd, SnapshotManager snapshotManager) throws IOException {
  FSPermissionChecker pc = fsd.getPermissionChecker();
  fsd.readLock();
  try {
    final boolean seeAll = pc.isSuperUser();
    return snapshotManager.getSnapshottableDirListing(
        seeAll ? null : pc.getUser());
  } finally {
    fsd.readUnlock();
  }
}
org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.getUser

Popular methods of FSPermissionChecker

  • checkPermission
    Check whether current user have permissions to access the path. Traverse is always checked. Parent p
  • <init>
  • check
    Guarded by FSNamesystem#readLock()
  • checkOwner
    Guarded by FSNamesystem#readLock()
  • checkSubAccess
    Guarded by FSNamesystem#readLock()
  • checkTraverse
    Guarded by FSNamesystem#readLock()
  • checkStickyBit
    Guarded by FSNamesystem#readLock()
  • checkSuperuserPrivilege
    Verify if the caller has the required permission. This will result into an exception if the caller i
  • getAccessControlEnforcer
  • getAttributesProvider
  • getINodeAttrs
  • hasAclPermission
    Checks requested access against an Access Control List. This method relies on finding the ACL data i
  • getINodeAttrs,
  • hasAclPermission,
  • hasPermission,
  • isSuperUser,
  • toAccessControlString,
  • constructPath,
  • containsGroup,
  • isMemberOfGroup,
  • checkIsDirectory

Popular in Java

  • Updating database using SQL prepared statement
  • putExtra (Intent)
  • getContentResolver (Context)
  • setScale (BigDecimal)
  • Pointer (com.sun.jna)
    An abstraction for a native pointer data type. A Pointer instance represents, on the Java side, a na
  • ArrayList (java.util)
    ArrayList is an implementation of List, backed by an array. All optional operations including adding
  • List (java.util)
    An ordered collection (also known as a sequence). The user of this interface has precise control ove
  • TreeSet (java.util)
    TreeSet is an implementation of SortedSet. All optional operations (adding and removing) are support
  • JLabel (javax.swing)
  • LogFactory (org.apache.commons.logging)
    Factory for creating Log instances, with discovery and configuration features similar to that employ
  • Best plugins for Eclipse
Tabnine Logo
  • Products

    Search for Java code · Search for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now