congrats Icon
New! Announcing Tabnine Chat Beta
Learn More
Tabnine Logo
AccessControlList
Code IndexAdd Tabnine to your IDE (free)

How to use
AccessControlList
in
org.apache.hadoop.security.authorize

Best Java code snippets using org.apache.hadoop.security.authorize.AccessControlList (Showing top 20 results out of 540)

Refine searchRefine arrow

  • UserGroupInformation
  • Configuration
origin: org.apache.hadoop/hadoop-common

 new IdentityHashMap<Class<?>, MachineList[]>();
String defaultAcl = conf.get(
  CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL,
  AccessControlList.WILDCARD_ACL_VALUE);
String defaultBlockedAcl = conf.get(
 CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_BLOCKED_ACL, "");
String defaultMachineList = conf.get(defaultServiceHostsKey,
 MachineList.WILDCARD_VALUE);
String defaultBlockedMachineList= conf.get(
 for (Service service : services) {
  AccessControlList acl =
    new AccessControlList(
      conf.get(service.getServiceKey(),
        defaultAcl)
    );
  AccessControlList blockedAcl =
    new AccessControlList(
    conf.get(service.getServiceKey() + BLOCKED,
    defaultBlockedAcl));
origin: apache/hbase

/**
 * Verifies instrumentation access control: open by default, denied once
 * HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN is enabled and the
 * admin ACL rejects the caller.
 */
@Test
public void testRequiresAuthorizationAccess() throws Exception {
 Configuration configuration = new Configuration();
 ServletContext servletContext = Mockito.mock(ServletContext.class);
 Mockito.when(servletContext.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE))
   .thenReturn(configuration);
 HttpServletRequest req = Mockito.mock(HttpServletRequest.class);
 HttpServletResponse resp = Mockito.mock(HttpServletResponse.class);

 // Admin-only instrumentation is FALSE by default, so access is granted.
 Assert.assertTrue(
   HttpServer.isInstrumentationAccessAllowed(servletContext, req, resp));

 // Enable authorization together with admin-only instrumentation.
 configuration.setBoolean(
   CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
 configuration.setBoolean(
   CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);

 // An ACL that rejects every user: access must now be denied.
 AccessControlList rejectingAcl = Mockito.mock(AccessControlList.class);
 Mockito.when(rejectingAcl.isUserAllowed(Mockito.<UserGroupInformation>any()))
   .thenReturn(false);
 Mockito.when(servletContext.getAttribute(HttpServer.ADMINS_ACL))
   .thenReturn(rejectingAcl);
 Assert.assertFalse(
   HttpServer.isInstrumentationAccessAllowed(servletContext, req, resp));
}
origin: org.apache.hadoop/hadoop-common

/**
 * Serializes the AccessControlList object by writing its compact String
 * form (see {@code getAclString()}) to the stream.
 *
 * @param out destination stream
 * @throws IOException if the underlying stream fails
 */
@Override
public void write(DataOutput out) throws IOException {
 Text.writeString(out, getAclString());
}
origin: org.apache.hadoop/hadoop-common

/**
 * Remove user from the names of users allowed for this service.
 *
 * @param user
 *          The user name
 * @throws IllegalArgumentException if the wildcard ACL value is passed
 */
public void removeUser(String user) {
 if (isWildCardACLValue(user)) {
  throw new IllegalArgumentException("User " + user + " can not be removed");
 }
 // A wildcard ACL keeps no explicit user list, so there is nothing to drop.
 if (isAllAllowed()) {
  return;
 }
 users.remove(user);
}
origin: org.apache.hadoop/hadoop-hdfs

this.conf = conf;
Configuration confForInfoServer = new Configuration(conf);
confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY,
  HTTP_MAX_THREADS);
confForInfoServer.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY,
  HTTP_SELECTOR_THREADS);
confForInfoServer.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY,
  .setName("datanode")
  .setConf(confForInfoServer)
  .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
  .hostName(getHostnameForSpnegoPrincipal(confForInfoServer))
  .addEndpoint(URI.create("http://localhost:" + proxyPort))
origin: org.apache.hadoop/hadoop-mapred

/**
 * Builds the ACLs manager. The admin ACL is seeded from MRConfig.MR_ADMINS
 * and always includes the current (owner) user; the deprecated supergroup
 * setting is honored with a warning when still configured.
 */
ACLsManager(Configuration conf, JobACLsManager jobACLsManager,
  QueueManager queueManager) throws IOException {
 mrOwner = UserGroupInformation.getCurrentUser();

 adminAcl = new AccessControlList(conf.get(MRConfig.MR_ADMINS, " "));
 adminAcl.addUser(mrOwner.getShortUserName());

 String legacySuperGroup = conf.get(MRConfig.MR_SUPERGROUP);
 if (legacySuperGroup != null) {
  LOG.warn(MRConfig.MR_SUPERGROUP + " is deprecated. Use " 
    + MRConfig.MR_ADMINS + " instead");
  adminAcl.addGroup(legacySuperGroup);
 }

 aclsEnabled = conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
 this.jobACLsManager = jobACLsManager;
 this.queueManager = queueManager;
}
origin: apache/hbase

@Ignore
public void testAuthorizationOfDefaultServlets() throws Exception {
 Configuration conf = new Configuration();
 conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
   true);
 conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
   true);
 conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY,
   .setACL(new AccessControlList("userA,userB groupC,groupD")).build();
 myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf);
 myServer.start();
origin: com.github.jiayuhan-it/hadoop-yarn-server-resourcemanager

/**
 * Builds the admin ACL from yarn.admin.acl (falling back to the default
 * value) and always grants the daemon user.
 */
private AccessControlList getAdminAclList(Configuration conf) {
 String aclSpec = conf.get(YarnConfiguration.YARN_ADMIN_ACL,
   YarnConfiguration.DEFAULT_YARN_ADMIN_ACL);
 AccessControlList acl = new AccessControlList(aclSpec);
 acl.addUser(daemonUser.getShortUserName());
 return acl;
}
origin: ch.cern.hadoop/hadoop-mapreduce-client-hs

/**
 * Re-reads the JHS admin ACL from a freshly created configuration.
 * The caller must first pass the existing ACL check; success is audited.
 */
@Override
public void refreshAdminAcls() throws IOException {
 UserGroupInformation caller = checkAcls("refreshAdminAcls");
 Configuration freshConf = createConf();
 String aclSpec = freshConf.get(JHAdminConfig.JHS_ADMIN_ACL,
   JHAdminConfig.DEFAULT_JHS_ADMIN_ACL);
 adminAcl = new AccessControlList(aclSpec);
 HSAuditLogger.logSuccess(caller.getShortUserName(), "refreshAdminAcls",
   HISTORY_ADMIN_SERVER);
}
origin: org.apache.hadoop/hadoop-common

  try {
   clientPrincipal = SecurityUtil.getServerPrincipal(
     conf.get(clientKey), addr);
  } catch (IOException e) {
   throw (AuthorizationException) new AuthorizationException(
if((clientPrincipal != null && !clientPrincipal.equals(user.getUserName())) || 
  acls.length != 2  || !acls[0].isUserAllowed(user) || acls[1].isUserAllowed(user)) {
 String cause = clientPrincipal != null ?
   ": this service is only accessible by " + clientPrincipal :
origin: org.apache.hadoop/hadoop-hdfs

  .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
  .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
  .setUsernameConfKey(spnegoUserNameKey)
  .setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey));
if (UserGroupInformation.isSecurityEnabled()) {
 LOG.info("Starting web server as: "
   + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
     httpAddr.getHostName()));
origin: ch.cern.hadoop/hadoop-mapreduce-client-hs

/**
 * Resolves the current user and verifies membership in the admin ACL,
 * audit-logging each failure path before throwing.
 *
 * @param method admin operation being attempted (used in audit/log text)
 * @return the authorized current user
 * @throws IOException if the current user cannot be determined
 * @throws AccessControlException if the user is not in the admin ACL
 */
private UserGroupInformation checkAcls(String method) throws IOException {
 UserGroupInformation caller;
 try {
  caller = UserGroupInformation.getCurrentUser();
 } catch (IOException ioe) {
  LOG.warn("Couldn't get current user", ioe);
  HSAuditLogger.logFailure("UNKNOWN", method, adminAcl.toString(),
    HISTORY_ADMIN_SERVER, "Couldn't get current user");
  throw ioe;
 }

 if (!adminAcl.isUserAllowed(caller)) {
  // Same text goes to the log and into the exception.
  String denial = "User " + caller.getShortUserName()
    + " doesn't have permission" + " to call '" + method + "'";
  LOG.warn(denial);
  HSAuditLogger.logFailure(caller.getShortUserName(), method,
    adminAcl.toString(), HISTORY_ADMIN_SERVER,
    AuditConstants.UNAUTHORIZED_USER);
  throw new AccessControlException(denial);
 }

 LOG.info("HS Admin: " + method + " invoked by user "
   + caller.getShortUserName());
 return caller;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Admits the RPC caller when it is in the configured admin ACL or shares
 * the short user name of the ZKFC login user; otherwise logs and rejects.
 */
@Override
protected void checkRpcAdminAccess() throws IOException, AccessControlException {
 UserGroupInformation caller = UserGroupInformation.getCurrentUser();
 UserGroupInformation zkfcUser = UserGroupInformation.getLoginUser();
 boolean isZkfcUser =
   caller.getShortUserName().equals(zkfcUser.getShortUserName());
 if (adminAcl.isUserAllowed(caller) || isZkfcUser) {
  LOG.info("Allowed RPC access from " + caller + " at " + Server.getRemoteAddress());
  return;
 }
 String msg = "Disallowed RPC access from " + caller + " at " +
   Server.getRemoteAddress() + ". Not listed in " + DFSConfigKeys.DFS_ADMIN;
 LOG.warn(msg);
 throw new AccessControlException(msg);
}
origin: org.apache.hadoop/hadoop-common

/**
 * Authorizes a proxied (impersonated) request: the real user must have an
 * ACL entry that permits impersonating {@code user}, and the request must
 * originate from a host in the real user's configured machine list.
 *
 * @param user the (possibly proxied) caller; must not be null
 * @param remoteAddress IP address the request came from
 * @throws AuthorizationException if the impersonation or host check fails
 */
@Override
public void authorize(UserGroupInformation user, 
  String remoteAddress) throws AuthorizationException {
 
 if (user == null) {
  throw new IllegalArgumentException("user is null.");
 }
 UserGroupInformation realUser = user.getRealUser();
 // No real user means the call is not proxied; nothing to authorize.
 if (realUser == null) {
  return;
 }
 
 AccessControlList acl = proxyUserAcl.get(configPrefix +
   realUser.getShortUserName());
 if (acl == null || !acl.isUserAllowed(user)) {
  throw new AuthorizationException("User: " + realUser.getUserName()
    + " is not allowed to impersonate " + user.getUserName());
 }
 // FIX: local variable previously shadowed the MachineList type name
 // ("MachineList MachineList = ..."); renamed to lowerCamelCase.
 MachineList machineList = proxyHosts.get(
   getProxySuperuserIpConfKey(realUser.getShortUserName()));
 if (machineList == null || !machineList.includes(remoteAddress)) {
  throw new AuthorizationException("Unauthorized connection for super-user: "
    + realUser.getUserName() + " from IP " + remoteAddress);
 }
}

origin: org.apache.hadoop/hadoop-mapred

/**
 * Rereads the config to get hosts and exclude list file names.
 * Rereads the files to update the hosts and exclude lists.
 */
public synchronized void refreshNodes() throws IOException {
 String user = UserGroupInformation.getCurrentUser().getShortUserName();
 // check access
 if (!aclsManager.isMRAdmin(UserGroupInformation.getCurrentUser())) {
  AuditLogger.logFailure(user, Constants.REFRESH_NODES,
    aclsManager.getAdminsAcl().toString(), Constants.JOBTRACKER,
    Constants.UNAUTHORIZED_USER);
  throw new AccessControlException(user + 
                   " is not authorized to refresh nodes.");
 }
 
 AuditLogger.logSuccess(user, Constants.REFRESH_NODES, Constants.JOBTRACKER);
 // call the actual api
 refreshHosts();
}
origin: io.hops/hadoop-mapreduce-client-hs

/**
 * Asks the job-history service to reload its in-memory job cache,
 * audit-logging success or the unsupported-operation failure.
 */
@Override
public void refreshLoadedJobCache() throws IOException {
 UserGroupInformation caller = checkAcls("refreshLoadedJobCache");
 try {
  jobHistoryService.refreshLoadedJobCache();
 } catch (UnsupportedOperationException e) {
  // Some history implementations do not support refresh; audit and rethrow.
  HSAuditLogger.logFailure(caller.getShortUserName(),
    "refreshLoadedJobCache", adminAcl.toString(), HISTORY_ADMIN_SERVER,
    e.getMessage());
  throw e;
 }
 HSAuditLogger.logSuccess(caller.getShortUserName(), "refreshLoadedJobCache",
   HISTORY_ADMIN_SERVER);
}
origin: org.apache.hadoop/hadoop-mapreduce-client-core

assertEquals(
  firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job")
    .toString(),
  "Users [user1, user2] and members of the groups [group1, group2] are allowed");
Queue secondSubQueue = iterator.next();
when(mockUGI.getShortUserName()).thenReturn("user1");
String[] groups = { "group1" };
when(mockUGI.getGroupNames()).thenReturn(groups);
assertTrue(manager.hasAccess("first", QueueACL.SUBMIT_JOB, mockUGI));
assertFalse(manager.hasAccess("second", QueueACL.SUBMIT_JOB, mockUGI));
assertFalse(manager.hasAccess("first", QueueACL.ADMINISTER_JOBS, mockUGI));
when(mockUGI.getShortUserName()).thenReturn("user3");
assertTrue(manager.hasAccess("first", QueueACL.ADMINISTER_JOBS, mockUGI));
conf.unset(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY);
QueueManager.dumpConfiguration(writer, f.getAbsolutePath(), conf);
String result = writer.toString();
origin: apache/hbase

/**
 * Get the admin ACLs from the given ServletContext and check if the given
 * user is in the ACL.
 *
 * @param servletContext the context containing the admin ACL.
 * @param remoteUser the remote user to check for.
 * @return true if the user is present in the ACL, false if no ACL is set or
 *         the user is not present
 */
public static boolean userHasAdministratorAccess(ServletContext servletContext,
  String remoteUser) {
 AccessControlList admins =
   (AccessControlList) servletContext.getAttribute(ADMINS_ACL);
 UserGroupInformation ugi = UserGroupInformation.createRemoteUser(remoteUser);
 return admins != null && admins.isUserAllowed(ugi);
}
origin: org.apache.hadoop/hadoop-kms

/**
 * Checks whether {@code ugi} may perform {@code opType} according to the
 * per-operation ACL map.
 *
 * @param keyAcl per-operation ACLs for a key
 * @param ugi the user requesting access
 * @param opType the key operation being attempted
 * @return true only if an ACL exists for the operation and allows the user;
 *         a missing ACL denies access
 */
private boolean checkKeyAccess(Map<KeyOpType, AccessControlList> keyAcl,
  UserGroupInformation ugi, KeyOpType opType) {
 AccessControlList acl = keyAcl.get(opType);
 if (acl == null) {
  // If no acl is specified for this operation,
  // deny access
  LOG.debug("No ACL available for key, denying access for {}", opType);
  return false;
 } else {
  if (LOG.isDebugEnabled()) {
   // FIX: the format string was concatenated with ugi.getShortUserName()
   // using '+', leaving three {} placeholders but only two arguments, so
   // the user name was fused into the pattern and the last placeholder
   // never filled. Pass the user name as the first argument instead.
   LOG.debug("Checking user [{}] for: {}: {}", ugi.getShortUserName(),
     opType.toString(), acl.getAclString());
  }
  return acl.isUserAllowed(ugi);
 }
}
origin: org.apache.hadoop/hadoop-common-test

/**
 * Tests adding user/group to an wild card acl: every mutation must be a
 * no-op — the ACL stays all-allowed and records no explicit names.
 */
public void testAddRemoveToWildCardACL() {
 AccessControlList wildcardAcl = new AccessControlList(" * ");
 assertTrue(wildcardAcl.isAllAllowed());

 UserGroupInformation drwho = UserGroupInformation.createUserForTesting(
   "drwho@APACHE.ORG", new String[] { "aliens" });
 UserGroupInformation drwho2 = UserGroupInformation.createUserForTesting(
   "drwho2@APACHE.ORG", new String[] { "tardis" });

 // Adding a user leaves the wildcard intact and unrecorded.
 wildcardAcl.addUser("drwho");
 assertTrue(wildcardAcl.isAllAllowed());
 assertFalse(wildcardAcl.getAclString().contains("drwho"));

 // Same for adding a group.
 wildcardAcl.addGroup("tardis");
 assertTrue(wildcardAcl.isAllAllowed());
 assertFalse(wildcardAcl.getAclString().contains("tardis"));

 // Removals are equally ignored; everyone is still allowed.
 wildcardAcl.removeUser("drwho");
 assertTrue(wildcardAcl.isAllAllowed());
 assertUserAllowed(drwho, wildcardAcl);

 wildcardAcl.removeGroup("tardis");
 assertTrue(wildcardAcl.isAllAllowed());
 assertUserAllowed(drwho2, wildcardAcl);
}
org.apache.hadoop.security.authorize.AccessControlList

Javadoc

Class representing a configured access control list.

Most used methods

  • <init>
    Construct a new ACL from String representation of users and groups The arguments are comma separated
  • isUserAllowed
  • getAclString
    Returns the access control list as a String that can be used for building a new instance by sending
  • addUser
    Add user to the names of users allowed for this service.
  • isAllAllowed
  • toString
    Returns descriptive way of users and groups that are part of this ACL. Use #getAclString() to get the serialized form.
  • getGroups
  • addGroup
    Add group to the names of groups allowed for this service.
  • isUserInList
    Checks if a user represented by the provided UserGroupInformation is a member of the Access Control List.
  • buildACL
    Build ACL from the given two Strings. The Strings contain comma separated values.
  • getGroupsString
    Returns comma-separated concatenated single String of the set 'groups'
  • getString
    Returns comma-separated concatenated single String of all strings of the given set
  • getGroupsString,
  • getString,
  • getUsersString,
  • isWildCardACLValue,
  • readFields,
  • write,
  • getUsers,
  • removeGroup,
  • removeUser

Popular in Java

  • Finding current android device location
  • getSystemService (Context)
  • putExtra (Intent)
  • getOriginalFilename (MultipartFile)
    Return the original filename in the client's filesystem.This may contain path information depending
  • IOException (java.io)
    Signals a general, I/O-related error. Error details may be specified when calling the constructor, a
  • SocketTimeoutException (java.net)
    This exception is thrown when a timeout expired on a socket read or accept operation.
  • ByteBuffer (java.nio)
    A buffer for bytes. A byte buffer can be created in either one of the following ways: * #allocate
  • Executors (java.util.concurrent)
    Factory and utility methods for Executor, ExecutorService, ScheduledExecutorService, ThreadFactory,
  • JFileChooser (javax.swing)
  • Logger (org.apache.log4j)
    This is the central class in the log4j package. Most logging operations, except configuration, are d
  • Top Sublime Text plugins
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now