/**
 * Returns the result of {@code UserGroupInformation.isSecurityEnabled()}.
 *
 * <p>Thin static pass-through so callers do not need a direct dependency on
 * {@link UserGroupInformation}.
 *
 * @return {@code true} if Hadoop security (Kerberos) is enabled in the current
 *         process-wide Hadoop configuration
 */
public static boolean isSecurityEnabled() {
  return UserGroupInformation.isSecurityEnabled();
}
}
/**
 * Resolves the ZooKeeper root namespace to use.
 *
 * <p>A user-provided namespace always wins; otherwise a default is derived from the
 * given prefix plus a security-dependent suffix.
 *
 * @param userProvidedNamespace explicit namespace, or {@code null} to use the default
 * @param defaultNamespacePrefix prefix for the derived default namespace
 * @return the namespace to use
 */
public static String getRootNamespace(String userProvidedNamespace, String defaultNamespacePrefix) {
  if (userProvidedNamespace != null) {
    return userProvidedNamespace;
  }
  // Pick the suffix based on whether Hadoop security (Kerberos) is on.
  final String securitySuffix =
      UserGroupInformation.isSecurityEnabled() ? SASL_NAMESPACE : UNSECURE_NAMESPACE;
  return defaultNamespacePrefix + securitySuffix;
}
/**
 * Creates the proxy support helper; proxying is active only when Hadoop
 * security (Kerberos) is enabled for this process.
 */
public SecureProxySupport() {
  isEnabled = UserGroupInformation.isSecurityEnabled();
}
/**
 * Creates an LLAP signer backed by a ZK-based secret manager for the given cluster.
 *
 * @param conf configuration used to create the secret manager
 * @param clusterId LLAP cluster identifier the secret manager is scoped to
 */
public LlapSignerImpl(Configuration conf, String clusterId) {
  // TODO: create this centrally in HS2 case
  // Signing only makes sense with security on; callers must guarantee this.
  assert UserGroupInformation.isSecurityEnabled();
  secretManager = SecretManager.createSecretManager(conf, clusterId);
}
/**
 * Checks whether the current caller is allowed to perform delegation-token
 * operations (get/renew/cancel).
 *
 * <p>With security enabled, only callers authenticated via a strong method
 * (Kerberos, Kerberos-SSL, or certificate) qualify — i.e. a caller that itself
 * authenticated with a token may not manage tokens. With security disabled,
 * everything is allowed.
 *
 * @return {@code true} if the delegation-token operation is permitted
 * @throws IOException if the current user cannot be resolved
 */
private boolean isAllowedDelegationTokenOp() throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    // getRealAuthenticationMethod() looks through any proxy-user layer at the
    // real (underlying) authentication method.
    return EnumSet.of(UserGroupInformation.AuthenticationMethod.KERBEROS,
        UserGroupInformation.AuthenticationMethod.KERBEROS_SSL,
        UserGroupInformation.AuthenticationMethod.CERTIFICATE)
        .contains(UserGroupInformation.getCurrentUser()
            .getRealAuthenticationMethod());
  } else {
    return true;
  }
}
}
public static void login(Map<String, Object> conf, Configuration hdfsConfig) throws IOException { //If AutoHDFS is specified, do not attempt to login using keytabs, only kept for backward compatibility. if(conf.get(TOPOLOGY_AUTO_CREDENTIALS) == null || (!(((List)conf.get(TOPOLOGY_AUTO_CREDENTIALS)).contains(AutoHDFS.class.getName())) && !(((List)conf.get(TOPOLOGY_AUTO_CREDENTIALS)).contains(AutoTGT.class.getName())))) { if (UserGroupInformation.isSecurityEnabled()) { // compareAndSet added because of https://issues.apache.org/jira/browse/STORM-1535 if (isLoggedIn.compareAndSet(false, true)) { LOG.info("Logging in using keytab as AutoHDFS is not specified for " + TOPOLOGY_AUTO_CREDENTIALS); String keytab = (String) conf.get(STORM_KEYTAB_FILE_KEY); if (keytab != null) { hdfsConfig.set(STORM_KEYTAB_FILE_KEY, keytab); } String userName = (String) conf.get(STORM_USER_NAME_KEY); if (userName != null) { hdfsConfig.set(STORM_USER_NAME_KEY, userName); } SecurityUtil.login(hdfsConfig, STORM_KEYTAB_FILE_KEY, STORM_USER_NAME_KEY); } } } } }
/**
 * Creates the HDFS auth helper, installing the given Hadoop configuration
 * process-wide and capturing whether security is enabled.
 *
 * <p>When the cluster is secure, the keytab path and Kerberos principal are
 * required props and are resolved eagerly so misconfiguration fails fast.
 *
 * @param props Azkaban properties holding keytab path / principal
 * @param conf Hadoop configuration to install via {@code UserGroupInformation.setConfiguration}
 */
@Inject
public HdfsAuth(final Props props, final Configuration conf) {
  UserGroupInformation.setConfiguration(conf);
  this.isSecurityEnabled = UserGroupInformation.isSecurityEnabled();
  if (this.isSecurityEnabled) {
    log.info("The Hadoop cluster has enabled security");
    this.keytabPath = requireNonNull(props.getString(AZKABAN_KEYTAB_PATH));
    this.keytabPrincipal = requireNonNull(props.getString(AZKABAN_KERBEROS_PRINCIPAL));
  }
}
/**
 * Verifies that the current caller is permitted to act as {@code userName} for
 * the given cluster/application.
 *
 * <p>No-op when security is disabled. Otherwise the caller's Kerberos short
 * name (if it has Kerberos credentials) and its LLAP tokens for the cluster
 * are collected and handed to the internal permission check.
 *
 * @param clusterId LLAP cluster identifier
 * @param userName user being acted for; must not be null
 * @param appId application identifier
 * @param hint opaque context passed through to the internal check
 * @throws IOException if the current user cannot be resolved
 */
public static void checkPermissions(
    String clusterId, String userName, String appId, Object hint) throws IOException {
  if (!UserGroupInformation.isSecurityEnabled()) {
    return;
  }
  Preconditions.checkNotNull(userName);
  UserGroupInformation caller = UserGroupInformation.getCurrentUser();
  String callerKerberosName = null;
  if (caller.hasKerberosCredentials()) {
    callerKerberosName = caller.getShortUserName();
  }
  List<LlapTokenIdentifier> callerTokens = getLlapTokens(caller, clusterId);
  checkPermissionsInternal(callerKerberosName, callerTokens, userName, appId, hint);
}
@Override public void configureJobConf(TableDesc tableDesc, JobConf jobConf) { if (UserGroupInformation.isSecurityEnabled()) { // AM can not do Kerberos Auth so will do the input split generation in the HS2 LOG.debug("Setting {} to {} to enable split generation on HS2", HiveConf.ConfVars.HIVE_AM_SPLIT_GENERATION.toString(), Boolean.FALSE.toString()); jobConf.set(HiveConf.ConfVars.HIVE_AM_SPLIT_GENERATION.toString(), Boolean.FALSE.toString()); } try { DruidStorageHandlerUtils.addDependencyJars(jobConf, DruidRecordWriter.class); } catch (IOException e) { Throwables.propagate(e); } }
public LlapSigner getLlapSigner(final Configuration jobConf) { // Note that we create the cluster name from user conf (hence, a user can target a cluster), // but then we create the signer using hiveConf (hence, we control the ZK config and stuff). assert UserGroupInformation.isSecurityEnabled(); final String clusterId = DaemonId.createClusterString( clusterUser, LlapUtil.generateClusterName(jobConf)); try { return signers.get(clusterId, new Callable<LlapSigner>() { public LlapSigner call() throws Exception { return new LlapSignerImpl(hiveConf, clusterId); } }); } catch (ExecutionException e) { throw new RuntimeException(e); } }
/** {@inheritDoc} */
@Override
protected void startUp() throws Exception {
  try {
    // Install our Hadoop configuration process-wide, then log in from the
    // keytab — but only when that configuration actually enables security.
    UserGroupInformation.setConfiguration(_hadoopConf);
    if (!UserGroupInformation.isSecurityEnabled()) {
      return;
    }
    UserGroupInformation.loginUserFromKeytab(_loginUser, _loginUserKeytabFile);
  } catch (Throwable t) {
    // Log before rethrowing so startup failures are visible even if the
    // service framework swallows the exception detail.
    log.error("Failed to start up HadoopKerberosKeytabAuthenticationPlugin", t);
    throw t;
  }
}
/**
 * Resolves the caller's LLAP token information for a cluster.
 *
 * <p>Returns {@code NO_SECURITY} when security is off. Otherwise requires the
 * caller to present either Kerberos credentials or at least one LLAP token for
 * the cluster, and fails with {@link SecurityException} when it has neither.
 *
 * @param clusterId LLAP cluster identifier
 * @return token info derived from the caller's Kerberos name and tokens
 * @throws IOException if the current user cannot be resolved
 */
public static LlapTokenInfo getTokenInfo(String clusterId) throws IOException {
  if (!UserGroupInformation.isSecurityEnabled()) {
    return NO_SECURITY;
  }
  UserGroupInformation caller = UserGroupInformation.getCurrentUser();
  String callerKerberosName = null;
  if (caller.hasKerberosCredentials()) {
    callerKerberosName = caller.getShortUserName();
  }
  List<LlapTokenIdentifier> callerTokens = getLlapTokens(caller, clusterId);
  boolean hasTokens = callerTokens != null && !callerTokens.isEmpty();
  if (!hasTokens && callerKerberosName == null) {
    throw new SecurityException("No tokens or kerberos for " + caller);
  }
  warnMultipleTokens(callerTokens);
  return getTokenInfoInternal(callerKerberosName, callerTokens);
}
/**
 * Starts the ZK secret-manager threads, then (optionally) validates that the
 * root znode ACLs are restricted to the configured Kerberos principal.
 *
 * <p>NOTE: validation intentionally runs AFTER {@code super.startThreads()},
 * since the znodes may only exist once the parent has connected/initialized.
 *
 * @throws IOException from the parent start or the ACL check
 */
@Override
public void startThreads() throws IOException {
  String principalUser = LlapUtil.getUserNameFromPrincipal(
      conf.get(SecretManager.ZK_DTSM_ZK_KERBEROS_PRINCIPAL));
  LOG.info("Starting ZK threads as user " + UserGroupInformation.getCurrentUser()
      + "; kerberos principal is configured for user (short user name) " + principalUser);
  super.startThreads();
  // Skip ACL validation when it is disabled or security is off (nothing to enforce).
  if (!HiveConf.getBoolVar(conf, ConfVars.LLAP_VALIDATE_ACLS)
      || !UserGroupInformation.isSecurityEnabled()) return;
  String path = conf.get(ZK_DTSM_ZNODE_WORKING_PATH, null);
  if (path == null) throw new AssertionError("Path was not set in config");
  checkRootAcls(conf, path, principalUser);
}
private String checkQueueAccessFromSingleRm(String urlString) throws IOException { URL url = new URL(urlString); HttpURLConnection connection = UserGroupInformation.isSecurityEnabled() ? getSecureConnection(url) : (HttpURLConnection)url.openConnection(); int statusCode = connection.getResponseCode(); switch (statusCode) { case HttpStatus.SC_OK: return processResponse(connection); case HttpStatus.SC_FORBIDDEN: { // Throw a special exception since it's usually a well-known misconfiguration. throw new IOException(handleUnexpectedStatusCode(connection, statusCode, "check that the " + "HiveServer2 principal is in the administrator list of the root YARN queue")); } default: throw new IOException(handleUnexpectedStatusCode(connection, statusCode, null)); } }
/**
 * Verifies the token available as serialized bytes.
 *
 * <p>No-op when security is disabled. Otherwise deserializes the bytes into an
 * LLAP token and delegates to {@code verifyToken(identifier, password)}.
 *
 * @param tokenBytes serialized token; must be non-null when security is on
 * @throws IOException if the bytes cannot be deserialized
 * @throws SecurityException if no token bytes were supplied
 */
public void verifyToken(byte[] tokenBytes) throws IOException {
  if (!UserGroupInformation.isSecurityEnabled()) return;
  if (tokenBytes == null) throw new SecurityException("Token required for authentication");
  Token<LlapTokenIdentifier> token = new Token<>();
  token.readFields(new DataInputStream(new ByteArrayInputStream(tokenBytes)));
  verifyToken(token.decodeIdentifier(), token.getPassword());
}
}
/**
 * Renews a Hive metastore delegation token and returns the new expiry time.
 *
 * <p>Requires Hadoop security to be enabled; fails immediately otherwise
 * (guard-clause form — behavior identical to the original bottom-else).
 *
 * @param token the delegation token to renew
 * @param metaStoreURI metastore URI used to build the HCat client config
 * @param hiveMetaStorePrincipal metastore Kerberos principal
 * @return the new expiry time reported by the metastore
 * @throws RuntimeException if security is disabled or renewal fails
 */
private long renewToken(Token token, String metaStoreURI, String hiveMetaStorePrincipal) {
  if (!UserGroupInformation.isSecurityEnabled()) {
    throw new RuntimeException("Security is not enabled for Hadoop");
  }
  HCatClient hcatClient = null;
  try {
    String tokenStr = token.encodeToUrlString();
    HiveConf hcatConf = createHiveConf(metaStoreURI, hiveMetaStorePrincipal);
    LOG.debug("renewing delegation tokens for principal={}", hiveMetaStorePrincipal);
    hcatClient = HCatClient.create(hcatConf);
    // NOTE(review): auto-unboxing below would NPE if the client ever returned
    // null — presumably it never does; confirm against the HCatClient contract.
    Long expiryTime = hcatClient.renewDelegationToken(tokenStr);
    LOG.info("Renewed delegation token. new expiryTime={}", expiryTime);
    return expiryTime;
  } catch (Exception ex) {
    throw new RuntimeException("Failed to renew delegation tokens.", ex);
  } finally {
    // Best-effort close; a close failure must not mask the renewal result.
    if (hcatClient != null) {
      try {
        hcatClient.close();
      } catch (HCatException e) {
        LOG.error(" Exception", e);
      }
    }
  }
}
/**
 * Does authenticate against a secured hadoop cluster
 * In case of any bug fix make sure to fix the code at HdfsStorageAuthentication#authenticate as well.
 *
 * @param config containing the principal name and keytab path.
 */
public static void authenticate(HadoopDruidIndexerConfig config) {
  String principal = config.HADOOP_KERBEROS_CONFIG.getPrincipal();
  String keytab = config.HADOOP_KERBEROS_CONFIG.getKeytab();
  if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
    Configuration conf = new Configuration();
    UserGroupInformation.setConfiguration(conf);
    if (UserGroupInformation.isSecurityEnabled()) {
      try {
        // Hoist the repeated getCurrentUser() lookups; skip login if we are
        // already the right principal with live Kerberos credentials.
        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
        if (!currentUser.hasKerberosCredentials()
            || !currentUser.getUserName().equals(principal)) {
          log.info("trying to authenticate user [%s] with keytab [%s]", principal, keytab);
          UserGroupInformation.loginUserFromKeytab(principal, keytab);
        }
      } catch (IOException e) {
        throw new ISE(e, "Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab);
      }
    }
  }
}
/**
 * Does authenticate against a secured hadoop cluster
 * In case of any bug fix make sure to fix the code in JobHelper#authenticate as well.
 */
@LifecycleStart
public void authenticate() {
  String principal = hdfsKerberosConfig.getPrincipal();
  String keytab = hdfsKerberosConfig.getKeytab();
  if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
    UserGroupInformation.setConfiguration(hadoopConf);
    if (UserGroupInformation.isSecurityEnabled()) {
      try {
        // Hoist the repeated getCurrentUser() lookups; skip login if we are
        // already the right principal with live Kerberos credentials.
        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
        if (!currentUser.hasKerberosCredentials()
            || !currentUser.getUserName().equals(principal)) {
          log.info("Trying to authenticate user [%s] with keytab [%s]..", principal, keytab);
          UserGroupInformation.loginUserFromKeytab(principal, keytab);
        }
      } catch (IOException e) {
        throw new ISE(e, "Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab);
      }
    }
  }
}
/**
 * Creates a job token for AM-to-service communication for the given application.
 *
 * <p>Returns {@code null} when security is disabled (no token needed). The
 * token's service field is set to the job id derived from the application id.
 *
 * @param id the YARN application id to derive the token identity from
 * @return a signed session token, or {@code null} if security is off
 */
private static Token<JobTokenIdentifier> createAmsToken(ApplicationId id) {
  if (!UserGroupInformation.isSecurityEnabled()) {
    return null;
  }
  JobTokenIdentifier tokenIdentifier = new JobTokenIdentifier(new Text(id.toString()));
  JobTokenSecretManager secretManager = new JobTokenSecretManager();
  Token<JobTokenIdentifier> amsToken = new Token<>(tokenIdentifier, secretManager);
  amsToken.setService(tokenIdentifier.getJobId());
  return amsToken;
}
/**
 * Builds the servlet authentication filter holder, configured for Kerberos
 * (SPNEGO) when Hadoop security is enabled.
 *
 * <p>With security off, only anonymous access is allowed on the filter; with
 * security on, the signature secret, server principal (with {@code _HOST}
 * resolved against 0.0.0.0), and keytab are wired in as init parameters.
 *
 * @return the configured filter holder
 * @throws IOException if the server principal cannot be resolved
 */
public FilterHolder makeAuthFilter() throws IOException {
  FilterHolder authFilter = new FilterHolder(AuthFilter.class);
  UserNameHandler.allowAnonymous(authFilter);
  if (UserGroupInformation.isSecurityEnabled()) {
    // http://hadoop.apache.org/docs/r1.1.1/api/org/apache/hadoop/security/authentication/server/AuthenticationFilter.html
    authFilter.setInitParameter("dfs.web.authentication.signature.secret",
        conf.kerberosSecret());
    // https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.2/src/packages/templates/conf/hdfs-site.xml
    String serverPrincipal = SecurityUtil.getServerPrincipal(conf.kerberosPrincipal(), "0.0.0.0");
    authFilter.setInitParameter("dfs.web.authentication.kerberos.principal",
        serverPrincipal);
    // https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.2/src/packages/templates/conf/hdfs-site.xml
    authFilter.setInitParameter("dfs.web.authentication.kerberos.keytab",
        conf.kerberosKeytab());
  }
  return authFilter;
}