  @Override
  public String run() throws IOException, TException, InterruptedException {
    final IMetaStoreClient client = HCatUtil.getHiveMetastoreClient(c);
    return ugi.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws IOException, TException, InterruptedException {
        String u = ugi.getUserName();
        return client.getDelegationToken(c.getUser(), u);
      }
    });
  }
});
private String buildHcatDelegationToken(String user)
    throws IOException, InterruptedException, TException {
  final HiveConf c = new HiveConf();
  final IMetaStoreClient client = HCatUtil.getHiveMetastoreClient(c);
  LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
  final UserGroupInformation ugi = UgiFactory.getUgi(user);
  String s = ugi.doAs(new PrivilegedExceptionAction<String>() {
    public String run() throws IOException, MetaException, TException {
      String u = ugi.getUserName();
      return client.getDelegationToken(c.getUser(), u);
    }
  });
  FileSystem.closeAllForUGI(ugi);
  return s;
}
}
HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
client = HCatUtil.getHiveMetastoreClient(hiveConf);
/** Builds the client. */
public MutatorClient build() throws ClientException, MetaException {
  String user = authenticatedUser == null ? System.getProperty("user.name") : authenticatedUser.getShortUserName();
  boolean secureMode = authenticatedUser == null ? false : authenticatedUser.hasKerberosCredentials();
  configuration = HiveConfFactory.newInstance(configuration, this.getClass(), metaStoreUri);
  IMetaStoreClient metaStoreClient;
  try {
    metaStoreClient = new UgiMetaStoreClientFactory(metaStoreUri, configuration, authenticatedUser, user, secureMode)
        .newInstance(HCatUtil.getHiveMetastoreClient(configuration));
  } catch (IOException e) {
    throw new ClientException("Could not create meta store client.", e);
  }
  return new MutatorClient(metaStoreClient, configuration, lockFailureListener, user, tables.values());
}
@Override
void initialize(Configuration conf) throws HCatException {
  this.config = conf;
  try {
    hiveConfig = HCatUtil.getHiveConf(config);
    hmsClient = HCatUtil.getHiveMetastoreClient(hiveConfig);
  } catch (MetaException exp) {
    throw new HCatException("MetaException while creating HMS client", exp);
  } catch (IOException exp) {
    throw new HCatException("IOException while creating HMS client", exp);
  }
}
private static IMetaStoreClient getHiveMetaClient(String serverUri, String serverKerberosPrincipal,
    Class<?> clazz, Job job) throws Exception {
  // The job configuration is passed in so the configuration will be cloned
  // from the Pig job configuration. This is necessary for overriding
  // metastore configuration arguments like the metastore JDBC connection string
  // and password, in the case of an embedded metastore, which you get when
  // hive.metastore.uris = "".
  HiveConf hiveConf = new HiveConf(job.getConfiguration(), clazz);
  if (serverUri != null) {
    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, serverUri.trim());
  }
  if (serverKerberosPrincipal != null) {
    hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
    hiveConf.setVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL, serverKerberosPrincipal);
  }
  try {
    return HCatUtil.getHiveMetastoreClient(hiveConf);
  } catch (Exception e) {
    throw new Exception("Could not instantiate a HiveMetaStoreClient connecting to server uri:["
        + serverUri + "]", e);
  }
}
@Override
public void cleanupJob(JobContext context) throws IOException {
  getBaseOutputCommitter().cleanupJob(HCatMapRedUtil.createJobContext(context));
  // Cancel HCat and JobTracker tokens
  IMetaStoreClient client = null;
  try {
    HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
    client = HCatUtil.getHiveMetastoreClient(hiveConf);
    String tokenStrForm = client.getTokenStrForm();
    if (tokenStrForm != null && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
      client.cancelDelegationToken(tokenStrForm);
    }
  } catch (Exception e) {
    LOG.warn("Failed to cancel delegation token", e);
  } finally {
    HCatUtil.closeHiveClientQuietly(client);
  }
}
}
private static IMetaStoreClient getMetaStoreClient(HiveEndPoint endPoint, HiveConf conf, boolean secureMode)
    throws ConnectionError {
  if (endPoint.metaStoreUri != null) {
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, endPoint.metaStoreUri);
  }
  if (secureMode) {
    conf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
  }
  try {
    return HCatUtil.getHiveMetastoreClient(conf);
  } catch (MetaException e) {
    throw new ConnectionError("Error connecting to Hive Metastore URI: "
        + endPoint.metaStoreUri + ". " + e.getMessage(), e);
  } catch (IOException e) {
    throw new ConnectionError("Error connecting to Hive Metastore URI: "
        + endPoint.metaStoreUri + ". " + e.getMessage(), e);
  }
}
} // class ConnectionImpl
private PartitionHelper newMetaStorePartitionHelper() throws MetaException, WorkerException {
  String user = authenticatedUser == null ? System.getProperty("user.name") : authenticatedUser.getShortUserName();
  boolean secureMode = authenticatedUser == null ? false : authenticatedUser.hasKerberosCredentials();
  try {
    IMetaStoreClient metaStoreClient = new UgiMetaStoreClientFactory(metaStoreUri, configuration,
        authenticatedUser, user, secureMode).newInstance(HCatUtil.getHiveMetastoreClient(configuration));
    String tableLocation = table.getTable().getSd().getLocation();
    Path tablePath = new Path(tableLocation);
    return new MetaStorePartitionHelper(metaStoreClient, table.getDatabaseName(), table.getTableName(), tablePath);
  } catch (IOException e) {
    throw new WorkerException("Could not create meta store client.", e);
  }
}
@Override
public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
  OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(context.getConfiguration());
  IMetaStoreClient client = null;
  try {
    HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
    client = HCatUtil.getHiveMetastoreClient(hiveConf);
    handleDuplicatePublish(context, jobInfo, client, new Table(jobInfo.getTableInfo().getTable()));
  } catch (MetaException e) {
    throw new IOException(e);
  } catch (TException e) {
    throw new IOException(e);
  } finally {
    HCatUtil.closeHiveClientQuietly(client);
  }
  if (!jobInfo.isDynamicPartitioningUsed()) {
    JobConf jobConf = new JobConf(context.getConfiguration());
    getBaseOutputFormat().checkOutputSpecs(null, jobConf);
    // checkOutputSpecs might have set some properties; make the context reflect that
    HCatUtil.copyConf(jobConf, context.getConfiguration());
  }
}
@BeforeClass
public static void setUpOneTime() throws Exception {
  fs = new LocalFileSystem();
  fs.initialize(fs.getWorkingDirectory().toUri(), new Configuration());
  HiveConf hiveConf = new HiveConf();
  hiveConf.setInt(HCatConstants.HCAT_HIVE_CLIENT_EXPIRY_TIME, 0);
  // Hack: initialize the cache with a 0 expiry time so it returns a new Hive client every time.
  // Otherwise the cache doesn't play well with the second test method, because the client gets
  // closed in the tearDown() of the previous test.
  HCatUtil.getHiveMetastoreClient(hiveConf);
  MapCreate.writeCount = 0;
  MapRead.readCount = 0;
}
client = HCatUtil.getHiveMetastoreClient(hiveConf);
Table table = HCatUtil.getTable(client, outputJobInfo.getDatabaseName(), outputJobInfo.getTableName());
    : HiveEndPoint.createHiveConf(DelimitedInputWriter.class, endPoint.metaStoreUri);
try {
  msClient = HCatUtil.getHiveMetastoreClient(this.conf);
  UserGroupInformation ugi = conn != null ? conn.getUserGroupInformation() : null;
  if (ugi == null) {
try {
  HiveConf hiveConf = HCatUtil.getHiveConf(conf);
  client = HCatUtil.getHiveMetastoreClient(hiveConf);
  if (table.getPartitionKeys().size() == 0) {
hiveConf = new HiveConf(HCatInputFormat.class);
client = HCatUtil.getHiveMetastoreClient(hiveConf);
Table table = HCatUtil.getTable(client, inputJobInfo.getDatabaseName(), inputJobInfo.getTableName());
    DelegationTokenCache.getStringFormTokenCache().getDelegationToken(id);
if (metastoreTokenStrForm != null) {
  client = HCatUtil.getHiveMetastoreClient(new HiveConf());
  client.cancelDelegationToken(metastoreTokenStrForm);
  LOG.debug("Cancelled token for jobId=" + id + " status from JT=" + jobStatus);
private Table getHiveTable(Configuration conf) throws IOException, TException {
  if (hiveTableCached != null) {
    return hiveTableCached;
  }
  IMetaStoreClient hiveMetastoreClient = HCatUtil.getHiveMetastoreClient(new HiveConf(conf, HCatSourceTarget.class));
  hiveTableCached = HCatUtil.getTable(hiveMetastoreClient, database, table);
  return hiveTableCached;
}
private Table getHiveTable(Configuration conf) throws IOException, TException {
  if (hiveTableCached != null) {
    return hiveTableCached;
  }
  IMetaStoreClient hiveMetastoreClient = HCatUtil.getHiveMetastoreClient(new HiveConf(conf, HCatTarget.class));
  hiveTableCached = HCatUtil.getTable(hiveMetastoreClient, info.getDatabaseName(), info.getTableName());
  return hiveTableCached;
}
}
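The call sites above share one pattern: build a HiveConf (often via HCatUtil.getHiveConf from the job Configuration), obtain an IMetaStoreClient through HCatUtil.getHiveMetastoreClient, use it, and release it with HCatUtil.closeHiveClientQuietly. The following is a minimal, self-contained sketch of that pattern, not code from any of the projects quoted above; the "default"/"my_table" names are hypothetical placeholders, and the HCatUtil package name may differ between older (org.apache.hcatalog) and newer (org.apache.hive.hcatalog) releases.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hive.hcatalog.common.HCatUtil;

public class MetastoreClientSketch {
  public static void main(String[] args) throws Exception {
    IMetaStoreClient client = null;
    try {
      // Derive a HiveConf from the Hadoop Configuration, as the snippets above do.
      HiveConf hiveConf = HCatUtil.getHiveConf(new Configuration());
      // Obtain a (possibly cached) metastore client.
      client = HCatUtil.getHiveMetastoreClient(hiveConf);
      // Use the client; database and table names here are placeholders.
      Table table = HCatUtil.getTable(client, "default", "my_table");
      System.out.println("Resolved table: " + table.getDbName() + "." + table.getTableName());
    } finally {
      // Always release the client, even if the lookup failed.
      HCatUtil.closeHiveClientQuietly(client);
    }
  }
}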