try {
  if (conf != null) {
    hiveConf = HCatUtil.getHiveConf(conf);
  } else {
    hiveConf = new HiveConf(HCatInputFormat.class);
  }
  client = HCatUtil.getHiveMetastoreClient(hiveConf);
  Table table = HCatUtil.getTable(client, inputJobInfo.getDatabaseName(),
      inputJobInfo.getTableName());

  if (table.getPartitionKeys().size() != 0) {
    // Partitioned table: extract a schema and PartInfo per partition.
    // 'parts' is the partition list fetched from the metastore (elided).
    for (Partition ptn : parts) {
      HCatSchema schema = HCatUtil.extractSchema(
          new org.apache.hadoop.hive.ql.metadata.Partition(table, ptn));
      PartInfo partInfo = extractPartInfo(schema, ptn.getSd(),
          ptn.getParameters(), conf, inputJobInfo);
      // ... per-partition bookkeeping elided ...
    }
  } else {
    // Unpartitioned table: a single PartInfo from the table itself.
    HCatSchema schema = HCatUtil.extractSchema(table);
    PartInfo partInfo = extractPartInfo(schema, table.getTTable().getSd(),
        table.getParameters(), conf, inputJobInfo);
  }
} finally {
  HCatUtil.closeHiveClientQuietly(client);
}
public static LinkedList<InputJobInfo> getInputJobInfosFromConf(Configuration conf)
    throws IOException {
  LinkedList<InputJobInfo> inputJobInfos = (LinkedList<InputJobInfo>) HCatUtil.deserialize(
      conf.get(HCatConstants.HCAT_KEY_JOB_INFO));
  return inputJobInfos;
}
Configuration conf = jobContext.getConfiguration();
try {
  OutputJobInfo jobInfo = (OutputJobInfo) HCatUtil.deserialize(
      conf.get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
  HiveStorageHandler storageHandler = HCatUtil.getStorageHandler(conf,
      jobInfo.getTableInfo().getStorerInfo());
  HCatUtil.configureOutputStorageHandler(storageHandler, conf, jobInfo);
} catch (Exception e) {
  if (e instanceof HCatException) {
    throw (HCatException) e;
  } else {
    throw new HCatException(ErrorType.ERROR_INIT_STORAGE_HANDLER, e);
  }
}
public static void putInputJobInfoToConf(InputJobInfo inputJobInfo, Configuration conf)
    throws IOException {
  LinkedList<InputJobInfo> inputJobInfos = (LinkedList<InputJobInfo>) HCatUtil.deserialize(
      conf.get(HCatConstants.HCAT_KEY_JOB_INFO));
  if (inputJobInfos == null) {
    inputJobInfos = new LinkedList<>();
  }
  inputJobInfos.add(inputJobInfo);
  conf.set(HCatConstants.HCAT_KEY_JOB_INFO, HCatUtil.serialize(inputJobInfos));
}
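Read together, putInputJobInfoToConf and getInputJobInfosFromConf round-trip a list of inputs through a single serialized Configuration key. A minimal sketch of that round-trip, assuming the InputJobInfo.create(database, table, filter, properties) factory; the table names are illustrative:

Configuration conf = new Configuration();
HCatUtil.putInputJobInfoToConf(InputJobInfo.create("default", "clicks", null, null), conf);
HCatUtil.putInputJobInfoToConf(InputJobInfo.create("default", "impressions", null, null), conf);
// Each put deserializes the existing list, appends, and re-serializes,
// so both inputs survive under HCAT_KEY_JOB_INFO.
LinkedList<InputJobInfo> inputs = HCatUtil.getInputJobInfosFromConf(conf);
// inputs.size() == 2; inputs.getLast() is the most recently added input.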
public static Map<String, String> getHCatKeyHiveConf(JobConf conf) {
  try {
    Properties properties = null;
    if (!StringUtils.isBlank(conf.get(HCatConstants.HCAT_KEY_HIVE_CONF))) {
      properties = (Properties) HCatUtil.deserialize(
          conf.get(HCatConstants.HCAT_KEY_HIVE_CONF));
      LOG.info(HCatConstants.HCAT_KEY_HIVE_CONF + " is set. Using differences=" + properties);
    } else {
      LOG.info(HCatConstants.HCAT_KEY_HIVE_CONF + " not set. Generating configuration differences.");
      properties = getHiveSiteOverrides(conf);
    }
    // Maps.fromProperties throws an NPE if any key or value is null.
    return Maps.fromProperties(properties);
  } catch (IOException e) {
    throw new IllegalStateException("Failed to deserialize hive conf", e);
  }
}
@Override
void initialize(Configuration conf) throws HCatException {
  this.config = conf;
  try {
    hiveConfig = HCatUtil.getHiveConf(config);
    hmsClient = HCatUtil.getHiveMetastoreClient(hiveConfig);
  } catch (MetaException exp) {
    throw new HCatException("MetaException while creating HMS client", exp);
  } catch (IOException exp) {
    throw new HCatException("IOException while creating HMS client", exp);
  }
}
HiveConf hiveConf = HCatUtil.getHiveConf(conf);
client = HCatUtil.getHiveMetastoreClient(hiveConf);
Table table = HCatUtil.getTable(client, outputJobInfo.getDatabaseName(),
    outputJobInfo.getTableName());
HCatSchema tableSchema = HCatUtil.extractSchema(table);
StorerInfo storerInfo = InternalUtil.extractStorerInfo(table.getTTable().getSd(),
    table.getParameters());
HiveStorageHandler storageHandler = HCatUtil.getStorageHandler(conf, storerInfo);
outputJobInfo.setMaximumDynamicPartitions(maxDynamicPartitions);
HCatUtil.configureOutputStorageHandler(storageHandler, conf, outputJobInfo);
HCatUtil.closeHiveClientQuietly(client);
try {
  HiveConf hiveConf = HCatUtil
      .getHiveConf(context.getConfiguration());
  client = HCatUtil.getHiveMetastoreClient(hiveConf);
  // ... cancel the delegation token acquired for this job (elided) ...
} catch (TException e) {
  LOG.warn("TException while cancelling delegation token.", e);
} finally {
  HCatUtil.closeHiveClientQuietly(client);
}
@Override
public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
  OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(context.getConfiguration());
  IMetaStoreClient client = null;
  try {
    HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
    client = HCatUtil.getHiveMetastoreClient(hiveConf);
    handleDuplicatePublish(context, jobInfo, client,
        new Table(jobInfo.getTableInfo().getTable()));
  } catch (MetaException e) {
    throw new IOException(e);
  } catch (TException e) {
    throw new IOException(e);
  } finally {
    HCatUtil.closeHiveClientQuietly(client);
  }

  if (!jobInfo.isDynamicPartitioningUsed()) {
    JobConf jobConf = new JobConf(context.getConfiguration());
    getBaseOutputFormat().checkOutputSpecs(null, jobConf);
    // checkOutputSpecs may have set some properties; make sure the context reflects that.
    HCatUtil.copyConf(jobConf, context.getConfiguration());
  }
}
@Override
public String run() throws IOException, TException, InterruptedException {
  final IMetaStoreClient client = HCatUtil.getHiveMetastoreClient(c);
  return ugi.doAs(new PrivilegedExceptionAction<String>() {
    @Override
    public String run() throws IOException, TException, InterruptedException {
      String u = ugi.getUserName();
      return client.getDelegationToken(c.getUser(), u);
    }
  });
}
});
private Table getHiveTable(Configuration conf) throws IOException, TException {
  if (hiveTableCached != null) {
    return hiveTableCached;
  }
  IMetaStoreClient hiveMetastoreClient = HCatUtil.getHiveMetastoreClient(
      new HiveConf(conf, HCatSourceTarget.class));
  hiveTableCached = HCatUtil.getTable(hiveMetastoreClient, database, table);
  return hiveTableCached;
}
public Table getTable(String location, String hcatServerUri, String hcatServerPrincipal,
    Job job) throws IOException {
  Pair<String, String> loc_server = new Pair<String, String>(location, hcatServerUri);
  Table hcatTable = hcatTableCache.get(loc_server);
  if (hcatTable != null) {
    return hcatTable;
  }

  Pair<String, String> dbTablePair = PigHCatUtil.getDBTableNames(location);
  String dbName = dbTablePair.first;
  String tableName = dbTablePair.second;
  Table table = null;
  IMetaStoreClient client = null;
  try {
    client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class, job);
    table = HCatUtil.getTable(client, dbName, tableName);
  } catch (NoSuchObjectException nsoe) {
    // Prettier error message for the frontend.
    throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE);
  } catch (Exception e) {
    throw new IOException(e);
  } finally {
    HCatUtil.closeHiveClientQuietly(client);
  }
  hcatTableCache.put(loc_server, table);
  return table;
}
/**
 * Returns the size of the table in bytes; does not take into account any
 * filter or partition details passed in.
 */
@Override
public long getEstimatedSizeBytes(PipelineOptions pipelineOptions) throws Exception {
  Configuration conf = new Configuration();
  for (Entry<String, String> entry : spec.getConfigProperties().entrySet()) {
    conf.set(entry.getKey(), entry.getValue());
  }
  IMetaStoreClient client = null;
  try {
    HiveConf hiveConf = HCatUtil.getHiveConf(conf);
    client = HCatUtil.getHiveMetastoreClient(hiveConf);
    Table table = HCatUtil.getTable(client, spec.getDatabase(), spec.getTable());
    return StatsUtils.getFileSizeForTable(hiveConf, table);
  } finally {
    // IMetaStoreClient is not AutoCloseable, so close it manually.
    if (client != null) {
      client.close();
    }
  }
}
String metastoreTokenStrForm =
    DelegationTokenCache.getStringFormTokenCache().getDelegationToken(id);
if (metastoreTokenStrForm != null) {
  client = HCatUtil.getHiveMetastoreClient(new HiveConf());
  client.cancelDelegationToken(metastoreTokenStrForm);
  LOG.debug("Cancelled token for jobId=" + id + " status from JT=" + jobStatus);
}
HCatUtil.closeHiveClientQuietly(client);
private static void validatePermissions(CliSessionState ss, HiveConf conf, String perms) {
  perms = perms.trim();
  FsPermission fp = null;

  if (perms.matches("^\\s*([r,w,x,-]{9})\\s*$")) {
    fp = FsPermission.valueOf("d" + perms);
  } else if (perms.matches("^\\s*([0-7]{3})\\s*$")) {
    fp = new FsPermission(Short.decode("0" + perms));
  } else {
    ss.err.println("Invalid permission specification: " + perms);
    sysExit(ss, 1);
  }

  if (!HCatUtil.validateMorePermissive(fp.getUserAction(), fp.getGroupAction())) {
    ss.err.println("Invalid permission specification: " + perms
        + " : user permissions must be more permissive than group permission ");
    sysExit(ss, 1);
  }
  if (!HCatUtil.validateMorePermissive(fp.getGroupAction(), fp.getOtherAction())) {
    ss.err.println("Invalid permission specification: " + perms
        + " : group permissions must be more permissive than other permission ");
    sysExit(ss, 1);
  }

  if ((!HCatUtil.validateExecuteBitPresentIfReadOrWrite(fp.getUserAction()))
      || (!HCatUtil.validateExecuteBitPresentIfReadOrWrite(fp.getGroupAction()))
      || (!HCatUtil.validateExecuteBitPresentIfReadOrWrite(fp.getOtherAction()))) {
    ss.err.println("Invalid permission specification: " + perms
        + " : permissions must have execute permissions if read or write permissions are specified ");
    sysExit(ss, 1);
  }

  conf.set(HCatConstants.HCAT_PERMS, "d" + fp.toString());
}
public static HiveConf getHiveConf(Configuration conf) throws IOException {
  HiveConf hiveConf = new HiveConf(conf, HCatUtil.class);

  // Copy the hive conf into the job conf and restore it
  // in the backend context.
  if (StringUtils.isBlank(conf.get(HCatConstants.HCAT_KEY_HIVE_CONF))) {
    // Called once on the client.
    LOG.info(HCatConstants.HCAT_KEY_HIVE_CONF
        + " not set. Generating configuration differences.");
    Properties differences = getHiveSiteOverrides(conf);
    // Must set this key even if differences is empty, otherwise the client and AM
    // will attempt to set it multiple times.
    conf.set(HCatConstants.HCAT_KEY_HIVE_CONF, HCatUtil.serialize(differences));
  } else {
    // Called one or more times on the client and AM.
    LOG.info(HCatConstants.HCAT_KEY_HIVE_CONF
        + " is set. Applying configuration differences.");
    Properties properties = (Properties) HCatUtil.deserialize(
        conf.get(HCatConstants.HCAT_KEY_HIVE_CONF));
    storePropertiesToHiveConf(properties, hiveConf);
  }

  if (conf.get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
    hiveConf.setVar(HiveConf.ConfVars.METASTORE_TOKEN_SIGNATURE,
        conf.get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE));
  }
  return hiveConf;
}
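A sketch of the two-phase behaviour this implies, assuming a fresh job Configuration: the first call (on the client) records the hive-site.xml overrides under HCAT_KEY_HIVE_CONF, and any later call (client or AM) replays them instead of recomputing:

Configuration jobConf = new Configuration();

// First call: HCAT_KEY_HIVE_CONF is blank, so the overrides are computed
// and serialized into the job configuration.
HiveConf clientSide = HCatUtil.getHiveConf(jobConf);
assert jobConf.get(HCatConstants.HCAT_KEY_HIVE_CONF) != null;

// Subsequent calls: the serialized differences are deserialized and
// applied to the new HiveConf rather than regenerated.
HiveConf backendSide = HCatUtil.getHiveConf(jobConf);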
/**
 * Create an HCatTableInfo instance from the supplied Hive Table instance.
 * @param table the table to create an instance from
 * @return HCatTableInfo
 * @throws IOException
 */
static HCatTableInfo valueOf(Table table) throws IOException {
  // Explicitly use {@link org.apache.hadoop.hive.ql.metadata.Table} when getting the schema,
  // but store {@link org.apache.hadoop.hive.metastore.api.Table} as this class is serialized
  // into the job conf.
  org.apache.hadoop.hive.ql.metadata.Table mTable =
      new org.apache.hadoop.hive.ql.metadata.Table(table);
  HCatSchema schema = HCatUtil.extractSchema(mTable);
  StorerInfo storerInfo =
      InternalUtil.extractStorerInfo(table.getSd(), table.getParameters());
  HCatSchema partitionColumns = HCatUtil.getPartitionColumns(mTable);
  return new HCatTableInfo(table.getDbName(), table.getTableName(), schema,
      partitionColumns, storerInfo, table);
}
partitionInfo.setTableInfo(
    HCatUtil.getLastInputJobInfosFromConf(taskContext.getConfiguration()).getTableInfo());
HiveStorageHandler storageHandler = HCatUtil.getStorageHandler(conf, partitionInfo);
JobConf jobConf = HCatUtil.getJobConfFromContext(jobContext);
Map<String, String> jobProperties = partitionInfo.getJobProperties();
HCatUtil.copyJobPropertiesToJobConf(jobProperties, jobConf);
/**
 * Set the schema for the HCatRecord data returned by HCatInputFormat.
 * @param job the job object
 * @param hcatSchema the schema to use as the consolidated schema
 */
public static void setOutputSchema(Job job, HCatSchema hcatSchema) throws IOException {
  job.getConfiguration().set(HCatConstants.HCAT_KEY_OUTPUT_SCHEMA,
      HCatUtil.serialize(hcatSchema));
}
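For example, a job that needs only a projection of the table's columns can trim the table schema before setting it; a sketch assuming HCatInputFormat.getTableSchema(Configuration) and HCatSchema.remove from the HCat API, with an illustrative column name:

HCatSchema schema = HCatInputFormat.getTableSchema(job.getConfiguration());
// Drop a column the job does not read ("big_blob" is illustrative).
schema.remove(schema.get("big_blob"));
HCatInputFormat.setOutputSchema(job, schema);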
JobConf jobConf = HCatUtil.getJobConfFromContext(jobContext);
if (hiveProps == null) {
  hiveProps = HCatUtil.getHCatKeyHiveConf(jobConf);
}
HCatUtil.copyJobPropertiesToJobConf(hiveProps, jobConf);
HCatUtil.copyJobPropertiesToJobConf(jobProperties, jobConf);
storageHandler = HCatUtil.getStorageHandler(jobConf, partitionInfo);