HCatUtil

How to use HCatUtil in org.apache.hive.hcatalog.common

Best Java code snippets using org.apache.hive.hcatalog.common.HCatUtil (Showing top 20 results out of 315)

origin: apache/hive

IMetaStoreClient client = null;
HiveConf hiveConf;
try {
 if (conf != null) {
  hiveConf = HCatUtil.getHiveConf(conf);
 } else {
  hiveConf = new HiveConf(HCatInputFormat.class);
 }
 client = HCatUtil.getHiveMetastoreClient(hiveConf);
 Table table = HCatUtil.getTable(client, inputJobInfo.getDatabaseName(),
  inputJobInfo.getTableName());
 if (table.getPartitionKeys().size() != 0) {
  // partitioned table: build a schema and PartInfo per partition (ptn)
  HCatSchema schema = HCatUtil.extractSchema(
    new org.apache.hadoop.hive.ql.metadata.Partition(table, ptn));
  PartInfo partInfo = extractPartInfo(schema, ptn.getSd(),
    ptn.getParameters(), conf, inputJobInfo);
 } else {
  // unpartitioned table: a single schema and PartInfo
  HCatSchema schema = HCatUtil.extractSchema(table);
  PartInfo partInfo = extractPartInfo(schema, table.getTTable().getSd(),
   table.getParameters(), conf, inputJobInfo);
 }
} finally {
 HCatUtil.closeHiveClientQuietly(client);
}
origin: apache/hive

public static LinkedList<InputJobInfo> getInputJobInfosFromConf(Configuration conf)
    throws IOException {
 LinkedList<InputJobInfo> inputJobInfos = (LinkedList<InputJobInfo>) HCatUtil.deserialize(
     conf.get(HCatConstants.HCAT_KEY_JOB_INFO));
 return inputJobInfos;
}
origin: apache/hive

Configuration conf = jobContext.getConfiguration();
try {
 OutputJobInfo jobInfo = (OutputJobInfo) HCatUtil.deserialize(conf.get(HCatConstants.HCAT_KEY_OUTPUT_INFO));
 HiveStorageHandler storageHandler = HCatUtil.getStorageHandler(jobContext.getConfiguration(), jobInfo.getTableInfo().getStorerInfo());
 HCatUtil.configureOutputStorageHandler(storageHandler, conf, jobInfo);
} catch (Exception e) {
 if (e instanceof HCatException) {
  throw (HCatException) e;
 }
 // HCatException extends IOException; wrap anything else
 throw new IOException(e);
}
origin: apache/hive

public static void putInputJobInfoToConf(InputJobInfo inputJobInfo, Configuration conf)
 throws IOException {
 LinkedList<InputJobInfo> inputJobInfos = (LinkedList<InputJobInfo>) HCatUtil.deserialize(
     conf.get(HCatConstants.HCAT_KEY_JOB_INFO));
 if (inputJobInfos == null) {
  inputJobInfos = new LinkedList<>();
 }
 inputJobInfos.add(inputJobInfo);
 conf.set(HCatConstants.HCAT_KEY_JOB_INFO, HCatUtil.serialize(inputJobInfos));
}
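Together, getInputJobInfosFromConf and putInputJobInfoToConf illustrate the general pattern: HCatUtil.serialize turns a Serializable value into a string that can live in a Configuration, and HCatUtil.deserialize recovers it. A minimal sketch of the round trip — the key name "example.key" is illustrative, not an HCatalog constant, and imports are elided as in the snippets above:

// serialize() requires a Serializable value; both calls may throw IOException
Configuration conf = new Configuration();
LinkedList<InputJobInfo> infos = new LinkedList<>();
conf.set("example.key", HCatUtil.serialize(infos));
// deserialize() returns Object; the caller casts back to the stored type
LinkedList<InputJobInfo> restored =
  (LinkedList<InputJobInfo>) HCatUtil.deserialize(conf.get("example.key"));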
origin: apache/hive

public static Map<String,String> getHCatKeyHiveConf(JobConf conf) {
 try {
  Properties properties = null;
  if (! StringUtils.isBlank(conf.get(HCatConstants.HCAT_KEY_HIVE_CONF))) {
   properties = (Properties) HCatUtil.deserialize(
     conf.get(HCatConstants.HCAT_KEY_HIVE_CONF));
   LOG.info(HCatConstants.HCAT_KEY_HIVE_CONF + " is set. Using differences=" + properties);
  } else {
   LOG.info(HCatConstants.HCAT_KEY_HIVE_CONF + " not set. Generating configuration differences.");
   properties = getHiveSiteOverrides(conf);
  }
  // This method may not be safe as it can throw an NPE if a key or value is null.
  return Maps.fromProperties(properties);
 }
 catch (IOException e) {
  throw new IllegalStateException("Failed to deserialize hive conf", e);
 }
}
origin: apache/hive

@Override
void initialize(Configuration conf) throws HCatException {
 this.config = conf;
 try {
  hiveConfig = HCatUtil.getHiveConf(config);
  hmsClient = HCatUtil.getHiveMetastoreClient(hiveConfig);
 } catch (MetaException exp) {
  throw new HCatException("MetaException while creating HMS client",
   exp);
 } catch (IOException exp) {
  throw new HCatException("IOException while creating HMS client",
   exp);
 }
}
origin: apache/hive

IMetaStoreClient client = null;
try {
 HiveConf hiveConf = HCatUtil.getHiveConf(conf);
 client = HCatUtil.getHiveMetastoreClient(hiveConf);
 Table table = HCatUtil.getTable(client, outputJobInfo.getDatabaseName(),
  outputJobInfo.getTableName());
 HCatSchema tableSchema = HCatUtil.extractSchema(table);
 StorerInfo storerInfo =
  InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters());
 HiveStorageHandler storageHandler = HCatUtil.getStorageHandler(conf, storerInfo);
 outputJobInfo.setMaximumDynamicPartitions(maxDynamicPartitions);
 HCatUtil.configureOutputStorageHandler(storageHandler, conf, outputJobInfo);
} finally {
 HCatUtil.closeHiveClientQuietly(client);
}
origin: apache/hive

IMetaStoreClient client = null;
try {
 HiveConf hiveConf = HCatUtil
   .getHiveConf(context.getConfiguration());
 client = HCatUtil.getHiveMetastoreClient(hiveConf);
 client.cancelDelegationToken(tokenStrForm);
} catch (TException e) {
 LOG.warn("TException while cancelling delegation token.", e);
} finally {
 HCatUtil.closeHiveClientQuietly(client);
}
origin: apache/hive

@Override
public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
 OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(context.getConfiguration());
 IMetaStoreClient client = null;
 try {
  HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
  client = HCatUtil.getHiveMetastoreClient(hiveConf);
  handleDuplicatePublish(context,
   jobInfo,
   client,
   new Table(jobInfo.getTableInfo().getTable()));
 } catch (MetaException e) {
  throw new IOException(e);
 } catch (TException e) {
  throw new IOException(e);
 } finally {
  HCatUtil.closeHiveClientQuietly(client);
 }
 if (!jobInfo.isDynamicPartitioningUsed()) {
  JobConf jobConf = new JobConf(context.getConfiguration());
  getBaseOutputFormat().checkOutputSpecs(null, jobConf);
   // checkOutputSpecs may have set properties that the context needs to reflect
  HCatUtil.copyConf(jobConf, context.getConfiguration());
 }
}
origin: apache/hive

 @Override
 public String run() throws IOException, TException, InterruptedException  {
  final IMetaStoreClient client = HCatUtil.getHiveMetastoreClient(c);
  return ugi.doAs(new PrivilegedExceptionAction<String>() {
   @Override
   public String run() throws IOException, TException, InterruptedException {
    String u = ugi.getUserName();
    return client.getDelegationToken(c.getUser(),u);
   }
  });
 }
});
origin: apache/crunch

private Table getHiveTable(Configuration conf) throws IOException, TException {
 if (hiveTableCached != null) {
  return hiveTableCached;
 }
 IMetaStoreClient hiveMetastoreClient = HCatUtil.getHiveMetastoreClient(new HiveConf(conf, HCatSourceTarget.class));
 hiveTableCached = HCatUtil.getTable(hiveMetastoreClient, database, table);
 return hiveTableCached;
}
origin: apache/hive

public Table getTable(String location, String hcatServerUri, String hcatServerPrincipal,
  Job job) throws IOException {
 Pair<String, String> loc_server = new Pair<String, String>(location, hcatServerUri);
 Table hcatTable = hcatTableCache.get(loc_server);
 if (hcatTable != null) {
  return hcatTable;
 }
 Pair<String, String> dbTablePair = PigHCatUtil.getDBTableNames(location);
 String dbName = dbTablePair.first;
 String tableName = dbTablePair.second;
 Table table = null;
 IMetaStoreClient client = null;
 try {
  client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class, job);
  table = HCatUtil.getTable(client, dbName, tableName);
 } catch (NoSuchObjectException nsoe) {
  throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE); // prettier error messages to frontend
 } catch (Exception e) {
  throw new IOException(e);
 } finally {
  HCatUtil.closeHiveClientQuietly(client);
 }
 hcatTableCache.put(loc_server, table);
 return table;
}
origin: org.apache.beam/beam-sdks-java-io-hcatalog

/**
 * Returns the size of the table in bytes; it does not take into account any
 * filter or partition details that were passed.
 */
@Override
public long getEstimatedSizeBytes(PipelineOptions pipelineOptions) throws Exception {
 Configuration conf = new Configuration();
 for (Entry<String, String> entry : spec.getConfigProperties().entrySet()) {
  conf.set(entry.getKey(), entry.getValue());
 }
 IMetaStoreClient client = null;
 try {
  HiveConf hiveConf = HCatUtil.getHiveConf(conf);
  client = HCatUtil.getHiveMetastoreClient(hiveConf);
  Table table = HCatUtil.getTable(client, spec.getDatabase(), spec.getTable());
  return StatsUtils.getFileSizeForTable(hiveConf, table);
 } finally {
  // IMetaStoreClient is not AutoCloseable, closing it manually
  if (client != null) {
   client.close();
  }
 }
}
origin: apache/hive

String metastoreTokenStrForm =
    DelegationTokenCache.getStringFormTokenCache().getDelegationToken(id);
if (metastoreTokenStrForm != null) {
 client = HCatUtil.getHiveMetastoreClient(new HiveConf());
 client.cancelDelegationToken(metastoreTokenStrForm);
 LOG.debug("Cancelled token for jobId=" + id + " status from JT=" + jobStatus);
}
HCatUtil.closeHiveClientQuietly(client);
origin: apache/hive

private static void validatePermissions(CliSessionState ss, HiveConf conf, String perms) {
 perms = perms.trim();
 FsPermission fp = null;
 if (perms.matches("^\\s*([r,w,x,-]{9})\\s*$")) {
  fp = FsPermission.valueOf("d" + perms);
 } else if (perms.matches("^\\s*([0-7]{3})\\s*$")) {
  fp = new FsPermission(Short.decode("0" + perms));
 } else {
  ss.err.println("Invalid permission specification: " + perms);
  sysExit(ss,1);
 }
 if (!HCatUtil.validateMorePermissive(fp.getUserAction(), fp.getGroupAction())) {
  ss.err.println("Invalid permission specification: " + perms + " : user permissions must be more permissive than group permission ");
  sysExit(ss,1);
 }
 if (!HCatUtil.validateMorePermissive(fp.getGroupAction(), fp.getOtherAction())) {
  ss.err.println("Invalid permission specification: " + perms + " : group permissions must be more permissive than other permission ");
  sysExit(ss,1);
 }
 if ((!HCatUtil.validateExecuteBitPresentIfReadOrWrite(fp.getUserAction())) ||
  (!HCatUtil.validateExecuteBitPresentIfReadOrWrite(fp.getGroupAction())) ||
  (!HCatUtil.validateExecuteBitPresentIfReadOrWrite(fp.getOtherAction()))) {
  ss.err.println("Invalid permission specification: " + perms + " : permissions must have execute permissions if read or write permissions are specified ");
  sysExit(ss,1);
 }
 conf.set(HCatConstants.HCAT_PERMS, "d" + fp.toString());
}
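For reference, the two HCatUtil validators used above operate on Hadoop FsAction values. A small hedged sketch of their behavior on a concrete permission string (the string itself is illustrative):

FsPermission fp = FsPermission.valueOf("drwxr-x---");
// user bits (rwx) must grant at least what the group bits (r-x) grant
boolean userVsGroup = HCatUtil.validateMorePermissive(fp.getUserAction(), fp.getGroupAction()); // true
// read or write without execute is rejected; r-x here passes
boolean execPresent = HCatUtil.validateExecuteBitPresentIfReadOrWrite(fp.getGroupAction()); // true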
origin: apache/hive

public static HiveConf getHiveConf(Configuration conf)
 throws IOException {
 HiveConf hiveConf = new HiveConf(conf, HCatUtil.class);
 //copy the hive conf into the job conf and restore it
 //in the backend context
 if (StringUtils.isBlank(conf.get(HCatConstants.HCAT_KEY_HIVE_CONF))) {
  // Called once on the client.
  LOG.info(HCatConstants.HCAT_KEY_HIVE_CONF + " not set. Generating configuration differences.");
  Properties differences = getHiveSiteOverrides(conf);
  // Must set this key even if differences is empty otherwise client and AM will attempt
  // to set this multiple times.
  conf.set(HCatConstants.HCAT_KEY_HIVE_CONF, HCatUtil.serialize(differences));
 } else {
  // Called one or more times on the client and AM.
  LOG.info(HCatConstants.HCAT_KEY_HIVE_CONF + " is set. Applying configuration differences.");
  Properties properties = (Properties) HCatUtil.deserialize(
    conf.get(HCatConstants.HCAT_KEY_HIVE_CONF));
  storePropertiesToHiveConf(properties, hiveConf);
 }
 if (conf.get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
  hiveConf.setVar(HiveConf.ConfVars.METASTORE_TOKEN_SIGNATURE,
   conf.get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE));
 }
 return hiveConf;
}
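As the code above shows, the first call on the client both builds the HiveConf and records the hive-site differences in the job configuration so backend tasks can re-apply them. A minimal sketch of that client-side call:

Configuration jobConf = new Configuration();
HiveConf hiveConf = HCatUtil.getHiveConf(jobConf);
// HCatConstants.HCAT_KEY_HIVE_CONF is now set on jobConf (possibly to an
// empty Properties), so later calls on the AM apply the same differences.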
origin: apache/hive

/**
 * create an HCatTableInfo instance from the supplied Hive Table instance
 * @param table to create an instance from
 * @return HCatTableInfo
 * @throws IOException
 */
static HCatTableInfo valueOf(Table table) throws IOException {
 // Explicitly use {@link org.apache.hadoop.hive.ql.metadata.Table} when getting the schema,
 // but store {@link org.apache.hadoop.hive.metastore.api.Table} as this class is serialized
 // into the job conf.
 org.apache.hadoop.hive.ql.metadata.Table mTable =
  new org.apache.hadoop.hive.ql.metadata.Table(table);
 HCatSchema schema = HCatUtil.extractSchema(mTable);
 StorerInfo storerInfo =
  InternalUtil.extractStorerInfo(table.getSd(), table.getParameters());
 HCatSchema partitionColumns = HCatUtil.getPartitionColumns(mTable);
 return new HCatTableInfo(table.getDbName(), table.getTableName(), schema,
  partitionColumns, storerInfo, table);
}
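The comment above points at a recurring HCatalog wrinkle: the schema helpers want the org.apache.hadoop.hive.ql.metadata.Table wrapper, while the thrift-level org.apache.hadoop.hive.metastore.api.Table is what gets serialized into the job conf. A hedged sketch of the conversion in isolation (database and table names are illustrative):

// wrap a thrift-level table so the schema helpers can be applied to it
org.apache.hadoop.hive.metastore.api.Table apiTable = client.getTable("default", "mytable");
org.apache.hadoop.hive.ql.metadata.Table qlTable =
  new org.apache.hadoop.hive.ql.metadata.Table(apiTable);
HCatSchema schema = HCatUtil.extractSchema(qlTable);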
origin: apache/hive

partitionInfo.setTableInfo(
  HCatUtil.getLastInputJobInfosFromConf(taskContext.getConfiguration()).getTableInfo());
HiveStorageHandler storageHandler = HCatUtil.getStorageHandler(
 conf, partitionInfo);
JobConf jobConf = HCatUtil.getJobConfFromContext(jobContext);
Map<String, String> jobProperties = partitionInfo.getJobProperties();
HCatUtil.copyJobPropertiesToJobConf(jobProperties, jobConf);
origin: apache/hive

/**
 * Set the schema for the HCatRecord data returned by HCatInputFormat.
 * @param job the job object
 * @param hcatSchema the schema to use as the consolidated schema
 */
public static void setOutputSchema(Job job, HCatSchema hcatSchema)
 throws IOException {
 job.getConfiguration().set(HCatConstants.HCAT_KEY_OUTPUT_SCHEMA,
  HCatUtil.serialize(hcatSchema));
}
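A typical call site for this is projection pushdown: narrow the records HCatInputFormat returns to a subset of the table's columns. A sketch, with database, table, and column names purely illustrative:

Job job = Job.getInstance(new Configuration(), "hcat-read");
HCatInputFormat.setInput(job, "default", "mytable");
// project down to a single column of the table's schema
HCatSchema tableSchema = HCatInputFormat.getTableSchema(job.getConfiguration());
List<HCatFieldSchema> projection = new ArrayList<>();
projection.add(tableSchema.get("id"));
HCatInputFormat.setOutputSchema(job, new HCatSchema(projection));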
origin: apache/hive

JobConf jobConf = HCatUtil.getJobConfFromContext(jobContext);
if (hiveProps == null) {
 hiveProps = HCatUtil.getHCatKeyHiveConf(jobConf);
}
HCatUtil.copyJobPropertiesToJobConf(hiveProps, jobConf);
HCatUtil.copyJobPropertiesToJobConf(jobProperties, jobConf);
storageHandler = HCatUtil.getStorageHandler(
 jobConf, partitionInfo);
org.apache.hive.hcatalog.common.HCatUtil

Most used methods

  • getHiveMetastoreClient
    Get or create a Hive metastore client, depending on whether one already exists in the cache
  • getTable
  • closeHiveClientQuietly
  • deserialize
  • getHiveConf
  • extractSchema
  • getStorageHandler
    Create an instance of the storage handler defined in storerInfo. If one cannot be found, a FosterStorageHandler is used to encapsulate the table's InputFormat, OutputFormat, and SerDe
  • serialize
  • validateExecuteBitPresentIfReadOrWrite
    Ensure that read or write permissions are not granted without also granting execute permissions. Essentially, r--, -w-, and rw- are invalid
  • validateMorePermissive
    Test if the first FsAction is more permissive than the second. This is useful in cases where we want to ensure, for example, that an owner's permissions are at least as permissive as the group's
  • configureOutputStorageHandler
  • copyConf
    Replace the contents of dest with the contents of src
  • copyJobPropertiesToJobConf
  • decodeBytes
  • encodeBytes
  • extractThriftToken
  • getFieldSchemaList
  • getHCatFieldSchemaList
  • getInputJobProperties
  • getJobConfFromContext
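
Putting the most-used helpers together, a minimal sketch of the common metastore round trip (database and table names are illustrative; imports elided as in the snippets above):

IMetaStoreClient client = null;
try {
 HiveConf hiveConf = HCatUtil.getHiveConf(new Configuration());
 client = HCatUtil.getHiveMetastoreClient(hiveConf);
 Table table = HCatUtil.getTable(client, "default", "mytable");
 HCatSchema schema = HCatUtil.extractSchema(table);
} finally {
 // safe to call with a null or already-closed client
 HCatUtil.closeHiveClientQuietly(client);
}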
