
How to use the closeHiveClientQuietly method in org.apache.hive.hcatalog.common.HCatUtil

Best Java code snippets using org.apache.hive.hcatalog.common.HCatUtil.closeHiveClientQuietly (selected from 315 indexed results)
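closeHiveClientQuietly closes an IMetaStoreClient while swallowing (and merely logging) any exception, so it is safe to call from a finally block even when the client is still null. All of the snippets below follow the same acquire/use/close-quietly pattern; here is a minimal, self-contained sketch of it, assuming hive-hcatalog-core is on the classpath (the database and table names are placeholders):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hive.hcatalog.common.HCatUtil;

public class CloseQuietlyExample {

 public static Table fetchTable(Configuration conf) throws IOException {
  IMetaStoreClient client = null;
  try {
   HiveConf hiveConf = HCatUtil.getHiveConf(conf);
   client = HCatUtil.getHiveMetastoreClient(hiveConf);
   // "default" and "my_table" are placeholder names for this sketch.
   return HCatUtil.getTable(client, "default", "my_table");
  } catch (Exception e) {
   throw new IOException(e);
  } finally {
   // Safe even if getHiveMetastoreClient threw and client is still null:
   // closeHiveClientQuietly logs and swallows anything close() raises.
   HCatUtil.closeHiveClientQuietly(client);
  }
 }
}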

origin: apache/hive (the same snippet also ships in org.apache.hive.hcatalog/hive-hcatalog-core, com.github.hyukjinkwon.hcatalog/hive-hcatalog-core, org.spark-project.hive.hcatalog/hive-hcatalog-core, and com.facebook.presto.hive/hive-apache)

 @Override
 public void cleanupJob(JobContext context) throws IOException {
  getBaseOutputCommitter().cleanupJob(HCatMapRedUtil.createJobContext(context));

  //Cancel HCat and JobTracker tokens
  IMetaStoreClient client = null;
  try {
   HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
   client = HCatUtil.getHiveMetastoreClient(hiveConf);
   String tokenStrForm = client.getTokenStrForm();
   if (tokenStrForm != null && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
    client.cancelDelegationToken(tokenStrForm);
   }
  } catch (Exception e) {
   LOG.warn("Failed to cancel delegation token", e);
  } finally {
   HCatUtil.closeHiveClientQuietly(client);
  }
 }
}
origin: apache/hive (also in com.github.hyukjinkwon.hcatalog/hive-hcatalog-pig-adapter)

public Table getTable(String location, String hcatServerUri, String hcatServerPrincipal,
  Job job) throws IOException {
 Pair<String, String> loc_server = new Pair<String, String>(location, hcatServerUri);
 Table hcatTable = hcatTableCache.get(loc_server);
 if (hcatTable != null) {
  return hcatTable;
 }
 Pair<String, String> dbTablePair = PigHCatUtil.getDBTableNames(location);
 String dbName = dbTablePair.first;
 String tableName = dbTablePair.second;
 Table table = null;
 IMetaStoreClient client = null;
 try {
  client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class, job);
  table = HCatUtil.getTable(client, dbName, tableName);
 } catch (NoSuchObjectException nsoe) {
  throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE); // prettier error messages to frontend
 } catch (Exception e) {
  throw new IOException(e);
 } finally {
  HCatUtil.closeHiveClientQuietly(client);
 }
 hcatTableCache.put(loc_server, table);
 return table;
}
origin: apache/hive (also in com.facebook.presto.hive/hive-apache, org.spark-project.hive.hcatalog/hive-hcatalog-core, com.github.hyukjinkwon.hcatalog/hive-hcatalog-core, and org.apache.hive.hcatalog/hive-hcatalog-core)

@Override
public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
 OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(context.getConfiguration());
 IMetaStoreClient client = null;
 try {
  HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
  client = HCatUtil.getHiveMetastoreClient(hiveConf);
  handleDuplicatePublish(context,
   jobInfo,
   client,
   new Table(jobInfo.getTableInfo().getTable()));
 } catch (MetaException e) {
  throw new IOException(e);
 } catch (TException e) {
  throw new IOException(e);
 } finally {
  HCatUtil.closeHiveClientQuietly(client);
 }
 if (!jobInfo.isDynamicPartitioningUsed()) {
  JobConf jobConf = new JobConf(context.getConfiguration());
  getBaseOutputFormat().checkOutputSpecs(null, jobConf);
  //checkoutputspecs might've set some properties we need to have context reflect that
  HCatUtil.copyConf(jobConf, context.getConfiguration());
 }
}
origin: com.github.hyukjinkwon.hcatalog/hive-hcatalog-core (also in com.facebook.presto.hive/hive-apache)

private void cancelDelegationTokens(JobContext context) throws IOException{
 LOG.info("Cancelling delegation token for the job.");
 IMetaStoreClient client = null;
 try {
  HiveConf hiveConf = HCatUtil
    .getHiveConf(context.getConfiguration());
  client = HCatUtil.getHiveMetastoreClient(hiveConf);
  // cancel the deleg. tokens that were acquired for this job now that
  // we are done - we should cancel if the tokens were acquired by
  // HCatOutputFormat and not if they were supplied by Oozie.
  // In the latter case the HCAT_KEY_TOKEN_SIGNATURE property in
  // the conf will not be set
  String tokenStrForm = client.getTokenStrForm();
  if (tokenStrForm != null
    && context.getConfiguration().get(
      HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
   client.cancelDelegationToken(tokenStrForm);
  }
 } catch (MetaException e) {
  LOG.warn("MetaException while cancelling delegation token.", e);
 } catch (TException e) {
  LOG.warn("TException while cancelling delegation token.", e);
 } finally {
  HCatUtil.closeHiveClientQuietly(client);
 }
}
origin: com.cloudera.recordservice/recordservice-hcatalog-pig-adapter

public Table getTable(String location, String hcatServerUri, String hcatServerPrincipal,
  Job job) throws IOException {
 Pair<String, String> loc_server = new Pair<String, String>(location, hcatServerUri);
 Table hcatTable = hcatTableCache.get(loc_server);
 if (hcatTable != null) {
  return hcatTable;
 }
 Pair<String, String> dbTablePair = PigHCatUtil.getDBTableNames(location);
 dbTablePair = HCatRSUtil.cleanQueryPair(dbTablePair);
 String dbName = dbTablePair.first;
 String tableName = dbTablePair.second;
 Table table = null;
 HiveMetaStoreClient client = null;
 try {
  client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal,
    PigHCatUtil.class, job);
  table = HCatUtil.getTable(client, dbName, tableName);
 } catch (NoSuchObjectException nsoe) {
  throw new PigException("Table not found : " + nsoe.getMessage(),
    PIG_EXCEPTION_CODE); // prettier error messages to frontend
 } catch (Exception e) {
  throw new IOException(e);
 } finally {
  HCatUtil.closeHiveClientQuietly(client);
 }
 hcatTableCache.put(loc_server, table);
 return table;
}
org.apache.hive.hcatalog.common.HCatUtil.closeHiveClientQuietly

Popular methods of HCatUtil

  • getHiveMetastoreClient
    Get or create a Hive metastore client, depending on whether it already exists in the cache
  • getTable
  • deserialize
  • getHiveConf
  • extractSchema
  • getStorageHandler
    Create an instance of a storage handler defined in storerInfo. If one cannot be found then FosterStorageHandler is used
  • serialize
  • validateExecuteBitPresentIfReadOrWrite
    Ensure that read or write permissions are not granted without also granting execute permissions.
  • validateMorePermissive
    Test if the first FsAction is more permissive than the second.
  • configureOutputStorageHandler
  • copyConf
    Replace the contents of dest with the contents of src (see the sketch after this list)
  • copyJobPropertiesToJobConf
  • decodeBytes
  • encodeBytes
  • extractThriftToken
  • getFieldSchemaList
  • getHCatFieldSchemaList
  • getInputJobProperties
  • getJobConfFromContext
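Several of these helpers appear together in the checkOutputSpecs snippet above: getHiveConf wraps a job's Configuration in a HiveConf, and copyConf pushes properties set on a scratch JobConf back into the job's Configuration. A minimal sketch of the copyConf round trip, assuming hive-hcatalog-core on the classpath ("example.key" is a placeholder property name):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hive.hcatalog.common.HCatUtil;

public class CopyConfExample {
 public static void main(String[] args) {
  Configuration conf = new Configuration();

  // Stage changes on a scratch JobConf, as checkOutputSpecs does before
  // delegating to the base OutputFormat.
  JobConf jobConf = new JobConf(conf);
  jobConf.set("example.key", "example.value"); // placeholder property

  // copyConf(src, dest) replaces the contents of dest with those of src,
  // so the original Configuration now reflects the staged change.
  HCatUtil.copyConf(jobConf, conf);

  System.out.println(conf.get("example.key")); // -> example.value
 }
}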
