@Override
public Hive getHive() throws HiveException {
  // Delegate to the thread-local Hive instance keyed by this configuration.
  return Hive.get((HiveConf) conf);
}
@Override
public void run() {
  // Persist the collected runtime statistics to the metastore. Failures are
  // logged rather than propagated: stat persistence is best-effort.
  try {
    RuntimeStat rec = encode(map);
    Hive.get().getMSC().addRuntimeStat(rec);
  } catch (TException | HiveException | IOException e) {
    logException("Exception while persisting runtime stat", e);
  }
}
}
private static Set<Partition> getAllPartitions(Table tab) throws HiveException {
  // Fetch every partition of the table, timing the metastore round-trip
  // with the session's PerfLogger.
  final PerfLogger perfLogger = SessionState.getPerfLogger();
  perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
  final Set<Partition> partitions = Hive.get().getAllPartitionsOf(tab);
  perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
  return partitions;
}
/**
 * Retrieves the indexes defined on the given table.
 *
 * @param table table whose indexes are looked up
 * @param max maximum number of indexes to return
 * @return list of {@code Index} objects on this table (not just their names,
 *         despite what the old comment said)
 * @throws HiveException if the metastore lookup fails
 */
public static List<Index> getAllIndexes(Table table, short max) throws HiveException { Hive hive = Hive.get(); return hive.getIndexes(table.getTTable().getDbName(), table.getTTable().getTableName(), max); }
/**
 * Looks up the enabled DEFAULT constraints of {@code tbl} and returns a map
 * from column name to its default-value expression.
 *
 * @param tbl table whose default constraints are fetched
 * @return column name to default value map
 * @throws SemanticException if the metastore lookup itself fails with a
 *         semantic error; any other failure is wrapped in a RuntimeException
 *         (same contract as before, expressed with ordered catches instead of
 *         a broad catch plus instanceof test)
 */
protected static Map<String, String> getColNameToDefaultValueMap(Table tbl) throws SemanticException {
  try {
    DefaultConstraint dc = Hive.get().getEnabledDefaultConstraints(tbl.getDbName(), tbl.getTableName());
    return dc.getColNameToDefaultValueMap();
  } catch (SemanticException e) {
    // Preserve semantic errors as-is for callers that catch them.
    throw e;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
/**
 * Returns the thread-local {@code Hive} object, equivalent to
 * {@code get(true)}.
 * NOTE(review): the semantics of the boolean flag are not visible here —
 * presumably it permits refreshing the cached instance; confirm against
 * {@code get(boolean)}.
 *
 * @throws HiveException if the instance cannot be created
 */
public static Hive get() throws HiveException { return get(true); }
/**
 * Returns the metastore client of the thread-local Hive object.
 *
 * @throws HiveAuthzPluginException wrapping any metastore/Hive failure
 */
@Override
public IMetaStoreClient getHiveMetastoreClient() throws HiveAuthzPluginException {
  final String errMsg = "Error getting metastore client";
  try {
    return Hive.get().getMSC();
  } catch (MetaException | HiveException e) {
    // Multi-catch: both branches previously had identical bodies.
    throw new HiveAuthzPluginException(errMsg, e);
  }
}
private static Set<Partition> getAllPartitions(Table tab) throws HiveException {
  // Retrieve all partitions of the table; the PerfLogger brackets the call
  // so the metastore latency shows up under PARTITION_RETRIEVING.
  final PerfLogger pl = SessionState.getPerfLogger();
  pl.PerfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
  try {
    return Hive.get().getAllPartitionsOf(tab);
  } finally {
    pl.PerfLogEnd(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
  }
}
/**
 * Dumps and clears the per-phase metastore call timings, swallowing (but
 * logging) any HiveException.
 *
 * @param phase label under which the timings are reported
 * @return the timing map, or {@code null} if retrieval failed
 */
private ImmutableMap<String, Long> dumpMetaCallTimingWithoutEx(String phase) {
  try {
    return Hive.get().dumpAndClearMetaCallTiming(phase);
  } catch (HiveException he) {
    // Previously the exception was concatenated into the message AND passed
    // as the throwable, printing it twice; pass it once as the cause.
    LOG.warn("Caught exception attempting to write metadata call information", he);
  }
  return null;
}
// Builds a private HiveConf copy with the default metastore filter hook so the
// metastore client created here sees unfiltered metadata objects.
// NOTE(review): the final `this.hiveConf = hiveConf;` overwrites the modified
// copy with the caller's original conf AFTER the client is created — it looks
// as though the filtered conf is only needed for client creation, but confirm
// this reassignment is intentional and not a lost-update bug.
public PrivilegeSynchronizer(LeaderLatch privilegeSynchronizerLatch, PolicyProviderContainer policyProviderContainer, HiveConf hiveConf) { this.hiveConf = new HiveConf(hiveConf); this.hiveConf.set(MetastoreConf.ConfVars.FILTER_HOOK.getVarname(), DefaultMetaStoreFilterHookImpl.class.getName()); try { hiveClient = Hive.get(this.hiveConf).getMSC(); } catch (Exception e) { throw new RuntimeException("Error creating HiveMetastoreClient", e); } this.privilegeSynchronizerLatch = privilegeSynchronizerLatch; this.policyProviderContainer = policyProviderContainer; this.hiveConf = hiveConf; }
protected static Hive createHiveDB(HiveConf conf) throws SemanticException {
  // Obtain the Hive object for this conf, translating metastore failures
  // into analysis-time SemanticExceptions.
  try {
    return Hive.get(conf);
  } catch (HiveException e) {
    throw new SemanticException(e);
  }
}
HiveUpdater(HiveConf conf, boolean fileRename) throws HiveException {
  // Acquire the Hive session object for this conf and register it as the
  // thread-local instance before remembering the rename policy.
  this.hive = Hive.get(conf);
  Hive.set(this.hive);
  this.doFileRename = fileRename;
}
public void init(Configuration conf) throws HiveException {
  // Wrap the authorization-provider-scoped Hive handle in a proxy.
  Hive authScopedHive = Hive.get(conf, HiveAuthorizationProvider.class);
  hive_db = new HiveProxy(authScopedHive);
}
/**
 * Dumps and clears the per-phase metastore call timings, swallowing (but
 * logging) any HiveException.
 *
 * @param phase label under which the timings are reported
 * @return the timing map, or {@code null} if retrieval failed
 */
private ImmutableMap<String, Long> dumpMetaCallTimingWithoutEx(String phase) {
  try {
    return Hive.get().dumpAndClearMetaCallTiming(phase);
  } catch (HiveException he) {
    // The old form concatenated the exception into the message and also
    // passed it as the throwable, logging it twice; log the cause once.
    LOG.warn("Caught exception attempting to write metadata call information", he);
  }
  return null;
}
public static Hive get(Configuration c, Class<?> clazz) throws HiveException {
  // Reuse the configuration directly when it is already a HiveConf;
  // otherwise wrap it, attributing resources to the given class.
  if (c instanceof HiveConf) {
    return get((HiveConf) c);
  }
  return get(new HiveConf(c, clazz));
}
protected static Hive createHiveDB(HiveConf conf) throws SemanticException {
  Hive db;
  try {
    db = Hive.get(conf);
  } catch (HiveException he) {
    // Surface metastore connection problems as semantic-analysis failures.
    throw new SemanticException(he);
  }
  return db;
}
/**
 * Test basic Hive class interaction, that:
 * - We can have different Hive objects throughout the lifetime of this thread.
 */
public void testHiveCloseCurrent() throws Throwable {
  Hive first = Hive.get();
  Hive.closeCurrent();
  Hive second = Hive.get();
  Hive.closeCurrent();
  // closeCurrent() discards the thread-local instance, so the second get()
  // must return a distinct object.
  assertTrue(first != second);
}
/**
 * If the session is configured with the DUMMY materialized-view registry,
 * (re)initialize the registry against the session's Hive object.
 *
 * @param hookContext hook invocation context (unused here)
 * @throws Exception on registry initialization failure
 */
@Override
public void run(HookContext hookContext) throws Exception {
  SessionState ss = SessionState.get();
  if (ss == null) {
    return;
  }
  String registryImpl =
      ss.getConf().get(HiveConf.ConfVars.HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_IMPL.varname);
  // Constant-first equals: the old `conf.get(...).equals("DUMMY")` threw an
  // NPE whenever the property was unset.
  if ("DUMMY".equals(registryImpl)) {
    HiveMaterializedViewsRegistry.get().init(Hive.get(ss.getConf()));
  }
}
@AfterClass
public static void deInit() throws Exception {
  // Drop the table created for these tests.
  Hive.get(conf).dropTable("foo");
}
private void cancelDelegationToken() throws HiveSQLException {
  // Nothing to do when no metastore delegation token was obtained.
  if (hmsDelegationTokenStr == null) {
    return;
  }
  try {
    Hive.get(getHiveConf()).cancelDelegationToken(hmsDelegationTokenStr);
    hmsDelegationTokenStr = null;
    // Clear the token signature so later connections don't reference the
    // cancelled token.
    getHiveConf().setVar(HiveConf.ConfVars.METASTORE_TOKEN_SIGNATURE, "");
  } catch (HiveException e) {
    throw new HiveSQLException("Couldn't cancel delegation token", e);
  }
}