/** Closes the thread-local Hive client, if one is open, logging (rather than propagating) any errors. */
void close() {
  if (hive == null) {
    return;
  }
  runAndLogErrors(Hive::closeCurrent);
  hive = null;
}
/**
 * Stops this service, then closes the thread-local Hive client so the HMS
 * connection held by this service thread is released rather than leaked.
 */
@Override
public synchronized void stop() {
  super.stop();
  // Release the HMS connection for this service thread
  Hive.closeCurrent();
}
@Override public void run() { runner = Thread.currentThread(); try { SessionState.start(ss); runSequential(); } finally { try { // Call Hive.closeCurrent() that closes the HMS connection, causes // HMS connection leaks otherwise. Hive.closeCurrent(); } catch (Exception e) { LOG.warn("Exception closing Metastore connection:" + e.getMessage()); } runner = null; result.setRunning(false); } }
@Override public void run() { runner = Thread.currentThread(); try { OperationLog.setCurrentOperationLog(operationLog); SessionState.start(ss); runSequential(); } finally { try { // Call Hive.closeCurrent() that closes the HMS connection, causes // HMS connection leaks otherwise. Hive.closeCurrent(); } catch (Exception e) { LOG.warn("Exception closing Metastore connection:" + e.getMessage()); } runner = null; result.setRunning(false); } }
/**
 * Test basic Hive class interaction, that:
 * - We can have different Hive objects throughout the lifetime of this thread,
 *   i.e. closeCurrent() discards the thread-local instance so the next get()
 *   builds a fresh one.
 */
public void testHiveCloseCurrent() throws Throwable {
  Hive hive1 = Hive.get();
  Hive.closeCurrent();
  Hive hive2 = Hive.get();
  Hive.closeCurrent();
  // assertNotSame reports both references on failure, unlike assertTrue on !=.
  assertNotSame(hive1, hive2);
}
/**
 * Replaces the thread-local Hive client with a freshly constructed one.
 * The supplied stale client (if any) and the current thread-local client are
 * closed first so their HMS connections are released.
 *
 * @param c conf to use for the new client; a fresh HiveConf is created when null
 * @param needsRefresh why the caller is recreating — used for diagnostics only
 * @param db the previously cached client to close, may be null
 * @param doRegisterAllFns whether the new client registers all functions
 * @return the new thread-local Hive client
 * @throws HiveException if the new client cannot be constructed
 */
private static Hive create(HiveConf c, boolean needsRefresh, Hive db, boolean doRegisterAllFns)
    throws HiveException {
  if (db != null) {
    // Guard so the string concatenation and the isCurrentUserOwner() call are
    // not paid when debug logging is disabled.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Creating new db. db = " + db + ", needsRefresh = " + needsRefresh
          + ", db.isCurrentUserOwner = " + db.isCurrentUserOwner());
    }
    db.close();
  }
  closeCurrent();
  if (c == null) {
    c = createHiveConf();
  }
  c.set("fs.scheme.class", "dfs");
  Hive newdb = new Hive(c, doRegisterAllFns);
  hiveDB.set(newdb);
  return newdb;
}
/**
 * Returns the thread-local Hive client, recreating it when it is missing,
 * owned by a different user, explicitly refreshed, or incompatible with the
 * given conf. The stale client is closed before a replacement is built so its
 * HMS connection is released.
 *
 * @param c conf to install on the returned client; ignored for compatibility
 *          checks when null
 * @param needsRefresh force recreation of the client
 * @param isFastCheck passed through to the compatibility check
 * @param doRegisterAllFns whether a newly created client registers all functions
 * @return the (possibly recreated) thread-local Hive client
 * @throws HiveException if a replacement client cannot be constructed
 */
private static Hive getInternal(HiveConf c, boolean needsRefresh, boolean isFastCheck,
    boolean doRegisterAllFns) throws HiveException {
  Hive db = hiveDB.get();
  if (db == null || !db.isCurrentUserOwner() || needsRefresh
      || (c != null && !isCompatible(db, c, isFastCheck))) {
    if (db != null) {
      // Guard so the string concatenation and the isCurrentUserOwner() call are
      // not paid when debug logging is disabled.
      if (LOG.isDebugEnabled()) {
        LOG.debug("Creating new db. db = " + db + ", needsRefresh = " + needsRefresh
            + ", db.isCurrentUserOwner = " + db.isCurrentUserOwner());
      }
      closeCurrent();
    }
    db = create(c, doRegisterAllFns);
  }
  if (c != null) {
    db.conf = c;
  }
  return db;
}
/**
 * Per-test cleanup: drops the test database/table and closes the thread-local
 * Hive client so its HMS connection does not leak across tests.
 */
@After
public void tearDown() throws Exception {
  dropDbTable();
  Hive.closeCurrent();
}
@Override protected void tearDown() throws Exception { try { super.tearDown(); // disable trash hiveConf.setFloat("fs.trash.checkpoint.interval", 30); // FS_TRASH_CHECKPOINT_INTERVAL_KEY (hadoop-2) hiveConf.setFloat("fs.trash.interval", 30); // FS_TRASH_INTERVAL_KEY (hadoop-2) Hive.closeCurrent(); } catch (Exception e) { System.err.println(StringUtils.stringifyException(e)); System.err .println("Unable to close Hive Metastore using configruation: \n " + hiveConf); throw e; } }
} finally { Hive.closeCurrent();
/**
 * Obtains an HMS delegation token for {@code owner} (also used as the renewer).
 * The thread-local Hive client is closed first so the token is fetched over a
 * fresh metastore connection.
 *
 * @param owner the user the token is issued for
 * @return the delegation token string
 * @throws UnsupportedOperationException when the metastore is not SASL-secured
 *         or doAs is disabled — a delegation token only makes sense for a
 *         secure remote metastore
 * @throws HiveSQLException on any other failure talking to the metastore
 */
public String getDelegationTokenFromMetaStore(String owner)
    throws HiveSQLException, UnsupportedOperationException, LoginException, IOException {
  HiveConf hiveConf = getHiveConf();
  if (!hiveConf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL)
      || !hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
    // Fixed broken grammar of the original message ("is can only be obtained").
    throw new UnsupportedOperationException(
        "delegation token can only be obtained for a secure remote metastore");
  }
  try {
    Hive.closeCurrent();
    return Hive.get(hiveConf).getDelegationToken(owner, owner);
  } catch (HiveException e) {
    if (e.getCause() instanceof UnsupportedOperationException) {
      throw (UnsupportedOperationException) e.getCause();
    } else {
      throw new HiveSQLException("Error connect metastore to setup impersonation", e);
    }
  }
}
Hive.closeCurrent(); } catch (Throwable t) { LOG.warn("Error closing thread local Hive", t);
/**
 * Returns the IMetaStoreClient backing this session's Hive client.
 *
 * On MetaException, if this session holds a delegation token it is assumed the
 * token expired: the thread-local Hive client is dropped, a fresh token is
 * obtained, and the lookup is retried exactly once (the recursive call passes
 * {@code false} so a second failure propagates).
 *
 * @param retryInCaseOfTokenExpiration whether a token-refresh retry is allowed
 * @throws HiveSQLException if the connection cannot be established
 */
private IMetaStoreClient getMetaStoreClient(boolean retryInCaseOfTokenExpiration)
    throws HiveSQLException {
  try {
    return Hive.get(getHiveConf()).getMSC();
  } catch (HiveException e) {
    throw new HiveSQLException("Failed to get metastore connection: " + e, e);
  } catch (MetaException e1) {
    if (hmsDelegationTokenStr != null && retryInCaseOfTokenExpiration) {
      LOG.info("Retrying failed metastore connection: " + e1, e1);
      // Drop the (possibly stale) thread-local Hive client before re-authenticating.
      Hive.closeCurrent();
      try {
        setDelegationToken(
            Hive.get(getHiveConf()).getDelegationToken(sessionUgi.getUserName(), getUserName()));
      } catch (HiveException e2) {
        throw new HiveSQLException("Error connect metastore to setup impersonation: " + e2, e2);
      }
      // Single retry: recurse with retries disabled.
      return getMetaStoreClient(false);
    } else {
      throw new HiveSQLException("Failed to get metastore connection: " + e1, e1);
    }
  }
}
} finally { Hive.closeCurrent();
/**
 * Executes the query on this (background) thread under the parent session's
 * context: the parent's Hive client, SessionState, and PerfLogger are
 * installed into this thread's thread-locals first so the execution matches
 * the owning session.
 */
@Override
public Object run() throws HiveSQLException {
  assert (!parentHive.allowClose());
  Hive.set(parentHive);
  // TODO: can this result in cross-thread reuse of session state?
  SessionState.setCurrentSessionState(parentSessionState);
  PerfLogger.setPerfLogger(SessionState.getPerfLogger());
  LogUtils.registerLoggingContext(queryState.getConf());
  ShimLoader.getHadoopShims().setHadoopQueryContext(queryState.getQueryId());
  try {
    if (asyncPrepare) {
      prepare(queryState);
    }
    runQuery();
  } catch (HiveSQLException e) {
    // TODO: why do we invent our own error path on top of the one from Future.get?
    setOperationException(e);
    LOG.error("Error running hive query: ", e);
  } finally {
    LogUtils.unregisterLoggingContext();
    // If a new Hive object was created by this child thread, close it — it may
    // hold an HMS connection that would otherwise leak.
    Hive.closeCurrent();
  }
  return null;
}
};
/**
 * Tears down the mini-cluster test environment, in order: cleans test files
 * (unless QTEST_LEAVE_FILES is set), destroys the Tez session (Tez clusters
 * only), runs the setup teardown, closes the Spark session, shuts down the MR
 * cluster and DFS, closes all cached FileSystems, and finally closes the
 * thread-local Hive client to release its HMS connection.
 */
public void shutdown() throws Exception {
  if (System.getenv(QTEST_LEAVE_FILES) == null) {
    cleanUp();
  }
  if (clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
    // NOTE(review): assumes SessionState.get() and its Tez session are non-null
    // whenever the cluster type is TEZ — confirm against setup path.
    SessionState.get().getTezSession().destroy();
  }
  setup.tearDown();
  if (sparkSession != null) {
    try {
      SparkSessionManagerImpl.getInstance().closeSession(sparkSession);
    } catch (Exception ex) {
      LOG.error("Error closing spark session.", ex);
    } finally {
      sparkSession = null;
    }
  }
  if (mr != null) {
    mr.shutdown();
    mr = null;
  }
  FileSystem.closeAll();
  if (dfs != null) {
    dfs.shutdown();
    dfs = null;
  }
  Hive.closeCurrent();
}
/**
 * Stops this service, then closes the thread-local Hive client so the HMS
 * connection held by this service thread is released rather than leaked.
 */
@Override
public synchronized void stop() {
  super.stop();
  // Release the HMS connection for this service thread
  Hive.closeCurrent();
}
private void cancelDelegationToken() throws HiveSQLException { if (delegationTokenStr != null) { try { Hive.get(getHiveConf()).cancelDelegationToken(delegationTokenStr); } catch (HiveException e) { throw new HiveSQLException("Couldn't cancel delegation token", e); } // close the metastore connection created with this delegation token Hive.closeCurrent(); } }
private void cancelDelegationToken() throws HiveSQLException { if (delegationTokenStr != null) { try { Hive.get(getHiveConf()).cancelDelegationToken(delegationTokenStr); } catch (HiveException e) { throw new HiveSQLException("Couldn't cancel delegation token", e); } // close the metastore connection created with this delegation token Hive.closeCurrent(); } }
private void cancelDelegationToken() throws HiveSQLException { if (delegationTokenStr != null) { try { Hive.get(getHiveConf()).cancelDelegationToken(delegationTokenStr); } catch (HiveException e) { throw new HiveSQLException("Couldn't cancel delegation token", e); } // close the metastore connection created with this delegation token Hive.closeCurrent(); } }