// Body of the "TezSessionPool-expiration" thread: propagates the SessionState captured
// at pool-initialization time into this thread before entering the expiration loop, so
// session-scoped configuration is visible here.
// NOTE(review): fragment — the enclosing `new Thread(new Runnable() {` is outside the
// visible range; the trailing `}, "TezSessionPool-expiration")` names the thread.
@Override public void run() {
  try {
    SessionState.setCurrentSessionState(initSessionState);
    runExpirationThread();
  } catch (Exception e) {
    // Any exception escaping the loop ends the thread; warn, then fall through to the exit log.
    LOG.warn("Exception in TezSessionPool-expiration thread. Thread will shut down", e);
  } finally {
    LOG.info("TezSessionPool-expiration thread exiting");
  }
} }, "TezSessionPool-expiration");
// Body of the "TezSessionPool-cleanup" thread: installs the initializing thread's
// SessionState on this thread, then runs the restart loop until it exits or throws.
// NOTE(review): fragment — the enclosing `new Thread(new Runnable() {` is outside the
// visible range; the trailing `}, "TezSessionPool-cleanup")` names the thread.
@Override public void run() {
  try {
    SessionState.setCurrentSessionState(initSessionState);
    runRestartThread();
  } catch (Exception e) {
    // Any exception escaping the loop ends the thread; warn, then fall through to the exit log.
    LOG.warn("Exception in TezSessionPool-cleanup thread. Thread will shut down", e);
  } finally {
    LOG.info("TezSessionPool-cleanup thread exiting");
  }
} }, "TezSessionPool-cleanup");
// Worker that drains the shared initialSessions queue and starts each session on this
// thread. The parent's SessionState (if any) is installed first so startup code sees
// session-scoped config. Error policy: the FIRST failure is stored silently into
// `firstError` (compareAndSet succeeds); later failures fail the CAS and are only
// logged, so the remaining sessions still get a chance to start.
// NOTE(review): fragment — the enclosing anonymous-class declaration is not visible.
public void run() {
  if (parentSessionState != null) {
    SessionState.setCurrentSessionState(parentSessionState);
  }
  while (true) {
    TezSessionPoolSession session = initialSessions.poll();
    if (session == null) break; // queue drained — this worker is done
    try {
      startInitialSession(session);
    } catch (Exception e) {
      if (!firstError.compareAndSet(null, e)) {
        LOG.error("Failed to start session; ignoring due to previous error", e);
        // TODO Why even continue after this. We're already in a state where things are messed up ?
      }
    }
  }
} };
@Override public void isLeader() { LOG.info("HS2 instance {} became the LEADER. Starting/Reconnecting tez sessions..", hiveServer2.serviceUri); hiveServer2.isLeader.set(true); if (parentSession != null) { SessionState.setCurrentSessionState(parentSession); } hiveServer2.startOrReconnectTezSessions(); LOG.info("Started/Reconnected tez sessions."); hiveServer2.allowClientSessions(); // resolve futures used for testing if (HiveConf.getBoolVar(hiveServer2.getHiveConf(), ConfVars.HIVE_IN_TEST)) { hiveServer2.isLeaderTestFuture.set(true); hiveServer2.resetNotLeaderTestFuture(); } }
// Moves the given path to trash on a worker thread. The parent session's SessionState
// is bound to this thread first so the move honors session-level configuration.
// NOTE(review): fragment — the closing braces of this Callable lie beyond the visible range.
@Override public Boolean call() throws Exception {
  SessionState.setCurrentSessionState(parentSession);
  return FileUtils.moveToTrash(fs, status.getPath(), conf, purge);
// Callable that binds the owning session's SessionState to the executing (pool) thread
// before obtaining the SparkSession, so session-scoped config is visible during creation.
// NOTE(review): fragment — the trailing extra `}` closes an anonymous class declared
// outside the visible range.
@Override public SparkSession call() throws Exception {
  SessionState.setCurrentSessionState(sessionState);
  return SparkUtilities.getSparkSession(hiveConf, sparkSessionManager);
} }
/**
 * Applies client-supplied session configuration. Keys prefixed with {@code "set:"} are
 * routed through SetProcessor, keys prefixed with {@code "use:"} switch the current
 * database (after verifying it exists in the metastore), and all remaining keys are
 * verified and set directly on the session conf.
 *
 * @param sessionConfMap configuration overlay supplied at session-open time
 * @throws HiveSQLException if a variable cannot be set or the requested database is missing
 */
private void configureSession(Map<String, String> sessionConfMap) throws HiveSQLException {
  SessionState.setCurrentSessionState(sessionState);
  for (Map.Entry<String, String> confEntry : sessionConfMap.entrySet()) {
    final String confKey = confEntry.getKey();
    final String confValue = confEntry.getValue();
    if (confKey.startsWith("set:")) {
      try {
        // Strip the "set:" prefix and delegate to the standard SET handling.
        SetProcessor.setVariable(confKey.substring(4), confValue);
      } catch (Exception e) {
        throw new HiveSQLException(e);
      }
    } else if (confKey.startsWith("use:")) {
      // Validate the database exists before making it current.
      try {
        if (sessionHive.getDatabase(confValue) == null) {
          throw new HiveSQLException("Database " + confValue + " does not exist");
        }
      } catch (HiveException e) {
        throw new HiveSQLException(e);
      }
      SessionState.get().setCurrentDatabase(confValue);
    } else {
      // Plain key: validate against restricted/hidden lists and set on the session conf.
      sessionConf.verifyAndSet(confKey, confValue);
    }
  }
}
public static SessionState setUpSessionState(HiveConf conf, String user, boolean doStart) { SessionState sessionState = SessionState.get(); if (sessionState == null) { // Note: we assume that workers run on the same threads repeatedly, so we can set up // the session here and it will be reused without explicitly storing in the worker. sessionState = new SessionState(conf, user); if (doStart) { // TODO: Required due to SessionState.getHDFSSessionPath. Why wasn't it required before? sessionState.setIsHiveServerQuery(true); SessionState.start(sessionState); } SessionState.setCurrentSessionState(sessionState); } return sessionState; } }
/**
 * Binds this session's state to the current handler thread and refreshes per-request
 * bookkeeping. Must run before any operation executes on the handler thread, since the
 * rest of the request path reads SessionState from the thread local.
 *
 * @param userAccess true when the acquisition is on behalf of a user request; updates
 *                   last-access time and marks the session user-locked
 */
private synchronized void acquireAfterOpLock(boolean userAccess) {
  // Need to make sure that the this HiveServer2's session's SessionState is
  // stored in the thread local for the handler thread.
  SessionState.setCurrentSessionState(sessionState);
  sessionState.setForwardedAddresses(SessionManager.getForwardedAddresses());
  sessionState.setIsUsingThriftJDBCBinarySerDe(updateIsUsingThriftJDBCBinarySerDe());
  if (userAccess) {
    lastAccessTime = System.currentTimeMillis();
    lockedByUser = true;
  }
  // set the thread name with the logging prefix.
  sessionState.updateThreadName();
  // If the thread local Hive is different from sessionHive, it means, the previous query execution in
  // master thread has re-created Hive object due to changes in MS related configurations in sessionConf.
  // So, it is necessary to reset sessionHive object based on new sessionConf. Here, we cannot,
  // directly set sessionHive with thread local Hive because if the previous command was REPL LOAD, then
  // the config changes lives only within command execution not in session level.
  // So, the safer option is to invoke Hive.get() which decides if to reuse Thread local Hive or re-create it.
  if (Hive.getThreadLocal() != sessionHive) {
    try {
      setSessionHive();
    } catch (HiveSQLException e) {
      // Callers don't declare HiveSQLException; surface as unchecked.
      throw new RuntimeException(e);
    }
  }
}
// Rebind the parent thread's SessionState onto the current thread.
// NOTE(review): fragment — the enclosing definition is not visible here.
SessionState.setCurrentSessionState(parentSessionState);
// Test fixture constructor: installs the shared SessionState `ss` on the current thread
// so expression evaluation can read configuration from the thread-local SessionState.
// NOTE(review): fragment — the constructor body continues beyond the visible range.
public TestExpressionEvaluator() {
  SessionState.setCurrentSessionState(ss);
// Each callable either reuses the shared SessionState or creates a fresh one from `conf`,
// then installs it on the executing pool thread before doing its work.
// NOTE(review): fragment — the lambda body continues beyond the visible range.
callables.add(() -> {
  SessionState ss = (reuseSession) ? sessionState : new SessionState(conf);
  SessionState.setCurrentSessionState(ss);
// Test worker thread: repeatedly checks a SparkSession out of sessionManagerHS2,
// asserts the manager hands back the same open session across iterations, returns it,
// and closes it at the end. Any failure flips anyFailedSessionThread so the main test
// thread can detect it (an assertion thrown here would otherwise only kill this thread).
// NOTE(review): fragment — the enclosing anonymous-class declaration is not visible.
@Override public void run() {
  try {
    SessionState.setCurrentSessionState(ss);
    // Seed per-thread so sleep jitter differs between workers.
    Random random = new Random(Thread.currentThread().getId());
    String threadName = Thread.currentThread().getName();
    System.out.println(threadName + " started.");
    HiveConf conf = getHiveConf();
    SparkSession prevSession = null;
    SparkSession currentSession = null;
    for(int i = 0; i < 5; i++) {
      currentSession = sessionManagerHS2.getSession(prevSession, conf, true);
      // Either first acquisition, or the manager must return the same session object.
      assertTrue(prevSession == null || prevSession == currentSession);
      assertTrue(currentSession.isOpen());
      System.out.println(String.format("%s got session (%d): %s",
          threadName, i, currentSession.getSessionId()));
      Thread.sleep((random.nextInt(3)+1) * 1000);
      sessionManagerHS2.returnSession(currentSession);
      prevSession = currentSession;
    }
    sessionManagerHS2.closeSession(currentSession);
    System.out.println(threadName + " ended.");
  } catch (Throwable e) {
    anyFailedSessionThread = true;
    String msg = String.format("Error executing '%s'", Thread.currentThread().getName());
    LOG.error(msg, e);
    fail(msg + " " + StringUtils.stringifyException(e));
  }
} }
// Per-test setup: creates a fresh SessionState from a default HiveConf and binds it to
// the test thread, since the code under test reads conf via the thread-local SessionState.
// NOTE(review): fragment — the method body continues beyond the visible range.
@Before public void setup() throws Exception {
  SessionState ss = new SessionState(new HiveConf());
  SessionState.setCurrentSessionState(ss);
public AbstractTestGenericUDFOPNumeric() { // Arithmetic operations rely on getting conf from SessionState, need to initialize here. SessionState ss = new SessionState(new HiveConf()); ss.getConf().setVar(HiveConf.ConfVars.HIVE_COMPAT, "latest"); SessionState.setCurrentSessionState(ss); }
public TestVectorArithmetic() { // Arithmetic operations rely on getting conf from SessionState, need to initialize here. SessionState ss = new SessionState(new HiveConf()); ss.getConf().setVar(HiveConf.ConfVars.HIVE_COMPAT, "default"); SessionState.setCurrentSessionState(ss); }
public TestVectorFilterCompare() { // Arithmetic operations rely on getting conf from SessionState, need to initialize here. SessionState ss = new SessionState(new HiveConf()); ss.getConf().setVar(HiveConf.ConfVars.HIVE_COMPAT, "latest"); SessionState.setCurrentSessionState(ss); }
public TestVectorNegative() { // Arithmetic operations rely on getting conf from SessionState, need to initialize here. SessionState ss = new SessionState(new HiveConf()); ss.getConf().setVar(HiveConf.ConfVars.HIVE_COMPAT, "latest"); SessionState.setCurrentSessionState(ss); }
// NOTE(review): fragment — this span begins mid-expression (the tail of an error
// message) and the try block it opens closes beyond the visible range. After the
// preceding validation, the session's state is bound to the current thread and `isOk`
// tracks whether the subsequent work completes (presumably checked in a finally —
// confirm in the full source).
" is not valid. Context: " + query);
SessionState.setCurrentSessionState(sessionState);
boolean isOk = false;
try {
// Background-execution body for an async query: re-binds the parent session's Hive
// object, SessionState, perf logger, logging context and query context onto this
// worker thread, runs (optionally prepares) the query, then tears down the logging
// context and thread-local Hive/HMS connection.
// NOTE(review): fragment — the enclosing anonymous-class declaration is not visible.
@Override public Object run() throws HiveSQLException {
  assert (!parentHive.allowClose());
  Hive.set(parentHive);
  // TODO: can this result in cross-thread reuse of session state?
  SessionState.setCurrentSessionState(parentSessionState);
  PerfLogger.setPerfLogger(SessionState.getPerfLogger());
  LogUtils.registerLoggingContext(queryState.getConf());
  ShimLoader.getHadoopShims().setHadoopQueryContext(queryState.getQueryId());
  try {
    if (asyncPrepare) {
      prepare(queryState);
    }
    runQuery();
  } catch (HiveSQLException e) {
    // TODO: why do we invent our own error path op top of the one from Future.get?
    setOperationException(e);
    LOG.error("Error running hive query: ", e);
  } finally {
    LogUtils.unregisterLoggingContext();
    // If new hive object is created by the child thread, then we need to close it as it might
    // have created a hms connection. Call Hive.closeCurrent() that closes the HMS connection, causes
    // HMS connection leaks otherwise.
    Hive.closeCurrent();
  }
  return null;
} };