@Test
public void tezTask_updates_Metrics() throws IOException {
  Metrics mockMetrics = Mockito.mock(Metrics.class);

  TezTask tezTask = new TezTask();
  tezTask.updateTaskMetrics(mockMetrics);

  // Exactly one Tez task counter bump; the Spark and MR counters stay untouched.
  verify(mockMetrics, times(1)).incrementCounter(MetricsConstant.HIVE_TEZ_TASKS);
  verify(mockMetrics, never()).incrementCounter(MetricsConstant.HIVE_SPARK_TASKS);
  verify(mockMetrics, never()).incrementCounter(MetricsConstant.HIVE_MR_TASKS);
}
session = TezSessionPoolManager.getInstance().getSession(
    session, conf, false, getWork().getLlapMode());
ss.setTezSession(session);
try {
  // Localize extra resources and make sure the session is open and knows about them.
  Map<String, LocalResource> inputOutputLocalResources =
      getExtraLocalResources(jobConf, scratchDir, inputOutputJars);
  updateSession(session, jobConf, scratchDir, inputOutputJars, inputOutputLocalResources);
  List<LocalResource> additionalLr = session.getLocalizedResources();
  logResources(additionalLr);

  // Translate the TezWork into a Tez DAG and tag it with the query id.
  DAG dag = build(jobConf, work, scratchDir, appJarLr, additionalLr, ctx);
  CallerContext callerContext = CallerContext.create(
      "HIVE", queryPlan.getQueryId(),
      "HIVE_QUERY_ID", queryPlan.getQueryStr());
  dag.setCallerContext(callerContext);
  addExtraResourcesToDag(session, dag, inputOutputJars, inputOutputLocalResources);

  // Submit the DAG to the cluster; a monitor then tracks progress and, on
  // failure, surfaces its diagnostics as the task exception.
  dagClient = submit(jobConf, dag, scratchDir, appJarLr, session,
      additionalLr, inputOutputJars, inputOutputLocalResources);
  // ...
  this.setException(new HiveException(monitor.getDiagnostics()));
  // ...
} finally {
  // Always return the session to the pool, even when the query failed.
  try {
    TezSessionPoolManager.getInstance()
        .returnSession(session, getWork().getLlapMode());
  } catch (Exception e) {
    LOG.error("Failed to return session: {} to pool", session, e);
  }
}
rc = close(work, rc);
public static void processFileSink(GenTezProcContext context, FileSinkOperator fileSink)
    throws SemanticException {

  ParseContext parseContext = context.parseContext;

  // Is this an INSERT OVERWRITE TABLE?
  boolean isInsertTable = GenMapRedUtils.isInsertInto(parseContext, fileSink);
  HiveConf hconf = parseContext.getConf();

  boolean chDir = GenMapRedUtils.isMergeRequired(context.moveTask,
      hconf, fileSink, context.currentTask, isInsertTable);

  Path finalName = GenMapRedUtils.createMoveTask(context.currentTask,
      chDir, fileSink, parseContext, context.moveTask, hconf, context.dependencyTask);

  if (chDir) {
    // Merge the files in the destination table/partitions by creating a Map-only
    // merge job. If the underlying data is RCFile or OrcFile, an RCFileBlockMerge
    // or OrcFileStripeMerge task is created instead.
    LOG.info("using CombineHiveInputformat for the merge job");
    GenMapRedUtils.createMRWorkForMergingFiles(fileSink, finalName,
        context.dependencyTask, context.moveTask, hconf, context.currentTask);
  }

  FetchTask fetchTask = parseContext.getFetchTask();
  if (fetchTask != null && context.currentTask.getNumChild() == 0) {
    if (fetchTask.isFetchFrom(fileSink.getConf())) {
      context.currentTask.setFetchSource(true);
    }
  }
}
LOG.info("Printing ORC row group counter for tez task: " + tezTask.getName()); TezCounters counters = tezTask.getTezCounters(); if (counters != null) { for (CounterGroup group : counters) { if (group.getName().equals(LlapIOCounters.class.getName())) { console.printInfo(tezTask.getId() + " LLAP IO COUNTERS:", false); for (TezCounter counter : group) { if (counter.getDisplayName().equals(LlapIOCounters.SELECTED_ROWGROUPS.name())) {
String userName = getUserNameForGroups(ss);
List<String> groups = null;
if (userName == null) {
  userName = "anonymous";
} else {
  groups = UserGroupInformation.createRemoteUser(userName).getGroups();
}
// ... (a workload-management MappingInput "mi" is built from the user and groups)
session = sessionRef.value = WorkloadManagerFederation.getSession(
    sessionRef.value, conf, mi, getWork().getLlapMode(), wmContext);
perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_GET_SESSION);
try {
  // Localize the query resources and build the DAG.
  ensureSessionHasResources(session, allNonConfFiles);
  logResources(allNonAppResources);
  DAG dag = build(jobConf, work, scratchDir, ctx, allResources);
  dag.setCallerContext(callerContext);

  // Cancellation is checked both before and after submission: a DAG submitted
  // after shutdown must have its client closed again.
  if (isShutdown) {
    throw new HiveException("Operation cancelled");
  }
  DAGClient dagClient = submit(jobConf, dag, sessionRef);
  session = sessionRef.value;
  boolean wasShutdown = false;
  // ... (wasShutdown is set under a lock while publishing dagClient)
  if (wasShutdown) {
    closeDagClientOnCancellation(dagClient);
    throw new HiveException("Operation cancelled");
  }

  // ... (monitoring; on a non-zero return code:)
  this.setException(new HiveException(monitor.getDiagnostics()));
  // ...
} finally {
  // ...
}
rc = close(work, rc, dagClient);
@Override
public Operator<? extends OperatorDesc> getReducer(MapWork mapWork) {
  List<BaseWork> children = getWork().getChildren(mapWork);
  if (children.size() != 1) {
    return null;
  }
  if (!(children.get(0) instanceof ReduceWork)) {
    return null;
  }
  return ((ReduceWork) children.get(0)).getReducer();
}
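// A minimal sketch (not from this codebase) of the shape getReducer() expects:
// a MapWork whose single child in the TezWork graph is a ReduceWork. The
// variable names below are illustrative.
TezWork sketch = new TezWork("sketch-query-id", null);
MapWork mapSide = new MapWork();
ReduceWork reduceSide = new ReduceWork();
sketch.add(mapSide);
sketch.add(reduceSide);
sketch.connect(mapSide, reduceSide, new TezEdgeProperty(EdgeType.SIMPLE_EDGE));
// For a TezTask whose work is this TezWork, getReducer(mapSide) returns
// reduceSide.getReducer(); zero, multiple, or non-reduce children yield null.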
// Localize extra resources, refresh the session, and build the DAG.
Map<String, LocalResource> inputOutputLocalResources =
    getExtraLocalResources(jobConf, scratchDir, inputOutputJars);
updateSession(session, jobConf, scratchDir, inputOutputJars, inputOutputLocalResources);
DAG dag = build(jobConf, work, scratchDir, appJarLr, additionalLr, ctx);
addExtraResourcesToDag(session, dag, inputOutputJars, inputOutputLocalResources);

// Submit and monitor; a non-zero return code carries the monitor's diagnostics.
client = submit(jobConf, dag, scratchDir, appJarLr, session,
    additionalLr, inputOutputJars, inputOutputLocalResources);
rc = monitor.monitorExecution(client, ctx.getHiveTxnManager(), conf, dag);
if (rc != 0) {
  this.setException(new HiveException(monitor.getDiagnostics()));
}
rc = close(work, rc);
TezWork tezWork = context.currentTask.getWork();
if (BasicStatsNoJobTask.canUseFooterScan(table, inputFormat)) {
  // Table stats can be computed from file footers alone, so the stats task
  // can be chained directly without launching another Tez job.
  context.currentTask.addDependentTask(statsTask);
}
work.connect(rws[0], rws[1], edgeProp);

task = new TezTask(utils);
task.setWork(work);
task.setConsole(mock(LogHelper.class));

QueryPlan mockQueryPlan = mock(QueryPlan.class);
doReturn(UUID.randomUUID().toString()).when(mockQueryPlan).getQueryId();
task.setQueryPlan(mockQueryPlan);
List<TezTask> rootTasks = Utilities.getTezTasks(plan.getRootTasks());
for (TezTask tezTask : rootTasks) {
  List<BaseWork> baseWorks = tezTask.getWork().getAllWork();
  for (BaseWork baseWork : baseWorks) {
    String vertexName = baseWork.getName();
    LOG.debug("Reading runtime statistics for tez vertex task: {}", vertexName);
    TezCounters counters = tezTask.getTezCounters();
    if (counters != null) {
      String groupName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP);
      // ... (per-vertex operator statistics are read from this counter group)
    }
  }
}
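// Hedged sketch of how the counters in the group named by groupName could be
// enumerated once the loop above has located them; "counters" and "groupName"
// come from the excerpt above, the rest is illustrative.
CounterGroup runtimeGroup = counters.getGroup(groupName);
for (TezCounter counter : runtimeGroup) {
  LOG.debug("{} = {}", counter.getDisplayName(), counter.getValue());
}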
setAccessControlsForCurrentUser(dag, queryPlan.getQueryId(), conf);
// ...
checkOutputSpec(w, parentConf);
// ...
checkOutputSpec(w, wxConf);
Vertex wx = utils.createVertex(wxConf, w, scratchDir, fs, ctx, !isFinal,
    work, work.getVertexType(w), vertexResources);
mergeWork.setName("File Merge"); tezWork.add(mergeWork); task = new TezTask(); task.setWork(tezWork); } else {
@Test
public void testEmptyWork() throws Exception {
  DAG dag = task.build(conf, new TezWork("", null), path, new Context(conf),
      DagUtils.createTezLrMap(appLr, null));
  assertEquals(0, dag.getVertices().size());
}
@Test
public void testClose() throws HiveException {
  task.close(work, 0, null);
  // close(..., 0, ...) must propagate a successful jobClose to every operator.
  verify(op, times(4)).jobClose(any(Configuration.class), eq(true));
}
@Test
public void testSubmit() throws Exception {
  DAG dag = DAG.create("test");
  task.submit(conf, dag, Ref.from(sessionState));
  // validate close/reopen
  verify(sessionState, times(1)).reopen();
  verify(session, times(2)).submitDAG(any(DAG.class));
}
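// A hedged sketch of the mock wiring this verification implies (the actual
// setUp is not part of this excerpt): the first submitDAG call fails with
// SessionNotRunning, forcing one reopen() followed by a resubmission, hence
// times(1) for reopen and times(2) for submitDAG above.
when(session.submitDAG(any(DAG.class)))
    .thenThrow(new SessionNotRunning(""))
    .thenReturn(mock(DAGClient.class));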
// Tail of a stream pipeline: each pre-insert DDL task becomes an
// InsertCommitHookDesc whose DDLWork is chained after the TezTask.
.map(ddlPreInsertTask -> new InsertCommitHookDesc(ddlPreInsertTask.getTable(),
    ddlPreInsertTask.isOverwrite()))
.forEach(insertCommitHookDesc -> tezTask.addDependentTask(
    TaskFactory.get(new DDLWork(getInputs(), getOutputs(), insertCommitHookDesc), conf)));
@Override
public boolean connect(Configuration hconf, Task sourceTask) {
  // Fall back to the MR aggregator for anything that is not a TezTask.
  if (!(sourceTask instanceof TezTask)) {
    delegate = true;
    return mrAggregator.connect(hconf, sourceTask);
  }
  counters = ((TezTask) sourceTask).getTezCounters();
  return counters != null;
}
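// Hedged usage sketch; "aggregator", "hconf", and "finishedTask" are
// illustrative names, not taken from this excerpt. A true return means stats
// can be read, either from the captured Tez counters or via the MR delegate.
if (aggregator.connect(hconf, finishedTask)) {
  // read aggregated statistics from the connected source
} else {
  // the finished TezTask published no counters; skip stats collection
}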