@Override public void onTaskEnd(SparkListenerTaskEnd taskEnd) { if (taskEnd.reason() instanceof org.apache.spark.Success$ && !taskEnd.taskInfo().speculative()) { Metrics metrics = new Metrics(taskEnd.taskMetrics(), taskEnd.taskInfo()); Integer jobId; synchronized (stageToJobId) { jobId = stageToJobId.get(taskEnd.stageId()); } // TODO: implement implicit AsyncRDDActions conversion instead of jc.monitor()? // TODO: how to handle stage failures? String clientId = getClientId(jobId); if (clientId != null) { protocol.sendMetrics(clientId, jobId, taskEnd.stageId(), taskEnd.taskInfo().taskId(), metrics); } } }
/**
 * Builds a Metrics snapshot from Spark's raw {@code TaskMetrics} plus the task's
 * wall-clock duration taken from {@code TaskInfo}.
 *
 * <p>Note the unit normalization: Spark reports CPU times in nanoseconds while the
 * other timings are already milliseconds, so the two CPU values are converted to
 * milliseconds here before delegating to the canonical constructor.
 *
 * @param metrics  per-task metrics as reported by Spark (times in ms except CPU times, which are ns)
 * @param taskInfo task bookkeeping info; only {@code duration()} is read here
 */
public Metrics(TaskMetrics metrics, TaskInfo taskInfo) {
  this(
      metrics.executorDeserializeTime(),
      // ns -> ms: keep units consistent with the other fields
      TimeUnit.NANOSECONDS.toMillis(metrics.executorDeserializeCpuTime()),
      metrics.executorRunTime(),
      // ns -> ms
      TimeUnit.NANOSECONDS.toMillis(metrics.executorCpuTime()),
      metrics.resultSize(),
      metrics.jvmGCTime(),
      metrics.resultSerializationTime(),
      metrics.memoryBytesSpilled(),
      metrics.diskBytesSpilled(),
      taskInfo.duration(),
      // Optional sub-metric groups: absent when the task did no input/shuffle/output work
      optionalInputMetric(metrics),
      optionalShuffleReadMetric(metrics),
      optionalShuffleWriteMetrics(metrics),
      optionalOutputMetrics(metrics));
}
@Override public void onTaskEnd(SparkListenerTaskEnd taskEnd) { try { String status = taskEnd.taskInfo().status(); .setStartTime(taskEnd.taskInfo().launchTime()) .setTaskId(String.valueOf(taskEnd.taskInfo().taskId())) .setStageId(String.valueOf(taskEnd.stageId())) .setStageAttemptId(String.valueOf(taskEnd.stageAttemptId())) .setExecutorHostname(String.valueOf(taskEnd.taskInfo().host())); tryToSet(() -> taskEventBuilder.setLocality(taskEnd.taskInfo().taskLocality().toString())); tryToSet(() -> taskEventBuilder.setType(taskEnd.taskType())); tryToSet(() -> taskEventBuilder.setAttemptNumber(taskEnd.taskInfo().attemptNumber())); tryToSet(() -> taskEventBuilder.setExecutorCpuTime(taskEnd.taskMetrics().executorCpuTime())); tryToSet(() -> taskEventBuilder.setExecutorDeserializeCpuTime(taskEnd.taskMetrics().executorDeserializeCpuTime())); this.eventHandler.accept(taskEnd.taskInfo().finishTime(), buildOverrideHeader(taskEnd.taskInfo().executorId()), taskEventBuilder.build()); } catch (Throwable t) { LOGGER.warn("Failed to send event for onTaskEnd", t);
/**
 * Relays task completion/cancellation to the Spark UI manager. The end reason is
 * classified by its string form: exact "Success" marks a normal finish, while a
 * reason mentioning "stage cancelled" marks a cancellation; other reasons are ignored.
 */
@Override
public void onTaskEnd(SparkListenerTaskEnd taskEnd) {
  super.onTaskEnd(taskEnd);
  final String reason = taskEnd.reason().toString();
  final int stageId = taskEnd.stageId();
  final long taskId = taskEnd.taskInfo().taskId();
  if (reason.equals("Success")) {
    sparkUIManager.taskEnd(stageId, taskId);
  } else if (reason.contains("stage cancelled")) {
    sparkUIManager.taskCancelled(stageId, taskId);
  }
}
});
/**
 * Records per-task timing on completion: refreshes the stage's last-event timestamp
 * for timeout tracking, then logs the task's launch-to-finish wall time.
 */
@Override
public void onTaskEnd(final SparkListenerTaskEnd taskEnd) {
  final TaskInfo info = taskEnd.taskInfo();
  final int stageId = taskEnd.stageId();
  // Keep the stage's inactivity watchdog from firing.
  TimeoutManager.getInstance().setLastEventTime(stageId);
  final long elapsed = info.finishTime() - info.launchTime();
  SparkJobTracker.recordTaskTime(stageId, elapsed);
}
/**
 * Notifies the Spark UI manager that a task has started, after delegating to the
 * superclass handler.
 */
@Override
public void onTaskStart(SparkListenerTaskStart taskStart) {
  super.onTaskStart(taskStart);
  final int stageId = taskStart.stageId();
  final long taskId = taskStart.taskInfo().taskId();
  sparkUIManager.taskStart(stageId, taskId);
}
@Override public void onTaskEnd(SparkListenerTaskEnd taskEnd) { if (taskEnd.reason() instanceof org.apache.spark.Success$ && !taskEnd.taskInfo().speculative()) { Metrics metrics = new Metrics(taskEnd.taskMetrics()); Integer jobId; synchronized (stageToJobId) { jobId = stageToJobId.get(taskEnd.stageId()); } // TODO: implement implicit AsyncRDDActions conversion instead of jc.monitor()? // TODO: how to handle stage failures? String clientId = getClientId(jobId); if (clientId != null) { protocol.sendMetrics(clientId, jobId, taskEnd.stageId(), taskEnd.taskInfo().taskId(), metrics); } } }
/**
 * Collects, for stage 1 only, each task's shuffle-read record count by scanning the
 * task's accumulators for the internal {@code shuffle.read.recordsRead} metric and
 * storing it keyed by task id.
 *
 * <p>Fixes over the original: the raw {@code AccumulatorV2} type is replaced with the
 * properly wildcarded form, and the loop-invariant stage-id check is hoisted out of
 * the accumulator loop so non-matching stages return immediately. Behavior is
 * otherwise unchanged.
 */
@Override
public void onTaskEnd(SparkListenerTaskEnd taskEnd) {
  // Only stage 1 is of interest — bail out before touching the accumulators.
  if (taskEnd.stageId() != 1) {
    return;
  }
  Iterator<AccumulatorV2<?, ?>> iterator = taskEnd.taskMetrics().accumulators().iterator();
  while (iterator.hasNext()) {
    AccumulatorV2<?, ?> accumulator = iterator.next();
    // Match only the named internal shuffle-read counter; unnamed or unregistered
    // accumulators are skipped.
    if (accumulator.isRegistered()
        && accumulator.name().isDefined()
        && accumulator.name().get().equals("internal.metrics.shuffle.read.recordsRead")) {
      // NOTE(review): the (Long) cast assumes this internal metric is a LongAccumulator,
      // as in the original code — confirm against the Spark version in use.
      stageOneShuffleReadTaskRecordsCountMap.put(
          taskEnd.taskInfo().taskId(), (Long) accumulator.value());
    }
  }
}
});
@Override public void onTaskEnd(SparkListenerTaskEnd taskEnd) { if (taskEnd.reason() instanceof org.apache.spark.Success$ && !taskEnd.taskInfo().speculative()) { Metrics metrics = new Metrics(taskEnd.taskMetrics()); Integer jobId; synchronized (stageToJobId) { jobId = stageToJobId.get(taskEnd.stageId()); } // TODO: implement implicit AsyncRDDActions conversion instead of jc.monitor()? // TODO: how to handle stage failures? String clientId = getClientId(jobId); if (clientId != null) { protocol.sendMetrics(clientId, jobId, taskEnd.stageId(), taskEnd.taskInfo().taskId(), metrics); } } }