public String getSubmittedId() {
    if (job == null) {
        return null;
    }
    JobID submittedJobId = job.getJobID();
    if (submittedJobId == null) {
        return null;
    } else {
        return submittedJobId.toString();
    }
}
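Note: in the Hadoop mapreduce API, Job.getJobID() stays null until the job has actually been submitted, which is why the getter above guards both the job and its ID. A minimal sketch of that behavior (the class and job name are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;

public class JobIdNullCheckDemo {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "job-id-demo");
        // No submission has happened yet, so no ID has been assigned.
        JobID id = job.getJobID();
        System.out.println(id == null ? "not submitted yet" : id.toString());
    }
}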
@Override
public org.apache.hadoop.mapreduce.JobContext newJobContext(Job job) {
    return new JobContextImpl(job.getConfiguration(), job.getJobID());
}
public Map<String, String> getInfo() {
    if (job != null) {
        Map<String, String> status = new HashMap<String, String>();
        if (null != job.getJobID()) {
            status.put(ExecutableConstants.MR_JOB_ID, job.getJobID().toString());
        }
        if (null != job.getTrackingURL()) {
            status.put(ExecutableConstants.YARN_APP_URL, job.getTrackingURL().toString());
        }
        return status;
    } else {
        return Collections.emptyMap();
    }
}
public Map<String, String> getInfo() throws JobException {
    if (job != null) {
        Map<String, String> status = new HashMap<String, String>();
        if (null != job.getJobID()) {
            status.put(JobInstance.MR_JOB_ID, job.getJobID().toString());
        }
        if (null != job.getTrackingURL()) {
            status.put(JobInstance.YARN_APP_URL, job.getTrackingURL().toString());
        }
        return status;
    } else {
        throw new JobException("Job is null");
    }
}
            job.getJobID().toString(), HadoopCompatLoader.DEFAULT_COMPAT.getJobFailureString(job));
    } catch (RuntimeException e) {
        f = "Job failed (unable to read job status programmatically -- see MapReduce logs for information)";
@Override
public void cancel() throws IOException {
    try {
        for (Map.Entry<Dataset, Job> entry : MRCompactor.RUNNING_MR_JOBS.entrySet()) {
            Job hadoopJob = entry.getValue();
            if (!hadoopJob.isComplete()) {
                LOG.info(String.format("Killing hadoop job %s for dataset %s",
                        hadoopJob.getJobID(), entry.getKey()));
                hadoopJob.killJob();
            }
        }
    } finally {
        try {
            ExecutorsUtils.shutdownExecutorService(this.jobExecutor, Optional.of(LOG), 0, TimeUnit.NANOSECONDS);
        } finally {
            if (this.verifier.isPresent()) {
                this.verifier.get().closeNow();
            }
        }
    }
}
public static JobStepStatusEnum checkStatus(Job job, StringBuilder output) {
    if (job == null || job.getJobID() == null) {
        output.append("Skip status check with empty job id..\n");
        return JobStepStatusEnum.WAITING;
JobID submittedJobId = job.getJobID();
if (metastoreTokenStrForm != null) {
String jobID = job.getJobID().toString();
job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID);
public static void cleanup(Job job) throws IOException {
    final Path jobDir = getJobPath(job.getJobID(), job.getWorkingDirectory());
    final FileSystem fs = jobDir.getFileSystem(job.getConfiguration());
    RuntimeException e = null;
    try {
        JobHelper.deleteWithRetry(fs, jobDir, true);
    } catch (RuntimeException ex) {
        e = ex;
    }
    try {
        JobHelper.deleteWithRetry(fs, getJobClassPathDir(job.getJobName(), job.getWorkingDirectory()), true);
    } catch (RuntimeException ex) {
        if (e == null) {
            e = ex;
        } else {
            e.addSuppressed(ex);
        }
    }
    if (e != null) {
        throw e;
    }
}
if (groupByJob.getJobID() != null) {
    JobHelper.writeJobIdToFile(config.getHadoopJobIdFileName(), groupByJob.getJobID().toString());
}
// ...
log.error("Job failed: %s", groupByJob.getJobID());
failureCause = Utils.getFailureMessage(groupByJob, config.JSON_MAPPER);
return false;

if (dimSelectionJob.getJobID() != null) {
    JobHelper.writeJobIdToFile(config.getHadoopJobIdFileName(), dimSelectionJob.getJobID().toString());
}
// ...
log.error("Job failed: %s", dimSelectionJob.getJobID().toString());
failureCause = Utils.getFailureMessage(dimSelectionJob, config.JSON_MAPPER);
return false;
compactState.setProp(CompactionSlaEventHelper.RECORD_COUNT_TOTAL, Long.toString(newTotalRecords));
compactState.setProp(CompactionSlaEventHelper.EXEC_COUNT_TOTAL, Long.toString(executeCount + 1));
compactState.setProp(CompactionSlaEventHelper.MR_JOB_ID, this.configurator.getConfiguredJob().getJobID().toString());
helper.saveState(new Path(result.getDstAbsoluteDir()), compactState);

// ...
        CompactionSlaEventHelper.PREV_RECORD_COUNT_TOTAL, Long.toString(oldTotalRecords),
        CompactionSlaEventHelper.EXEC_COUNT_TOTAL, Long.toString(executeCount + 1),
        CompactionSlaEventHelper.MR_JOB_ID, this.configurator.getConfiguredJob().getJobID().toString());
this.eventSubmitter.submit(CompactionSlaEventHelper.COMPACTION_RECORD_COUNT_EVENT, eventMetadataMap);
try {
    job.killJob();
} catch (Exception e) {
    logger.warn("failed to kill hadoop job: " + job.getJobID(), e);
}
protected void moveTmpPathToOutputPath() throws IOException {
    Path oldPath = null;
    if (fs.exists(outputPath)) {
        oldPath = new Path("/tmp", "_old_" + job.getJobID());
        moveExistingContentInOutputPathToOldPath(oldPath);
    }
    log.info("Moving " + tmpPath + " to " + outputPath);
    mkdirs(fs, outputPath.getParent(), perm, job.getConfiguration());
    if (!fs.rename(tmpPath, outputPath)) {
        // Restore any previous output and clean up the tmp dir before failing.
        // oldPath is null when the output path did not exist beforehand, so
        // guard the restore to avoid an NPE that would mask the real error.
        if (oldPath != null) {
            fs.rename(oldPath, outputPath);
        }
        fs.delete(tmpPath, true);
        throw new RuntimeException("Error: cannot rename " + tmpPath + " to " + outputPath);
    }
    deleteOldPath(oldPath);
}
JobID jobId = job.getJobID();
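Several of the call sites above persist the submitted ID (to a file or the job configuration) so that an external process can later locate and kill the job. A minimal sketch of that pattern; the JobIdPersistence class and the target path are hypothetical:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.hadoop.mapreduce.Job;

public final class JobIdPersistence {
    private JobIdPersistence() {
    }

    // Records the submitted job's ID in a local file; a no-op if the job
    // has not been submitted yet (getJobID() is still null at that point).
    public static void writeJobIdToFile(Job job, String path) throws IOException {
        if (job == null || job.getJobID() == null) {
            return;
        }
        Files.write(Paths.get(path), job.getJobID().toString().getBytes(StandardCharsets.UTF_8));
    }
}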