public static Path getJobPath(JobID jobID, Path workingDirectory) {
  return new Path(workingDirectory, jobID.toString());
}
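// A minimal usage sketch of getJobPath (the id and directory below are
// illustrative, not taken from the source):
JobID id = JobID.forName("job_1700000000000_0001");     // hypothetical job id
Path jobDir = getJobPath(id, new Path("/tmp/staging")); // -> /tmp/staging/job_1700000000000_0001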
/**
 * Gets the job file.
 *
 * @param conf Configuration.
 * @param usr User.
 * @param jobId Job ID.
 * @return Job file.
 */
public static Path jobFile(Configuration conf, String usr, JobID jobId) {
  return new Path(stagingAreaDir(conf, usr),
      jobId.toString() + Path.SEPARATOR + MRJobConfig.JOB_CONF_FILE);
}
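// Example of the resolved path, assuming a staging area of /user/alice/.staging
// (hypothetical); MRJobConfig.JOB_CONF_FILE is the constant "job.xml":
//   /user/alice/.staging/job_1700000000000_0001/job.xml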
public String getSubmittedId() {
  if (job == null) {
    return null;
  }
  JobID submittedJobId = job.getJobID();
  if (submittedJobId == null) {
    return null;
  } else {
    return submittedJobId.toString();
  }
}
/**
 * Returns all jobs tagged with the given tag that have been started after the
 * given timestamp. Returned jobIds are MapReduce JobIds.
 */
@Override
public Set<String> getJobs(String tag, long timestamp) {
  Set<ApplicationId> childYarnJobs = getYarnChildJobs(tag, timestamp);
  Set<String> childJobs = new HashSet<String>();
  for (ApplicationId id : childYarnJobs) {
    // Convert to a MapReduce job id
    String childJobId = TypeConverter.fromYarn(id).toString();
    childJobs.add(childJobId);
  }
  return childJobs;
}
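// Illustration of the conversion performed above (ids are hypothetical):
// TypeConverter.fromYarn maps ApplicationId "application_1700000000000_0042"
// to MapReduce JobID "job_1700000000000_0042" -- same cluster timestamp and
// sequence number, only the prefix changes.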
String percent = TempletonUtils.extractPercentComplete(line);
String childid = TempletonUtils.extractChildJobId(line);
updateJobStatePercentAndChildId(conf, jobid.toString(), percent, childid);
public Map<String, String> getInfo() {
  if (job != null) {
    Map<String, String> status = new HashMap<String, String>();
    if (null != job.getJobID()) {
      status.put(ExecutableConstants.MR_JOB_ID, job.getJobID().toString());
    }
    if (null != job.getTrackingURL()) {
      status.put(ExecutableConstants.YARN_APP_URL, job.getTrackingURL().toString());
    }
    return status;
  } else {
    return Collections.emptyMap();
  }
}
      job.getJobID().toString(),
      HadoopCompatLoader.DEFAULT_COMPAT.getJobFailureString(job));
} catch (RuntimeException e) {
  f = "Job failed (unable to read job status programmatically -- see MapReduce logs for information)";
public static String getApplicationWebURLOnJHSWithoutScheme(Configuration conf, ApplicationId appId)
    throws UnknownHostException {
  // Construct the history URL for the job
  String addr = getJHSWebappURLWithoutScheme(conf);
  Iterator<String> it = ADDR_SPLITTER.split(addr).iterator();
  it.next(); // ignore the bind host
  String port = it.next();
  // Use the history server address to figure out the host for the webapp
  addr = conf.get(XLearningConfiguration.XLEARNING_HISTORY_ADDRESS,
      XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_ADDRESS);
  String host = ADDR_SPLITTER.split(addr).iterator().next();
  String hsAddress = JOINER.join(host, ":", port);
  InetSocketAddress address = NetUtils.createSocketAddr(
      hsAddress, getDefaultJHSWebappPort(), getDefaultJHSWebappURLWithoutScheme());
  StringBuffer sb = new StringBuffer();
  if (address.getAddress().isAnyLocalAddress() || address.getAddress().isLoopbackAddress()) {
    sb.append(InetAddress.getLocalHost().getCanonicalHostName());
  } else {
    sb.append(address.getHostName());
  }
  sb.append(":").append(address.getPort());
  sb.append("/jobhistory/job/");
  JobID jobId = TypeConverter.fromYarn(appId);
  sb.append(jobId.toString());
  return sb.toString();
}
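// Example of the returned value (host and port are hypothetical; the real ones
// come from the configured history-server address):
//   history.example.com:19888/jobhistory/job/job_1700000000000_0042
// The scheme is deliberately omitted, so callers prepend http:// or https://.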
public Map<String, String> getInfo() throws JobException {
  if (job != null) {
    Map<String, String> status = new HashMap<String, String>();
    if (null != job.getJobID()) {
      status.put(JobInstance.MR_JOB_ID, job.getJobID().toString());
    }
    if (null != job.getTrackingURL()) {
      status.put(JobInstance.YARN_APP_URL, job.getTrackingURL().toString());
    }
    return status;
  } else {
    throw new JobException("Job is null");
  }
}
killLauncherChildJobs(conf, context.getJobID().toString());
// (argument list continued from an elided call)
    context.getJobID().toString(), conf.get("user.name"), conf.get(OVERRIDE_CLASSPATH));
updateJobStateToDoneAndWriteExitValue(conf, statusdir, context.getJobID().toString(),
    proc.exitValue());
LOG.info("templeton: collecting logs for " + context.getJobID().toString()
    + " to " + statusdir + "/logs");
LogRetriever logRetriever = new LogRetriever(statusdir, jobType, conf);
String jobID = job.getJobID().toString();
job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID);
WebHCatJTShim tracker = ShimLoader.getHadoopShims().getWebHCatShim(conf, ugi);
try {
  Set<String> childJobs = tracker.getJobs(context.getJobID().toString(), startTime);
  if (childJobs.size() == 0) {
    LOG.info("No child jobs found to reconnect with");
    updateJobStatePercentAndChildId(conf, context.getJobID().toString(), null, childJobIdString);
    exitCode = 1;
    updateJobStateToDoneAndWriteExitValue(conf, statusdir, context.getJobID().toString(), exitCode);
    break;
  }
  updateJobStatePercentAndChildId(conf, context.getJobID().toString(), percent, null);
/**
 * @param ctx Context for IO operations.
 */
public HadoopV2Context(HadoopV2TaskContext ctx) {
  super(ctx.jobConf(), ctx.jobContext().getJobID());

  taskAttemptID = ctx.attemptId();

  conf.set(MRJobConfig.ID, taskAttemptID.getJobID().toString());
  conf.set(MRJobConfig.TASK_ID, taskAttemptID.getTaskID().toString());
  conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskAttemptID.toString());

  output = ctx.output();
  input = ctx.input();

  this.ctx = ctx;
}
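// For reference, these MRJobConfig constants resolve to the standard Hadoop
// keys "mapreduce.job.id", "mapreduce.task.id" and "mapreduce.task.attempt.id",
// so code reading the Configuration later can recover the full attempt identity.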
compactState.setProp(CompactionSlaEventHelper.RECORD_COUNT_TOTAL, Long.toString(newTotalRecords));
compactState.setProp(CompactionSlaEventHelper.EXEC_COUNT_TOTAL, Long.toString(executeCount + 1));
compactState.setProp(CompactionSlaEventHelper.MR_JOB_ID,
    this.configurator.getConfiguredJob().getJobID().toString());
helper.saveState(new Path(result.getDstAbsoluteDir()), compactState);
// (key/value pairs continued from an elided metadata-map builder call)
    CompactionSlaEventHelper.PREV_RECORD_COUNT_TOTAL, Long.toString(oldTotalRecords),
    CompactionSlaEventHelper.EXEC_COUNT_TOTAL, Long.toString(executeCount + 1),
    CompactionSlaEventHelper.MR_JOB_ID, this.configurator.getConfiguredJob().getJobID().toString());
this.eventSubmitter.submit(CompactionSlaEventHelper.COMPACTION_RECORD_COUNT_EVENT, eventMetadataMap);
JobHelper.writeJobIdToFile(config.getHadoopJobIdFileName(), groupByJob.getJobID().toString());
JobHelper.writeJobIdToFile(config.getHadoopJobIdFileName(), dimSelectionJob.getJobID().toString());
log.error("Job failed: %s", dimSelectionJob.getJobID().toString());
failureCause = Utils.getFailureMessage(dimSelectionJob, config.JSON_MAPPER);
return false;
JobHelper.writeJobIdToFile(config.getHadoopJobIdFileName(), job.getJobID().toString());
JobHelper.writeJobIdToFile(config.getHadoopJobIdFileName(), groupByJob.getJobID().toString());
LOG.info(String.format("Waiting for Hadoop MR job %s to complete", this.job.getJobID())); this.job.waitForCompletion(true); mrJobRunTimer.stop(ImmutableMap.of("hadoopMRJobId", this.job.getJobID().toString()));