overwriteJobConf(conf, executableContext.getConfig(), getMapReduceParams().trim().split("\\s+"));
Job job = new Cluster(conf).getJob(JobID.forName(mrJobId));
if (job == null || job.getJobState() == JobStatus.State.FAILED) {
@Override
protected void onExecuteStart(ExecutableContext executableContext) {
    final Output output = executableManager.getOutput(getId());
    if (output.getExtra().containsKey(START_TIME)) {
        final String mrJobId = output.getExtra().get(ExecutableConstants.MR_JOB_ID);
        if (mrJobId == null) {
            executableManager.updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
            return;
        }
        try {
            Job job = new Cluster(new Configuration()).getJob(JobID.forName(mrJobId));
            // guard against Cluster.getJob() returning null for an unknown job
            if (job == null || job.getJobState() == JobStatus.State.FAILED) {
                // remove previous mr job info
                super.onExecuteStart(executableContext);
            } else {
                executableManager.updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
            }
        } catch (IOException e) {
            logger.warn("error getting hadoop job status", e);
            super.onExecuteStart(executableContext);
        } catch (InterruptedException e) {
            logger.warn("error getting hadoop job status", e);
            super.onExecuteStart(executableContext);
        }
    } else {
        super.onExecuteStart(executableContext);
    }
}
/**
 * Tells the service to get the state of the current job.
 */
public synchronized int getJobState() throws IOException {
    try {
        return job.getJobState().getValue();
    } catch (InterruptedException ie) {
        throw new IOException(ie);
    }
}
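The wrapper above flattens the JobStatus.State enum into its numeric code via getValue(). For callers that only need to block until the job finishes, a minimal polling sketch follows; the class and method names are illustrative and not part of Hadoop:

import java.io.IOException;

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobStatus;

public final class JobStatePoller {

    /**
     * Sleeps until the submitted job reaches a terminal state, then returns
     * that state. Job.isComplete() is true for SUCCEEDED, FAILED, and KILLED.
     */
    public static JobStatus.State awaitTerminalState(Job job, long pollMillis)
            throws IOException, InterruptedException {
        while (!job.isComplete()) {
            Thread.sleep(pollMillis);
        }
        return job.getJobState();
    }
}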
" status remains " + job.getJobState()); jobList.remove(job);
if (workloadJob != null) {
    try {
        workloadAppState = workloadJob.getJobState();
    } catch (IOException ioe) {
        LOG.warn("Unable to fetch completion status of workload job. "
            + "Will proceed to attempt to kill it.", ioe);
protected void displayJobList(Job[] jobs) throws IOException, InterruptedException {
    System.out.println("Total jobs:" + jobs.length);
    System.out.println("JobId\tState\tStartTime\t"
        + "UserName\tPriority\tSchedulingInfo");
    for (Job job : jobs) {
        System.out.printf("%s\t%s\t%d\t%s\t%s\t%s\n",
            job.getJobID().toString(), job.getJobState(), job.getStartTime(),
            job.getUser(), job.getPriority().name(), job.getSchedulingInfo());
    }
}
workloadJob.monitorAndPrintJob();
Thread.sleep(5000);
workloadAppState = workloadJob.getJobState();
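The workload-job fragments above follow one pattern: fetch the job state, and if the lookup fails or the job is still live, fall back to killing it. A condensed sketch of that fallback, assuming a submitted Job named workloadJob and a LOG as in the fragments:

// Check-then-kill fallback: if the state cannot be fetched, or the job has
// not reached a terminal state, attempt to kill it.
static void ensureWorkloadJobStopped(Job workloadJob) throws IOException {
    JobStatus.State state = null;
    try {
        state = workloadJob.getJobState();
    } catch (IOException | InterruptedException e) {
        LOG.warn("Unable to fetch completion status of workload job. "
            + "Will proceed to attempt to kill it.", e);
    }
    if (state != JobStatus.State.SUCCEEDED && state != JobStatus.State.FAILED
            && state != JobStatus.State.KILLED) {
        workloadJob.killJob();
    }
}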
@Override
protected void onExecuteStart(ExecutableContext executableContext) {
    final Output output = executableManager.getOutput(getId());
    if (output.getExtra().containsKey(START_TIME)) {
        final String mrJobId = output.getExtra().get(ExecutableConstants.MR_JOB_ID);
        if (mrJobId == null) {
            executableManager.updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
            return;
        }
        try {
            Configuration conf = HadoopUtil.getCurrentConfiguration();
            Job job = new Cluster(conf).getJob(JobID.forName(mrJobId));
            // guard against Cluster.getJob() returning null for an unknown job
            if (job == null || job.getJobState() == JobStatus.State.FAILED) {
                // remove previous mr job info
                super.onExecuteStart(executableContext);
            } else {
                executableManager.updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
            }
        } catch (IOException e) {
            logger.warn("error getting hadoop job status", e);
            super.onExecuteStart(executableContext);
        } catch (InterruptedException e) {
            logger.warn("error getting hadoop job status", e);
            super.onExecuteStart(executableContext);
        }
    } else {
        super.onExecuteStart(executableContext);
    }
}
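Both onExecuteStart variants above reduce to the same recovery check: look the previous MapReduce job up by its saved ID and start over if it is unknown to the cluster or has already failed. A standalone sketch of that check; the class and method names are illustrative:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;

public final class MRJobRecoveryCheck {

    /**
     * Returns true when the previously recorded MR job cannot be resumed:
     * the cluster no longer knows it, or it has already failed.
     */
    public static boolean needsRestart(Configuration conf, String mrJobId)
            throws IOException, InterruptedException {
        Job job = new Cluster(conf).getJob(JobID.forName(mrJobId));
        return job == null || job.getJobState() == JobStatus.State.FAILED;
    }
}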
boolean succeeded = job.waitForCompletion(true);
Assert.assertTrue(succeeded);
Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
Assert.assertTrue("Tracking URL was " + trackingUrl
    + " but didn't Match Job ID " + jobId,
/**
 * To ensure nothing is broken after we removed normalization
 * from the MRAM side.
 * @throws Exception
 */
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
    if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
            + " not found. Not running test.");
        return;
    }

    JobConf jobConf = new JobConf(mrCluster.getConfig());
    jobConf.setInt("mapreduce.map.memory.mb", 700);
    jobConf.setInt("mapred.reduce.memory.mb", 1500);
    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(jobConf);
    Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
    job.setJarByClass(SleepJob.class);
    job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
    job.submit();
    boolean completed = job.waitForCompletion(true);
    Assert.assertTrue("Job should be completed", completed);
    Assert.assertEquals("Job should be finished successfully",
        JobStatus.State.SUCCEEDED, job.getJobState());
}
@Override
public Void run() throws Exception {
    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(mrCluster.getConfig());
    Job job = sleepJob.createJob(3, 0, 10000, 1, 0, 0);
    // Job with reduces:
    // Job job = sleepJob.createJob(3, 2, 10000, 1, 10000, 1);
    job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
    job.submit();
    String trackingUrl = job.getTrackingURL();
    String jobId = job.getJobID().toString();
    job.waitForCompletion(true);
    Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
    Assert.assertTrue("Tracking URL was " + trackingUrl
        + " but didn't Match Job ID " + jobId,
        trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
    return null;
  }
});
Assert.assertEquals(TaskCompletionEvent.Status.TIPFAILED, events[1].getStatus());
Assert.assertEquals(JobStatus.State.FAILED, job.getJobState());
verifyFailingMapperCounters(job);
Assert.assertTrue(status == TaskCompletionEvent.Status.FAILED
    || status == TaskCompletionEvent.Status.TIPFAILED);
Assert.assertEquals(JobStatus.State.FAILED, job.getJobState());
Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
Counters counters = job.getCounters();
Assert.assertEquals(2,
    counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue());

Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
counters = job.getCounters();

Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
counters = job.getCounters();