/** {@inheritDoc} */
@Override public void killJob(JobID jobId) throws IOException, InterruptedException {
    try {
        execute(HadoopProtocolKillJobTask.class, jobId.getJtIdentifier(), jobId.getId());
    }
    catch (GridClientException e) {
        throw new IOException("Failed to kill job: " + jobId, e);
    }
}
@Override public TaskAttemptID newTaskAttemptID(JobID jobId, boolean isMap, int taskId, int id) {
    return new TaskAttemptID(jobId.getJtIdentifier(), jobId.getId(),
        isMap ? TaskType.MAP : TaskType.REDUCE, taskId, id);
}
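A minimal usage sketch of the factory above; the jtIdentifier and the numeric IDs are hypothetical, and the rendered string assumes Hadoop's standard attempt ID formatting.

// Hypothetical IDs for illustration only.
JobID jobId = new JobID("20180101", 5);

// Map-side attempt ID for the first task, first attempt.
TaskAttemptID attempt = newTaskAttemptID(jobId, true, 0, 0);

// Renders as "attempt_20180101_0005_m_000000_0".
System.out.println(attempt);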
/** {@inheritDoc} */
@Override public Counters getJobCounters(JobID jobId) throws IOException, InterruptedException {
    try {
        final HadoopCounters counters = execute(HadoopProtocolJobCountersTask.class,
            jobId.getJtIdentifier(), jobId.getId());

        if (counters == null)
            throw new IOException("Job tracker doesn't have any information about the job: " + jobId);

        return new HadoopMapReduceCounters(counters);
    }
    catch (GridClientException e) {
        throw new IOException("Failed to get job counters: " + jobId, e);
    }
}
/** {@inheritDoc} */
@Override public JobStatus getJobStatus(JobID jobId) throws IOException, InterruptedException {
    try {
        Long delay = conf.getLong(HadoopJobProperty.JOB_STATUS_POLL_DELAY.propertyName(), -1);

        HadoopJobStatus status;

        if (delay >= 0)
            status = execute(HadoopProtocolJobStatusTask.class, jobId.getJtIdentifier(), jobId.getId(), delay);
        else
            status = execute(HadoopProtocolJobStatusTask.class, jobId.getJtIdentifier(), jobId.getId());

        if (status == null)
            throw new IOException("Job tracker doesn't have any information about the job: " + jobId);

        return processStatus(status);
    }
    catch (GridClientException e) {
        throw new IOException("Failed to get job status: " + jobId, e);
    }
}
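A sketch of how a client could opt in to the delayed status polling read above; the property name comes from HadoopJobProperty as used in the method, but the one-second value is an arbitrary example.

Configuration conf = new Configuration();

// A non-negative delay switches getJobStatus to the delayed task variant;
// a negative or unset value keeps the plain status request.
conf.setLong(HadoopJobProperty.JOB_STATUS_POLL_DELAY.propertyName(), 1000L);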
/** {@inheritDoc} */
@Override public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts) throws IOException,
    InterruptedException {
    try {
        conf.setLong(HadoopCommonUtils.JOB_SUBMISSION_START_TS_PROPERTY, U.currentTimeMillis());

        byte[] credentials = null;

        if (ts != null)
            credentials = WritableUtils.toByteArray(ts);

        HadoopJobStatus status = execute(HadoopProtocolSubmitJobTask.class,
            jobId.getJtIdentifier(), jobId.getId(), createJobInfo(conf, credentials));

        if (status == null)
            throw new IOException("Failed to submit job (null status obtained): " + jobId);

        return processStatus(status);
    }
    catch (GridClientException | IgniteCheckedException e) {
        throw new IOException("Failed to submit job.", e);
    }
}
/**
 * Test next job ID generation.
 *
 * @throws Exception If failed.
 */
private void tstNextJobId() throws Exception {
    IgniteHadoopClientProtocolProvider provider = provider();

    ClientProtocol proto = provider.create(config(HadoopAbstractSelfTest.REST_PORT));

    JobID jobId = proto.getNewJobID();

    assert jobId != null;
    assert jobId.getJtIdentifier() != null;

    JobID nextJobId = proto.getNewJobID();

    assert nextJobId != null;
    assert nextJobId.getJtIdentifier() != null;

    assert !F.eq(jobId, nextJobId);
}
/**
 * Test of getJobID method, of class TaskID.
 */
@Test
public void testGetJobID() {
    JobID jobId = new JobID("1234", 0);
    TaskID taskId = new TaskID(jobId, TaskType.MAP, 0);
    assertSame("TaskID did not store the JobID correctly", jobId, taskId.getJobID());

    taskId = new TaskID();
    assertEquals("Job ID was set unexpectedly in default constructor", "",
        taskId.getJobID().getJtIdentifier());
}
private static void setJobID(Job job, JobID jobID, String namedOutput) {
    JobID newJobID = jobID == null || jobID.getJtIdentifier().contains(namedOutput)
        ? jobID
        : new JobID(jobID.getJtIdentifier() + "_" + namedOutput, jobID.getId());
    job.setJobID(newJobID);
}
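A sketch of the renaming behavior with hypothetical values; the contains(...) guard above makes repeated calls with the same named output idempotent.

// Hypothetical job and output name, assuming this helper's enclosing class.
Job job = Job.getInstance(new Configuration());
JobID original = new JobID("localjob", 1);

// First call: the jtIdentifier becomes "localjob_avroOut".
setJobID(job, original, "avroOut");

// A second call is a no-op, since the identifier already contains "avroOut".
setJobID(job, job.getJobID(), "avroOut");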
/**
 * Downgrade a new JobID to an old one.
 *
 * @param old a new or old JobID
 * @return either old or a new JobID built to match old
 */
public static JobID downgrade(org.apache.hadoop.mapreduce.JobID old) {
    if (old instanceof JobID) {
        return (JobID) old;
    } else {
        return new JobID(old.getJtIdentifier(), old.getId());
    }
}
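A round-trip usage sketch with an invented identifier string. Since org.apache.hadoop.mapred.JobID extends the mapreduce one, an already-old instance passes through unchanged; anything else is rebuilt field by field.

// A new-API job ID (identifier value is hypothetical).
org.apache.hadoop.mapreduce.JobID newId =
    new org.apache.hadoop.mapreduce.JobID("201801011200", 42);

// Downgrade to the old mapred API; identifier and id carry over.
JobID oldId = downgrade(newId);

// Both render as "job_201801011200_0042".
assert oldId.getJtIdentifier().equals(newId.getJtIdentifier());
assert oldId.getId() == newId.getId();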
private String getJobJtIdentifier(Configuration conf) {
    JobID job = Preconditions.checkNotNull(
        HadoopFormats.getJobId(conf),
        "Configuration must contain jobID under key %s.",
        HadoopFormatIO.JOB_ID);
    return job.getJtIdentifier();
}
public static TezDAGID fromMRJobId(org.apache.hadoop.mapreduce.JobID jobId) {
    return TezDAGID.getInstance(ApplicationId.newInstance(
        Long.parseLong(jobId.getJtIdentifier()), jobId.getId()), 1);
}
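This conversion assumes the jtIdentifier is the numeric cluster timestamp, as in YARN-generated job IDs; Long.parseLong throws otherwise. A sketch with a made-up timestamp:

// The jtIdentifier doubles as the cluster timestamp here (value invented).
org.apache.hadoop.mapreduce.JobID mrJobId =
    new org.apache.hadoop.mapreduce.JobID("1515151515151", 3);

// The DAG index is fixed at 1 by the conversion above.
TezDAGID dagId = fromMRJobId(mrJobId);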
public static JobId toYarn(org.apache.hadoop.mapreduce.JobID id) {
    JobId jobId = recordFactory.newRecordInstance(JobId.class);
    jobId.setId(id.getId());

    // Currently there is a 1-1 mapping between appid and jobid.
    ApplicationId appId = ApplicationId.newInstance(
        toClusterTimeStamp(id.getJtIdentifier()), id.getId());
    jobId.setAppId(appId);

    return jobId;
}
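A usage sketch, assuming toClusterTimeStamp parses the numeric jtIdentifier into the application's cluster timestamp; the values are illustrative.

// The jtIdentifier must be parseable as a cluster timestamp (values invented).
org.apache.hadoop.mapreduce.JobID mrId =
    new org.apache.hadoop.mapreduce.JobID("1515151515151", 7);

JobId yarnId = toYarn(mrId);

// yarnId.getAppId() corresponds to application_1515151515151_0007.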
private static TaskAttemptContext getTaskContext(TaskAttemptContext baseContext, Job job) {
    org.apache.hadoop.mapreduce.TaskAttemptID baseTaskId = baseContext.getTaskAttemptID();

    // Create a task ID context with our specialized job ID.
    org.apache.hadoop.mapreduce.TaskAttemptID taskId = new org.apache.hadoop.mapreduce.TaskAttemptID(
        job.getJobID().getJtIdentifier(), job.getJobID().getId(), baseTaskId.isMap(),
        baseTaskId.getTaskID().getId(), baseTaskId.getId());

    return new TaskAttemptContextWrapper(baseContext, job.getConfiguration(), taskId);
}