/**
 * Initializes this job's tasks via the superclass, then registers the
 * job's map/reduce task counts as "waiting" work with the job tracker's
 * instrumentation.
 *
 * @throws IOException if superclass task initialization fails
 */
@Override
public synchronized void initTasks() throws IOException {
  super.initTasks();
  // Record the newly created tasks so cluster metrics reflect the
  // pending work for this job.
  JobTrackerInstrumentation instrumentation = jobtracker.getInstrumentation();
  instrumentation.addWaitingMaps(getJobID(), numMapTasks);
  instrumentation.addWaitingReduces(getJobID(), numReduceTasks);
}
}
try { map_tasks.getAndIncrement(); myMetrics.launchMap(mapId); map.run(localConf, Job.this); myMetrics.completeMap(mapId); } finally { map_tasks.getAndDecrement();
void createTaskEntry(TaskAttemptID taskid, String taskTracker, TaskInProgress tip) { LOG.info("Adding task " + (tip.isCleanupAttempt(taskid) ? "(cleanup)" : "") + "'" + taskid + "' to tip " + tip.getTIPId() + ", for tracker '" + taskTracker + "'"); // taskid --> tracker taskidToTrackerMap.put(taskid, taskTracker); // tracker --> taskid Set<TaskAttemptID> taskset = trackerToTaskMap.get(taskTracker); if (taskset == null) { taskset = new TreeSet<TaskAttemptID>(); trackerToTaskMap.put(taskTracker, taskset); } taskset.add(taskid); // taskid --> TIP taskidToTIPMap.put(taskid, tip); // Note this launch if (taskid.isMap()) { myInstrumentation.launchMap(taskid); } else { myInstrumentation.launchReduce(taskid); } }
private void removeHostCapacity(String hostName) { synchronized (taskTrackers) { // remove the capacity of trackers on this host int numTrackersOnHost = 0; for (TaskTrackerStatus status : getStatusesOnHost(hostName)) { updateTotalTaskCapacity(status); removeTaskTrackerCapacity(status); int mapSlots = taskScheduler.getMaxSlots(status, TaskType.MAP); int reduceSlots = taskScheduler.getMaxSlots(status, TaskType.REDUCE); ++numTrackersOnHost; getInstrumentation().addBlackListedMapSlots( mapSlots); getInstrumentation().addBlackListedReduceSlots( reduceSlots); } uniqueHostsMap.remove(hostName); incrBlackListedTrackers(numTrackersOnHost); } }
if (tip.isSpeculating()) { speculativeMapTasks++; metrics.speculateMap(id); if (LOG.isDebugEnabled()) { LOG.debug("Chosen speculative task, current speculativeMap task count: " metrics.launchMap(id); } else { ++runningReduceTasks; if (tip.isSpeculating()) { speculativeReduceTasks++; metrics.speculateReduce(id); if (LOG.isDebugEnabled()) { LOG.debug("Chosen speculative task, current speculativeReduce task count: " metrics.launchReduce(id); LOG.info("Choosing data-local task " + tip.getTIPId()); jobCounters.incrCounter(JobCounter.DATA_LOCAL_MAPS, 1); metrics.launchDataLocalMap(id); break; case 1: LOG.info("Choosing rack-local task " + tip.getTIPId()); jobCounters.incrCounter(JobCounter.RACK_LOCAL_MAPS, 1); metrics.launchRackLocalMap(id); break; default :
// Fragment (enclosing method not visible): runs a map task and then a
// reduce task inline in this JVM, tracking in-flight counts and metrics.
// NOTE(review): the counters are not decremented in a finally block, so a
// task that throws leaves map_tasks/reduce_tasks elevated — confirm the
// enclosing method's error handling tolerates this.
map.setConf(localConf);
map_tasks += 1;
myMetrics.launchMap(mapId);
map.run(localConf, this);
myMetrics.completeMap(mapId);
map_tasks -= 1;
// Fold the finished map task's counters into the job totals.
updateCounters(map);

reduce.setConf(localConf);
reduce_tasks += 1;
myMetrics.launchReduce(reduce.getTaskID());
reduce.run(localConf, this);
myMetrics.completeReduce(reduce.getTaskID());
reduce_tasks -= 1;
// Fold the finished reduce task's counters into the job totals.
updateCounters(reduce);
metrics.speculativeSucceededMap(taskid, tip.isUsingProcessingRateForSpeculation()); case 0: jobCounters.incrCounter(Counter.LOCAL_MAP_INPUT_BYTES, inputBytes); metrics.addLocalMapInputBytes(inputBytes); break; case 1: jobCounters.incrCounter(Counter.RACK_MAP_INPUT_BYTES, inputBytes); metrics.addRackMapInputBytes(inputBytes); break; default:metrics.addMapInputBytes(inputBytes); break; metrics.completeMap(taskid); if (!garbageCollected) { if (!tip.isJobSetupTask() && hasSpeculativeMaps) { metrics.speculativeSucceededReduce(taskid, tip.isUsingProcessingRateForSpeculation()); metrics.completeReduce(taskid); if (!garbageCollected) { if (!tip.isJobSetupTask() && hasSpeculativeReduces) {
// Fragment (enclosing context not visible): records task-attempt
// completion with the job tracker's instrumentation.
// NOTE(review): both calls appear here back-to-back; in context these
// presumably sit on separate map/reduce branches — confirm against the
// enclosing method.
metrics.completeMap(taskid);
metrics.completeReduce(taskid);
" exceeds the configured limit " + maxTasks); jobtracker.getInstrumentation().addWaitingMaps(getJobID(), numMapTasks); jobtracker.getInstrumentation().addWaitingReduces(getJobID(), numReduceTasks); jobtracker.getInstrumentation().addLaunchedJobs( this.launchTime - this.startTime);
reduce.setConf(localConf); reduce_tasks += 1; myMetrics.launchReduce(reduce.getTaskID()); reduce.run(localConf, this); myMetrics.completeReduce(reduce.getTaskID()); reduce_tasks -= 1; } else {
/**
 * Adds a job to the jobtracker. Make sure that the checks are inplace before
 * adding a job. This is the core job submission logic
 * @param jobId The id for the job submitted which needs to be added
 */
private synchronized JobStatus addJob(JobID jobId, JobInProgress job)
    throws IOException {
  totalSubmissions++;
  // Lock ordering: jobs before taskScheduler — keep this nesting order
  // consistent everywhere to avoid deadlock.
  synchronized (jobs) {
    synchronized (taskScheduler) {
      jobs.put(job.getProfile().getJobID(), job);
      // Notify listeners (e.g. schedulers) of the new job; an IOException
      // from any listener propagates to the submitter.
      for (JobInProgressListener listener : jobInProgressListeners) {
        listener.jobAdded(job);
      }
    }
  }
  // Record the submission in metrics outside the locks.
  myInstrumentation.submitJob(this.conf, jobId);
  return job.getStatus();
}
/** * The job is done since all it's component tasks are either * successful or have failed. * * @param metrics job-tracker metrics */ private void jobComplete(JobTrackerInstrumentation metrics) { // // All tasks are complete, then the job is done! // if (this.status.getRunState() == JobStatus.RUNNING ) { this.status.setRunState(JobStatus.SUCCEEDED); this.status.setCleanupProgress(1.0f); this.finishTime = System.currentTimeMillis(); LOG.info("Job " + this.status.getJobID() + " has completed successfully."); JobHistory.JobInfo.logFinished(this.status.getJobID(), finishTime, this.finishedMapTasks, this.finishedReduceTasks, failedMapTasks, failedReduceTasks, getCounters()); // Note that finalize will close the job history handles which garbage collect // might try to finalize garbageCollect(); metrics.completeJob(this.conf, this.status.getJobID()); } }
if (task.isMapTask()) { LocalJobRunner.this.map_tasks -= 1; LocalJobRunner.this.myMetrics.completeMap(task.getTaskID());
totalSpeculativeMapTasks.incrementAndGet(); metrics.speculateMap(id, tip.isUsingProcessingRateForSpeculation()); metrics.launchMap(id); } else { if (firstReduceStartTime == 0) { totalSpeculativeReduceTasks.incrementAndGet(); metrics.speculateReduce(id, tip.isUsingProcessingRateForSpeculation()); metrics.launchReduce(id); LOG.info("Choosing data-local task " + tip.getTIPId()); jobCounters.incrCounter(Counter.DATA_LOCAL_MAPS, 1); metrics.launchDataLocalMap(id); break; case 1: LOG.info("Choosing rack-local task " + tip.getTIPId()); jobCounters.incrCounter(Counter.RACK_LOCAL_MAPS, 1); metrics.launchRackLocalMap(id); break; default :
map.setJobFile(taskJobFile.toUri().getPath()); map_tasks += 1; myMetrics.launchMap(mapId); myMetrics.completeMap(mapId); map_tasks -= 1; } else { reduce.setJobFile(localFile.toUri().getPath()); reduce_tasks += 1; myMetrics.launchReduce(reduce.getTaskID()); reduce.run(localConf, this); myMetrics.completeReduce(reduce.getTaskID()); reduce_tasks -= 1; updateCounters(reduce.getTaskID(), reduce.getCounters());
runningMapTasks -= 1; finishedMapTasks += 1; metrics.completeMap(taskid); if (!tip.isJobSetupTask() && hasSpeculativeMaps) { updateTaskTrackerStats(tip,ttStatus,trackerMapStats,mapTaskStats); runningReduceTasks -= 1; finishedReduceTasks += 1; metrics.completeReduce(taskid); if (!tip.isJobSetupTask() && hasSpeculativeReduces) { updateTaskTrackerStats(tip,ttStatus,trackerReduceStats,reduceTaskStats);
private void removeHostCapacity(String hostName) { synchronized (taskTrackers) { // remove the capacity of trackers on this host int numTrackersOnHost = 0; for (TaskTrackerStatus status : getStatusesOnHost(hostName)) { int mapSlots = status.getMaxMapSlots(); totalMapTaskCapacity -= mapSlots; int reduceSlots = status.getMaxReduceSlots(); totalReduceTaskCapacity -= reduceSlots; ++numTrackersOnHost; getInstrumentation().addBlackListedMapSlots( mapSlots); getInstrumentation().addBlackListedReduceSlots( reduceSlots); } // remove the host uniqueHostsMap.remove(hostName); incrBlackListedTrackers(numTrackersOnHost); } }
/**
 * Adds a job to the jobtracker. Make sure that the checks are in place before
 * adding a job. This is the core job submission logic.
 *
 * @param jobId the id for the submitted job which needs to be added
 * @param job the in-progress job to register
 * @return the status of the newly added job
 */
synchronized JobStatus addJob(JobID jobId, JobInProgress job) {
  totalSubmissions++;
  // Lock ordering: jobs before taskScheduler — keep this nesting order
  // consistent everywhere to avoid deadlock.
  synchronized (jobs) {
    synchronized (taskScheduler) {
      jobs.put(job.getProfile().getJobID(), job);
      for (JobInProgressListener listener : jobInProgressListeners) {
        try {
          listener.jobAdded(job);
        } catch (IOException ioe) {
          // Fix: pass the exception as the throwable argument so the full
          // stack trace is logged instead of just ioe's toString().
          LOG.warn("Failed to add and so skipping the job : "
              + job.getJobID() + ". Exception : " + ioe, ioe);
        }
      }
    }
  }
  myInstrumentation.submitJob(job.getJobConf(), jobId);
  LOG.info("Job " + jobId + " added successfully for user '"
      + job.getJobConf().getUser() + "' to queue '"
      + job.getJobConf().getQueueName() + "'");
  return job.getStatus();
}
// Fragment (enclosing method not visible): records this job's completion
// with the job tracker's instrumentation.
metrics.completeJob(this.conf, this.status.getJobID());
// Fragment (enclosing method not visible): registers this job's map and
// reduce task counts as "waiting" work with the job tracker's
// instrumentation.
jobtracker.getInstrumentation().addWaitingMaps(getJobID(), numMapTasks);
jobtracker.getInstrumentation().addWaitingReduces(getJobID(), numReduceTasks);