private void maybeMateJobAndConf() throws IOException {
  if (jobBeingTraced != null && jobconf != null
      && jobBeingTraced.getJobID().toString().equals(jobconf.jobID)) {
    jobBeingTraced.setHeapMegabytes(jobconf.heapMegabytes);
    jobBeingTraced.setQueue(jobconf.queue);
    jobBeingTraced.setJobName(jobconf.jobName);
    jobBeingTraced.setClusterMapMB(jobconf.clusterMapMB);
    jobBeingTraced.setClusterReduceMB(jobconf.clusterReduceMB);
    jobBeingTraced.setJobMapMB(jobconf.jobMapMB);
    jobBeingTraced.setJobReduceMB(jobconf.jobReduceMB);
    jobBeingTraced.setJobProperties(jobconf.properties);

    jobconf = null;

    finalizeJob();
  }
}
/**
 * Get the number of map tasks that are actually logged in the trace.
 * @return the number of map tasks that are actually logged in the trace
 */
public int getNumLoggedMaps() {
  return job.getMapTasks().size();
}
@Override
public Values getOutcome() {
  return job.getOutcome();
}
private void adjustJobTimes(LoggedJob adjustee) {
  long offsetInCycle =
      (adjustee.getSubmitTime() - firstJobSubmitTime) % inputCycle;

  long outputOffset = (long) ((double) offsetInCycle * timeDilation);

  long adjustment =
      firstJobSubmitTime + outputOffset - adjustee.getSubmitTime();

  adjustee.adjustTimes(adjustment);
}
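// Illustration (not from the original source): a worked example of the arithmetic
// in adjustJobTimes above. The concrete values of firstJobSubmitTime, inputCycle,
// timeDilation and the submit time are assumptions chosen only to show how a job's
// offset within the input cycle is scaled by the dilation factor.
static long exampleAdjustment() {
  long firstJobSubmitTime = 1000L;   // hypothetical first submission in the trace
  long inputCycle = 600000L;         // hypothetical cycle length: 10 minutes
  double timeDilation = 0.5;         // hypothetical compression factor

  long submitTime = firstJobSubmitTime + 450000L;                       // 450 s into the cycle
  long offsetInCycle = (submitTime - firstJobSubmitTime) % inputCycle;  // 450000
  long outputOffset = (long) ((double) offsetInCycle * timeDilation);   // 225000
  long adjustment = firstJobSubmitTime + outputOffset - submitTime;     // -225000

  // adjustTimes(adjustment) would therefore move the job 225 seconds earlier,
  // compressing its position in the cycle to half the original offset.
  return adjustment;
}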
@Override
public long getSubmissionTime() {
  return job.getSubmitTime() - job.getRelativeTime();
}
    && jobTraceGen != null
    && (jobBeingTraced == null
        || !jobID.equals(jobBeingTraced.getJobID().toString()))) {
  jobBeingTraced = new LoggedJob(jobID);

  jobBeingTraced.setUser(user);
  jobBeingTraced.setPriority(LoggedJob.JobPriority.valueOf(jobPriority));
  jobBeingTraced.setTotalMaps(Integer.parseInt(totalMaps));
  jobBeingTraced.setTotalReduces(Integer.parseInt(totalReduces));
  jobBeingTraced.setSubmitTime(Long.parseLong(submitTime));
  jobBeingTraced.setLaunchTime(Long.parseLong(launchTime));
  jobBeingTraced.setFinishTime(Long.parseLong(finishTime));

  if (status != null) {
    jobBeingTraced.setOutcome(Pre21JobHistoryConstants.Values.valueOf(status));
String user = job.getUser() == null ? "default" : job.getUser().getValue();
String jobQueue = job.getQueue().getValue();
String oldJobId = job.getJobID().toString();
long jobStartTimeMS = job.getSubmitTime();
long jobFinishTimeMS = job.getFinishTime();
if (baselineTimeMS == 0) {
  baselineTimeMS = jobStartTimeMS;
}
// ... (intervening code elided in this excerpt)
    new ArrayList<ContainerSimulator>();

// map tasks
for (LoggedTask mapTask : job.getMapTasks()) {
  if (mapTask.getAttempts().size() == 0) {
    continue;
  }
  // ... (loop body elided in this excerpt)
}

// reduce tasks
for (LoggedTask reduceTask : job.getReduceTasks()) {
  if (reduceTask.getAttempts().size() == 0) {
    continue;
    + job.getNumberReduces() + "r");

System.out.println(loggedJob.getOutcome() + ", " + loggedJob.getJobtype());

for (LoggedDiscreteCDF cdf : loggedJob.getSuccessfulMapAttemptCDFs()) {
  System.out.println(cdf.getNumberValues() + ": " + cdf.getMinimum() + "--"
      + cdf.getMaximum());
}

for (LoggedDiscreteCDF cdf : loggedJob.getFailedMapAttemptCDFs()) {
  System.out.println(cdf.getNumberValues() + ": " + cdf.getMinimum() + "--"
      + cdf.getMaximum());
}

LoggedDiscreteCDF cdf = loggedJob.getSuccessfulReduceAttemptCDF();
System.out.println(cdf.getNumberValues() + ": " + cdf.getMinimum() + "--"
    + cdf.getMaximum());

cdf = loggedJob.getFailedReduceAttemptCDF();
System.out.println(cdf.getNumberValues() + ": " + cdf.getMinimum() + "--"
    + cdf.getMaximum());

for (double p : loggedJob.getMapperTriesToSucceed()) {
  System.out.print(p + ", ");
Path emptyPath = new Path("/");
int totalHosts = 0; // used to determine the avg # of hosts per split

for (LoggedTask mapTask : job.getMapTasks()) {
  Pre21JobHistoryConstants.Values taskType = mapTask.getTaskType();
  if (taskType != Pre21JobHistoryConstants.Values.MAP) {
    // ... (handling of non-map tasks elided in this excerpt)

int totalMaps = job.getTotalMaps();
if (totalMaps < splitsList.size()) {
  LOG.warn("TotalMaps for job " + job.getJobID()
      + " is less than the total number of map task descriptions ("
      + totalMaps + "<" + splitsList.size() + ").");
        .getMapTasks()
    : typ == Pre21JobHistoryConstants.Values.REDUCE
        ? jobBeingTraced.getReduceTasks()
        : jobBeingTraced.getOtherTasks();
LoggedJob job = null;
while ((job = reader.getNext()) != null) {
  for (LoggedTask mapTask : job.getMapTasks()) {
    // ... (selection of the LoggedTaskAttempt elided in this excerpt)
    nodeSet.add(taskAttempt.getHostName().getValue());
  }
  for (LoggedTask reduceTask : job.getReduceTasks()) {
    if (reduceTask.getAttempts().size() == 0) {
      continue;
private synchronized long getNextRandomSeed() {
  numRandomSeeds++;
  return RandomSeedGenerator.getSeed("forZombieJob" + job.getJobID(),
      numRandomSeeds);
}
private long doMakeUpReduceRuntime(State state) {
  long reduceTime;
  try {
    if (state == State.SUCCEEDED) {
      reduceTime = makeUpRuntime(job.getSuccessfulReduceAttemptCDF());
    } else if (state == State.FAILED) {
      reduceTime = makeUpRuntime(job.getFailedReduceAttemptCDF());
    } else {
      throw new IllegalArgumentException(
          "state is neither SUCCEEDED nor FAILED: " + state);
    }
    return reduceTime;
  } catch (NoValueToMakeUpRuntime e) {
    return 0;
  }
}
LoggedJob nextJob() throws IOException, OutOfOrderException {
  LoggedJob newJob = rawNextJob();

  if (newJob != null) {
    skewBuffer.add(newJob);
  }

  LoggedJob result = skewBuffer.poll();

  while (result != null && result.getSubmitTime() < returnedLatestSubmitTime) {
    LOG.error("The current job was submitted earlier than the previous one");
    LOG.error("Its jobID is " + result.getJobID());
    LOG.error("Its submit time is " + result.getSubmitTime()
        + ", but the previous one was " + returnedLatestSubmitTime);

    if (abortOnUnfixableSkew) {
      throw new OutOfOrderException("Job submit time is "
          + result.getSubmitTime() + ", but the previous one was "
          + returnedLatestSubmitTime);
    }

    result = rawNextJob();
  }

  if (result != null) {
    returnedLatestSubmitTime = result.getSubmitTime();
  }

  return result;
}
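// Hypothetical caller sketch (not from the original source): drain the deskewing
// reader above until the trace is exhausted, treating unfixable skew as fatal.
// The deskewedReader variable and the consume() helper are assumptions used only
// for illustration.
try {
  LoggedJob job;
  while ((job = deskewedReader.nextJob()) != null) {
    consume(job); // hypothetical downstream consumer
  }
} catch (OutOfOrderException e) {
  LOG.error("Trace could not be deskewed within the configured skew buffer", e);
}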
private long makeUpMapRuntime(State state, int locality) {
  long runtime;

  // make up runtime
  if (state == State.SUCCEEDED || state == State.FAILED) {
    List<LoggedDiscreteCDF> cdfList =
        state == State.SUCCEEDED ? job.getSuccessfulMapAttemptCDFs()
            : job.getFailedMapAttemptCDFs();
    // XXX MapCDFs is an ArrayList of four possible groups: distance=0, 1, 2,
    // and a final group for "distance cannot be determined". All pig jobs
    // have only that fourth group, and pig tasks usually do not have any
    // locality, so the group should count as "distance=2". However,
    // setup/cleanup tasks are also counted in the fourth group, and those
    // tasks do not fit this model.
    if (cdfList == null) {
      runtime = -1;
      return runtime;
    }
    try {
      runtime = makeUpRuntime(cdfList.get(locality));
    } catch (NoValueToMakeUpRuntime e) {
      runtime = makeUpRuntime(cdfList);
    }
  } else {
    throw new IllegalArgumentException(
        "state is neither SUCCEEDED nor FAILED: " + state);
  }

  return runtime;
}
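// Illustration (assumed call pattern, not from the original source): the locality
// argument selects one of the distance groups described in the comment above,
// e.g. 0 for a data-local attempt and 2 for an off-rack or unknown-distance
// attempt.
long dataLocalMs = makeUpMapRuntime(State.SUCCEEDED, 0);
long offRackMs = makeUpMapRuntime(State.SUCCEEDED, 2);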
  @Override
  public int compare(LoggedJob j1, LoggedJob j2) {
    return (j1.getSubmitTime() < j2.getSubmitTime()) ? -1
        : (j1.getSubmitTime() == j2.getSubmitTime()) ? 0 : 1;
  }
}
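// Sketch of how a submit-time comparator like the one above is typically used
// (assumed usage, not from the original source): ordering buffered jobs so the
// earliest-submitted job is released first. The JobComparator class name and the
// bufferedJobs collection are assumptions for illustration.
PriorityQueue<LoggedJob> bySubmitTime =
    new PriorityQueue<LoggedJob>(16, new JobComparator());
bySubmitTime.addAll(bufferedJobs);
LoggedJob earliest = bySubmitTime.poll(); // job with the smallest submit time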
/**
 * Get the number of reduce tasks that are actually logged in the trace.
 * @return the number of reduce tasks that are actually logged in the trace
 */
public int getNumLoggedReduces() {
  return job.getReduceTasks().size();
}
@Override
public int getNumberMaps() {
  return sanitizeValue(job.getTotalMaps(), 0, "NumberMaps", job.getJobID());
}
@Override
public int getNumberReduces() {
  return sanitizeValue(job.getTotalReduces(), 0, "NumberReduces",
      job.getJobID());
}
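// Minimal sketch (an assumption, not the original implementation) of what a
// sanitizeValue helper such as the one referenced above might do: fall back to
// the supplied default when the trace did not record the value, and log the
// substitution. The parameter types, especially the job-id parameter, are
// guesses for illustration only.
private static int sanitizeValue(int oldVal, int defaultVal, String fieldName,
    Object jobId) {
  if (oldVal < 0) { // negative values are assumed to mark missing trace fields
    LOG.warn(fieldName + " not defined for " + jobId + "; using " + defaultVal);
    return defaultVal;
  }
  return oldVal;
}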
@Override
public String getQueueName() {
  QueueName queue = job.getQueue();
  return (queue == null || queue.getValue() == null)
      ? JobConf.DEFAULT_QUEUE_NAME : queue.getValue();
}