long mapOutputBytes = counters.getCounter(Task.Counter.MAP_OUTPUT_BYTES); long averageNumberOfBytesPerChunk = mapOutputBytes / numChunks / cluster.getNumberOfPartitions(); if (averageNumberOfBytesPerChunk > (HadoopStoreWriter.DEFAULT_CHUNK_SIZE)) { long numberOfRecords = counters.getCounter(Task.Counter.REDUCE_INPUT_GROUPS); + counters.getCounter(KeyValueWriter.CollisionCounter.NUM_COLLISIONS)); logger.info("Maximum number of collisions for one entry - " + counters.getCounter(KeyValueWriter.CollisionCounter.MAX_COLLISIONS));
long changed = c.getCounter(PrCounters.CONVERGE_CHECK); System.out.println("Iteration = " + i + ", changed reducer = " + changed);
long changed = c.getCounter(PrCounters.CONVERGE_CHECK); System.out.println("Iteration = " + i + ", changed reducer = " + changed);
/**
 * Computes the Alignment Error Rate (AER) from alignment-evaluation counters,
 * printing the precision to stdout as a side effect.
 *
 * AER = 1 - (probable hits + sure hits) / (hypothesized points + reference points),
 * truncated (not rounded) to two decimal places.
 *
 * @param c job counters holding the AlignmentEvalEnum tallies
 * @return the truncated AER percentage value
 */
static double ComputeAER(Counters c) {
    long hypPoints = c.getCounter(AlignmentEvalEnum.HYPOTHESIZED_ALIGNMENT_POINTS);
    long refPoints = c.getCounter(AlignmentEvalEnum.REF_ALIGNMENT_POINTS);
    long probableHits = c.getCounter(AlignmentEvalEnum.PROBABLE_HITS);
    long sureHits = c.getCounter(AlignmentEvalEnum.SURE_HITS);
    // Sum as longs first (matching the original widening behavior), then divide as doubles.
    double den = hypPoints + refPoints;
    double num = probableHits + sureHits;
    // The int cast deliberately truncates to two decimal places rather than rounding.
    double aer = ((int) ((1.0 - num / den) * 10000.0)) / 100.0;
    double prec = ((int) (((double) probableHits / (double) hypPoints) * 10000.0)) / 100.0;
    System.out.println("PREC: " + prec);
    return aer;
}
long count = counters.getCounter(c); sb.append(String.format("%20s = %d\n", name, count)); counters.getCounter(Enum.valueOf(Counter.class, timeName)) / numNanoSecPerMS; sb.append(String.format("%20s = %.2f (ms)\n", timeName, totalTime)); } else if (!name.endsWith(timeSuffix)) { long count = counters.getCounter(c); sb.append(String.format("%20s = %d\n", name, count));
currentCount += counters.getCounter(e); aggMap.put(e, currentCount);
/** * called in ExecDriver.progress periodically. * * @param ctrs * counters from the running job */ @SuppressWarnings("unchecked") public void updateCounters(Counters ctrs) { if (counters == null) { counters = new HashMap<String, Long>(); } // For some old unit tests, the counters will not be populated. Eventually, // the old tests should be removed if (counterNameToEnum == null) { return; } for (Map.Entry<String, ProgressCounter> counter : counterNameToEnum .entrySet()) { counters.put(counter.getKey(), ctrs.getCounter(counter.getValue())); } // update counters of child operators // this wont be an infinite loop since the operator graph is acyclic // but, some operators may be updated more than once and that's ok if (getChildren() != null) { for (Node op : getChildren()) { ((Operator<? extends Serializable>) op).updateCounters(ctrs); } } }
/**
 * Looks up a counter value for a flow step. Hadoop-backed steps read the
 * value from the running job's counters; all other step types fall back to
 * the generic counter accessor.
 *
 * @param step  the flow step whose counter is being read
 * @param value the counter enum to look up
 * @return the counter's current value
 * @throws IOException if the running job's counters cannot be fetched
 */
public static Long get(FlowStepStats step, Enum value) throws IOException {
  if (!(step instanceof HadoopStepStats)) {
    return step.getCounterValue(value);
  }
  HadoopStepStats hadoopStep = (HadoopStepStats) step;
  return hadoopStep.getRunningJob().getCounters().getCounter(value);
}
Counters counters = job.getCounters(); assertEquals("Number of local maps", counters.getCounter(JobCounter.OTHER_LOCAL_MAPS), otherLocalMaps); assertEquals("Number of Data-local maps", counters.getCounter(JobCounter.DATA_LOCAL_MAPS), dataLocalMaps); assertEquals("Number of Rack-local maps", counters.getCounter(JobCounter.RACK_LOCAL_MAPS), rackLocalMaps); mr.waitUntilIdle();
/**
 * Entry point: configures and runs the Model-1 Viterbi alignment job over
 * the bitext input, then computes and prints the Viterbi cross-entropy and
 * perplexity from the job's LOGPROB / WORDCOUNT counters.
 *
 * @param args unused command-line arguments
 * @throws IOException if the job submission or execution fails
 */
@SuppressWarnings("deprecation")
public static void main(String[] args) throws IOException {
  int numMappers = 15;
  JobConf jobConf = new JobConf(M1ViterbiMapper.class);
  jobConf.setJobName("m1viterbi");
  jobConf.setOutputKeyClass(LongWritable.class);
  jobConf.setOutputValueClass(Text.class);
  jobConf.setMapperClass(M1ViterbiMapper.class);
  jobConf.setNumMapTasks(numMappers);
  // Map-only job: alignments are emitted directly by the mappers.
  jobConf.setNumReduceTasks(0);
  jobConf.setInputFormat(SequenceFileInputFormat.class);
  FileInputFormat.setInputPaths(jobConf, new Path(bitext));
  FileOutputFormat.setOutputPath(jobConf, new Path("somealigns.test"));
  RunningJob runningJob = JobClient.runJob(jobConf);
  Counters jobCounters = runningJob.getCounters();
  double logProb = (double) jobCounters.getCounter(CrossEntropyCounters.LOGPROB);
  double wordCount = (double) jobCounters.getCounter(CrossEntropyCounters.WORDCOUNT);
  // Cross-entropy in bits per word: convert the natural-log total to log base 2.
  double crossEntropy = (logProb / wordCount) / Math.log(2.0);
  System.out.println("Viterbi cross-entropy: " + crossEntropy + " perplexity: " + Math.pow(2.0, crossEntropy));
}
long numFiles = ctrs.getCounter(ProgressCounter.CREATED_FILES); long upperLimit = HiveConf.getLongVar(job, HiveConf.ConfVars.MAXCREATEDFILES); if (numFiles > upperLimit) {
+ counterName); } else { long value = ctrs.getCounter(pc); fatalErrorMessage(errMsg, value); if (value != 0) {
/**
 * Asserts that a job's task bookkeeping is internally consistent: the number
 * of task-completion events must equal the total launched tasks, and the
 * running/pending map and reduce counts must match the expected values.
 *
 * @param jip            the job whose counters and task counts are checked
 * @param runningMaps    expected number of currently running map tasks
 * @param pendingMaps    expected number of pending map tasks
 * @param runningReduces expected number of currently running reduce tasks
 * @param pendingReduces expected number of pending reduce tasks
 */
static void checkTaskCounts(JobInProgress jip, int runningMaps, int pendingMaps,
    int runningReduces, int pendingReduces) {
  Counters jobCounters = jip.getJobCounters();
  long launchedMaps = jobCounters.getCounter(JobCounter.TOTAL_LAUNCHED_MAPS);
  long launchedReduces = jobCounters.getCounter(JobCounter.TOTAL_LAUNCHED_REDUCES);
  long totalTaskCount = launchedMaps + launchedReduces;
  LOG.info("totalTaskCount is " + totalTaskCount);
  LOG.info(" Running Maps:" + jip.runningMaps() +
           " Pending Maps:" + jip.pendingMaps() +
           " Running Reds:" + jip.runningReduces() +
           " Pending Reds:" + jip.pendingReduces());
  // Every launched task should have produced exactly one completion event.
  assertEquals(jip.getNumTaskCompletionEvents(), totalTaskCount);
  assertEquals(runningMaps, jip.runningMaps());
  assertEquals(pendingMaps, jip.pendingMaps());
  assertEquals(runningReduces, jip.runningReduces());
  assertEquals(pendingReduces, jip.pendingReduces());
}
Counters jobCounters = job.getJobCounters(); long mapSlotSeconds = (jobCounters.getCounter(Counter.SLOTS_MILLIS_MAPS) + jobCounters.getCounter(Counter.FALLOW_SLOTS_MILLIS_MAPS)) / 1000; long reduceSlotSeconds = (jobCounters.getCounter(Counter.SLOTS_MILLIS_REDUCES) + jobCounters.getCounter(Counter.FALLOW_SLOTS_MILLIS_REDUCES)) / 1000;
Counters counters = job.getCounters(); assertEquals("Number of local maps", counters.getCounter(JobCounter.OTHER_LOCAL_MAPS), otherLocalMaps); assertEquals("Number of Data-local maps", counters.getCounter(JobCounter.DATA_LOCAL_MAPS), dataLocalMaps); assertEquals("Number of Rack-local maps", counters.getCounter(JobCounter.RACK_LOCAL_MAPS), rackLocalMaps); mr.waitUntilIdle();
counters.getCounter(JobCounter.DATA_LOCAL_MAPS)); counters.getCounter(JobCounter.RACK_LOCAL_MAPS)); counters.getCounter(JobCounter.OTHER_LOCAL_MAPS));
Counters jobCounters = job.getJobCounters(); long mapSlotSeconds = (jobCounters.getCounter(JobCounter.SLOTS_MILLIS_MAPS) + jobCounters.getCounter(JobCounter.FALLOW_SLOTS_MILLIS_MAPS)) / 1000; long reduceSlotSeconds = (jobCounters.getCounter(JobCounter.SLOTS_MILLIS_REDUCES) + jobCounters.getCounter(JobCounter.FALLOW_SLOTS_MILLIS_REDUCES)) / 1000;
/**
 * Verifies that the same record count was accumulated both through the enum
 * counter (MAP_RECORDS) and through the string-named counter group
 * ("StringCounter"/"MapRecords").
 *
 * @param runningJob the finished job whose counters are inspected
 * @param expected   the record count both counters must report
 * @throws IOException if the counters cannot be retrieved
 */
public static void verifyCounters(RunningJob runningJob, int expected)
    throws IOException {
  Counters counters = runningJob.getCounters();
  long enumCount = counters.getCounter(EnumCounter.MAP_RECORDS);
  long namedCount = counters.getGroup("StringCounter").getCounter("MapRecords");
  assertEquals(expected, enumCount);
  assertEquals(expected, namedCount);
}
if (counters.getCounter(JobCounter.TOTAL_LAUNCHED_MAPS) == 1) { break;
@SuppressWarnings("deprecation") private long getTaskCounterUsage (JobClient client, JobID id, int numReports, int taskId, TaskType type) throws Exception { TaskReport[] reports = null; if (TaskType.MAP.equals(type)) { reports = client.getMapTaskReports(id); } else if (TaskType.REDUCE.equals(type)) { reports = client.getReduceTaskReports(id); } assertNotNull("No reports found for task type '" + type.name() + "' in job " + id, reports); // make sure that the total number of reports match the expected assertEquals("Mismatch in task id", numReports, reports.length); Counters counters = reports[taskId].getCounters(); return counters.getCounter(TaskCounter.COMMITTED_HEAP_BYTES); }