/** * Ensures that the given number of map tasks for the given job * configuration does not exceed the number of regions for the given table. * * @param table The table to get the region count for. * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ // Used by tests. public static void limitNumMapTasks(String table, JobConf job) throws IOException { int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table)); if (job.getNumMapTasks() > regions) job.setNumMapTasks(regions); }
@Override public List<InputSplit> getSplits(JobContext context) throws IOException { JobConf conf = HadoopCfgUtils.asJobConf(CompatHandler.jobContext(context).getConfiguration()); // NOTE: this method expects a ShardInputSplit to be returned (which implements both the old and the new API). return Arrays.asList((InputSplit[]) getSplits(conf, conf.getNumMapTasks())); }
// NOTE(review): mid-method fragment — the enclosing method is not fully visible here.
totalSize += segment.getSegment().getSize(); // accumulate the byte size of every segment
int mapTask = conf.getNumMapTasks(); // configured map-task count for this job
if (mapTask > 0) { // guard against division by zero when no map tasks are configured
maxSize = totalSize / mapTask; // target per-task size — presumably an even split; TODO confirm rounding intent
@Test
public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() throws IOException {
  // The test table is expected to have a single region, so the cap is 1.
  Configuration configuration = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(configuration);
  TableMapReduceUtil.setNumReduceTasks(TABLE_NAME, jobConf);
  TableMapReduceUtil.limitNumMapTasks(TABLE_NAME, jobConf);
  assertEquals(1, jobConf.getNumMapTasks());

  // An explicitly oversized value must be clamped back down to the region count.
  jobConf.setNumMapTasks(10);
  TableMapReduceUtil.setNumMapTasks(TABLE_NAME, jobConf);
  TableMapReduceUtil.limitNumMapTasks(TABLE_NAME, jobConf);
  assertEquals(1, jobConf.getNumMapTasks());
}
@Override
public void configure(JobConf conf) {
  // Cache the configured map-task count and seed the formatter with the key pattern.
  nMaps = conf.getNumMapTasks();
  StringBuilder buffer = (StringBuilder) fmt.out();
  buffer.append(keyfmt);
}
/**
 * Create an almost empty JobInProgress, which can be used only for tests.
 *
 * @param jobid identifier of the job being tracked
 * @param conf configuration supplying the map and reduce task counts
 */
protected JobInProgress(JobID jobid, JobConf conf) {
  this.jobId = jobid;
  this.conf = conf;
  this.numMapTasks = conf.getNumMapTasks();
  this.numReduceTasks = conf.getNumReduceTasks();
}
/** Returns the number of map tasks configured for the current job. */
public int getCurrentNumMappers() {
  JobConf current = getJobConf();
  return current.getNumMapTasks();
}
// Reads the map-task count out of the job configuration currently held.
public int getCurrentNumMappers() {
  final JobConf jobConf = getJobConf();
  return jobConf.getNumMapTasks();
}
@Override
public void configure(JobConf conf) {
  nMaps = conf.getNumMapTasks();
  // Derive this task's id by counting down from the last map partition.
  int partition = conf.getInt(JobContext.TASK_PARTITION, -1);
  id = nMaps - partition - 1;
  // Pre-fill the value buffer with a constant payload byte.
  Arrays.fill(b, 0, 4096, (byte) 'V');
  StringBuilder buffer = (StringBuilder) fmt.out();
  buffer.append(keyfmt);
}
/** Convenience overload that pulls the map/reduce task counts from the configuration. */
static RunningJob runJob(JobConf conf, Path inDir, Path outDir) throws IOException {
  int numMaps = conf.getNumMapTasks();
  int numReduces = conf.getNumReduceTasks();
  return runJob(conf, inDir, outDir, numMaps, numReduces);
}
/**
 * Ensures that the given number of map tasks for the given job
 * configuration does not exceed the number of regions for the given table.
 *
 * @param table The table to get the region count for.
 * @param job The current job configuration to adjust.
 * @throws IOException When retrieving the table details fails.
 */
public static void limitNumMapTasks(String table, JobConf job) throws IOException {
  HTable outputTable = new HTable(HBaseConfiguration.create(job), table);
  try {
    int regions = outputTable.getRegionsInfo().size();
    if (job.getNumMapTasks() > regions) {
      job.setNumMapTasks(regions);
    }
  } finally {
    // FIX: the original leaked the HTable — close it so the underlying
    // connection/zookeeper resources are released.
    outputTable.close();
  }
}
@Override public List<InputSplit> getSplits(JobContext context) throws IOException { JobConf conf = HadoopCfgUtils.asJobConf(CompatHandler.jobContext(context).getConfiguration()); // NOTE: this method expects a ShardInputSplit to be returned (which implements both the old and the new API). return Arrays.asList((InputSplit[]) getSplits(conf, conf.getNumMapTasks())); }
@Override public List<InputSplit> getSplits(JobContext context) throws IOException { JobConf conf = HadoopCfgUtils.asJobConf(CompatHandler.jobContext(context).getConfiguration()); // NOTE: this method expects a ShardInputSplit to be returned (which implements both the old and the new API). return Arrays.asList((InputSplit[]) getSplits(conf, conf.getNumMapTasks())); }
@Override
public void configure(JobConf conf) {
  // Remember how many map tasks the job runs with, then append the
  // key format string to the formatter's backing buffer.
  nMaps = conf.getNumMapTasks();
  ((StringBuilder) fmt.out()).append(keyfmt);
}
@Override public List<InputSplit> getSplits(JobContext context) throws IOException { JobConf conf = HadoopCfgUtils.asJobConf(CompatHandler.jobContext(context).getConfiguration()); // NOTE: this method expects a ShardInputSplit to be returned (which implements both the old and the new API). return Arrays.asList((InputSplit[]) getSplits(conf, conf.getNumMapTasks())); }
@Override public List<InputSplit> getSplits(JobContext context) throws IOException { JobConf conf = HadoopCfgUtils.asJobConf(CompatHandler.jobContext(context).getConfiguration()); // NOTE: this method expects a ShardInputSplit to be returned (which implements both the old and the new API). return Arrays.asList((InputSplit[]) getSplits(conf, conf.getNumMapTasks())); }
@Override
public void configure(JobConf conf) {
  // Number of map tasks, used to reverse the partition index below.
  nMaps = conf.getNumMapTasks();
  id = nMaps - conf.getInt(JobContext.TASK_PARTITION, -1) - 1;
  // Fill the first 4096 bytes of the value buffer with 'V'.
  Arrays.fill(b, 0, 4096, (byte) 'V');
  // Seed the formatter output with the key format pattern.
  ((StringBuilder) fmt.out()).append(keyfmt);
}
// Forwards to the full runJob overload, taking both task counts from the job
// configuration itself.
static RunningJob runJob(JobConf conf, Path inDir, Path outDir) throws IOException {
  final int mapCount = conf.getNumMapTasks();
  final int reduceCount = conf.getNumReduceTasks();
  return runJob(conf, inDir, outDir, mapCount, reduceCount);
}
@Test
public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() throws IOException {
  JobConf jobConf = new JobConf(UTIL.getConfiguration());

  // First pass: the default map-task count must already be limited to the
  // table's region count (expected to be a single region here).
  TableMapReduceUtil.setNumReduceTasks(TABLE_NAME, jobConf);
  TableMapReduceUtil.limitNumMapTasks(TABLE_NAME, jobConf);
  assertEquals(1, jobConf.getNumMapTasks());

  // Second pass: force a larger count and verify it gets clamped again.
  jobConf.setNumMapTasks(10);
  TableMapReduceUtil.setNumMapTasks(TABLE_NAME, jobConf);
  TableMapReduceUtil.limitNumMapTasks(TABLE_NAME, jobConf);
  assertEquals(1, jobConf.getNumMapTasks());
}
@Test
public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() throws IOException {
  Configuration clusterConf = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(clusterConf);

  TableMapReduceUtil.setNumReduceTasks(TABLE_NAME, jobConf);
  TableMapReduceUtil.limitNumMapTasks(TABLE_NAME, jobConf);
  // With a single-region table, the limited map-task count is 1.
  assertEquals(1, jobConf.getNumMapTasks());

  jobConf.setNumMapTasks(10);
  TableMapReduceUtil.setNumMapTasks(TABLE_NAME, jobConf);
  TableMapReduceUtil.limitNumMapTasks(TABLE_NAME, jobConf);
  // The explicit value of 10 must be clamped back to the region count.
  assertEquals(1, jobConf.getNumMapTasks());
}