/**
 * Return the status information about the Map-Reduce cluster.
 *
 * @return the current {@link ClusterStatus} reported by the cluster
 * @throws Exception if the cluster status cannot be retrieved
 */
public ClusterStatus getClusterStatus() throws Exception {
  ClusterStatus cs;
  try {
    JobConf job = new JobConf(conf);
    JobClient jc = new JobClient(job);
    try {
      cs = jc.getClusterStatus();
    } finally {
      // JobClient holds an RPC connection; close it so it is not leaked.
      jc.close();
    }
  } catch (Exception e) {
    // Log with the full stack trace (printStackTrace() bypasses the log
    // framework) and preserve the original exception for the caller.
    LOG.error("Failed to retrieve cluster status", e);
    throw e;
  }
  LOG.info("Returning cluster status: " + cs.toString());
  return cs;
}
/**
 * Return the status information about the Map-Reduce cluster.
 *
 * @return the current {@link ClusterStatus} reported by the cluster
 * @throws Exception if the cluster status cannot be retrieved
 */
public ClusterStatus getClusterStatus() throws Exception {
  ClusterStatus cs;
  try {
    JobConf job = new JobConf(conf);
    JobClient jc = new JobClient(job);
    try {
      cs = jc.getClusterStatus();
    } finally {
      // JobClient holds an RPC connection; close it so it is not leaked.
      jc.close();
    }
  } catch (Exception e) {
    // Log with the full stack trace (printStackTrace() bypasses the log
    // framework) and preserve the original exception for the caller.
    LOG.error("Failed to retrieve cluster status", e);
    throw e;
  }
  LOG.info("Returning cluster status: " + cs.toString());
  return cs;
}
/**
 * Get the cluster-wide maximum number of reduce tasks.
 *
 * @return the maximum reduce task capacity of the cluster
 * @throws IOException if the cluster status cannot be retrieved
 */
public static final int getMaxNumReds() throws IOException {
  JobConf job = new JobConf(Utils.class);
  JobClient client = new JobClient(job);
  try {
    ClusterStatus cluster = client.getClusterStatus();
    return cluster.getMaxReduceTasks();
  } finally {
    // Close the client so its RPC connection to the JobTracker is released.
    client.close();
  }
}
/**
 * Get the cluster-wide maximum number of map tasks.
 *
 * @return the maximum map task capacity of the cluster
 * @throws IOException if the cluster status cannot be retrieved
 */
public static final int getMaxNumMaps() throws IOException {
  JobConf job = new JobConf(Utils.class);
  JobClient client = new JobClient(job);
  try {
    ClusterStatus cluster = client.getClusterStatus();
    return cluster.getMaxMapTasks();
  } finally {
    // Close the client so its RPC connection to the JobTracker is released.
    client.close();
  }
}
/**
 * Get the number of currently free map slots in the cluster
 * (maximum capacity minus the maps currently running).
 *
 * @return the number of available map slots
 * @throws IOException if the cluster status cannot be retrieved
 */
public static final int getNumAvailableMaps() throws IOException {
  JobConf job = new JobConf(Utils.class);
  JobClient client = new JobClient(job);
  try {
    ClusterStatus cluster = client.getClusterStatus();
    int maxMaps = cluster.getMaxMapTasks();
    int runnings = cluster.getMapTasks();
    return maxMaps - runnings;
  } finally {
    // Close the client so its RPC connection to the JobTracker is released.
    client.close();
  }
}
/**
 * Get the number of currently free reduce slots in the cluster
 * (maximum capacity minus the reduces currently running).
 *
 * @return the number of available reduce slots
 * @throws IOException if the cluster status cannot be retrieved
 */
public static final int getNumAvailableReds() throws IOException {
  JobConf job = new JobConf(Utils.class);
  JobClient client = new JobClient(job);
  try {
    ClusterStatus cluster = client.getClusterStatus();
    int maxReduces = cluster.getMaxReduceTasks();
    int runnings = cluster.getReduceTasks();
    return maxReduces - runnings;
  } finally {
    // Close the client so its RPC connection to the JobTracker is released.
    client.close();
  }
}
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @return the status information about the Map-Reduce cluster as an object
 *     of {@link ClusterStatus}.
 * @throws IOException if the status cannot be fetched
 */
public ClusterStatus getClusterStatus() throws IOException {
  // Delegate to the detailed variant; per-tracker details are not needed.
  final boolean detailed = false;
  return getClusterStatus(detailed);
}
/**
 * Get status information about the max available Reduces in the cluster.
 *
 * @return the max available Reduces in the cluster
 * @throws IOException if the cluster status cannot be retrieved
 */
public int getDefaultReduces() throws IOException {
  final ClusterStatus status = getClusterStatus();
  return status.getMaxReduceTasks();
}
/**
 * Get status information about the max available Maps in the cluster.
 *
 * @return the max available Maps in the cluster
 * @throws IOException if the cluster status cannot be retrieved
 */
public int getDefaultMaps() throws IOException {
  final ClusterStatus status = getClusterStatus();
  return status.getMaxMapTasks();
}
/**
 * Get status information about the max available Maps in the cluster.
 *
 * @return the max available Maps in the cluster
 * @throws IOException if the cluster status cannot be retrieved
 */
public int getDefaultMaps() throws IOException {
  final ClusterStatus status = getClusterStatus();
  return status.getMaxMapTasks();
}
/**
 * Get status information about the max available Reduces in the cluster.
 *
 * @return the max available Reduces in the cluster
 * @throws IOException if the cluster status cannot be retrieved
 */
public int getDefaultReduces() throws IOException {
  final ClusterStatus status = getClusterStatus();
  return status.getMaxReduceTasks();
}
/** * Wait for the jobtracker to be RUNNING. */ static void waitForJobTracker(JobClient jobClient) { while (true) { try { ClusterStatus status = jobClient.getClusterStatus(); while (status.getJobTrackerStatus() != JobTrackerStatus.RUNNING) { waitFor(100); status = jobClient.getClusterStatus(); } break; // means that the jt is ready } catch (IOException ioe) {} } }
/**
 * Block until at least one tasktracker has registered with the cluster,
 * polling once per second.
 *
 * @param client client used to poll the cluster status
 * @throws IOException if the cluster status cannot be retrieved
 */
private void waitForTaskTrackers(JobClient client) throws IOException {
  LOG.info("Waiting for tasktrackers...");
  while (true) {
    ClusterStatus clusterStatus = client.getClusterStatus();
    int taskTrackerCount = clusterStatus.getTaskTrackers();
    if (taskTrackerCount > 0) {
      LOG.info("{} tasktrackers reported in. Continuing.", taskTrackerCount);
      break;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers can observe the interruption.
      Thread.currentThread().interrupt();
      break;
    }
  }
}
/**
 * Return the status information about the Map-Reduce cluster.
 *
 * @return the current {@link ClusterStatus} reported by the cluster
 * @throws Exception if the cluster status cannot be retrieved
 */
public ClusterStatus getClusterStatus() throws Exception {
  ClusterStatus cs;
  try {
    JobConf job = new JobConf(conf, ExecDriver.class);
    JobClient jc = new JobClient(job);
    try {
      cs = jc.getClusterStatus();
    } finally {
      // JobClient holds an RPC connection; close it so it is not leaked.
      jc.close();
    }
  } catch (Exception e) {
    // Log with the full stack trace (printStackTrace() bypasses the log
    // framework) and preserve the original exception for the caller.
    LOG.error("Failed to retrieve cluster status", e);
    throw e;
  }
  LOG.info("Returning cluster status: " + cs.toString());
  return cs;
}
/**
 * Return the status information about the Map-Reduce cluster.
 *
 * @return the current {@link ClusterStatus} reported by the cluster
 * @throws Exception if the cluster status cannot be retrieved
 */
public ClusterStatus getClusterStatus() throws Exception {
  ClusterStatus cs;
  try {
    JobConf job = new JobConf(conf);
    JobClient jc = new JobClient(job);
    try {
      cs = jc.getClusterStatus();
    } finally {
      // JobClient holds an RPC connection; close it so it is not leaked.
      jc.close();
    }
  } catch (Exception e) {
    // Log with the full stack trace (printStackTrace() bypasses the log
    // framework) and preserve the original exception for the caller.
    LOG.error("Failed to retrieve cluster status", e);
    throw e;
  }
  LOG.info("Returning cluster status: " + cs.toString());
  return cs;
}
/** * Wait for the jobtracker to be RUNNING. */ static void waitForJobTracker(JobClient jobClient) { while (true) { try { ClusterStatus status = jobClient.getClusterStatus(); while (status.getJobTrackerStatus() != JobTrackerStatus.RUNNING) { waitFor(100); status = jobClient.getClusterStatus(); } break; // means that the jt is ready } catch (IOException ioe) {} } }
/**
 * Block until at least one tasktracker has registered with the cluster,
 * polling once per second and printing a progress dot per attempt.
 *
 * @param client client used to poll the cluster status
 * @throws IOException if the cluster status cannot be retrieved
 */
private static void waitForTaskTrackers(JobClient client) throws IOException {
  while (true) {
    ClusterStatus clusterStatus = client.getClusterStatus();
    int taskTrackerCount = clusterStatus.getTaskTrackers();
    if (taskTrackerCount > 0) {
      break;
    }
    try {
      System.out.print(".");
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers can observe the interruption.
      Thread.currentThread().interrupt();
      break;
    }
  }
}
private static String getMRStats() throws Exception { Configuration conf = CachedConfiguration.getInstance(); // No alternatives for hadoop 20 JobClient jc = new JobClient(new org.apache.hadoop.mapred.JobConf(conf)); ClusterStatus cs = jc.getClusterStatus(false); return "" + cs.getMapTasks() + " " + cs.getMaxMapTasks() + " " + cs.getReduceTasks() + " " + cs.getMaxReduceTasks() + " " + cs.getTaskTrackers() + " " + cs.getBlacklistedTrackers(); }
/**
 * Wire up the mocked cluster state before each test: the job conf is set to
 * REDUCERS reduce tasks, the mocked cluster reports NODES tasktrackers, and
 * a TaskCalculator is built from the mocked collaborators.
 */
@Before public void setup() throws IOException {
  conf.setNumReduceTasks(REDUCERS);
  // Stub the mocked cluster status to report a fixed tasktracker count.
  when(clusterStatus.getTaskTrackers()).thenReturn(NODES);
  // The mocked client hands back the stubbed status and the real conf.
  when(client.getClusterStatus()).thenReturn(clusterStatus);
  when(client.getConf()).thenReturn(conf);
  taskCalculator = new TaskCalculator(client, nodeCapacityProvider, yarnContainerAllocator);
}
/**
 * Run a SleepJob sized from the live cluster: twice the cluster's map
 * capacity, an equal number of reduces, and per-task sleep times taken from
 * the tasktracker expiry interval, while task- and tracker-killing threads
 * run alongside the job.
 */
private void runSleepJobTest(final JobClient jc, final Configuration conf) throws Exception {
  final ClusterStatus status = jc.getClusterStatus();
  final int mapCount = status.getMaxMapTasks() * 2;
  final int reduceCount = mapCount;
  final int mapSleepMillis = (int) status.getTTExpiryInterval();
  final int reduceSleepMillis = mapSleepMillis;
  final String[] sleepJobArgs = {
      "-m", Integer.toString(mapCount),
      "-r", Integer.toString(reduceCount),
      "-mt", Integer.toString(mapSleepMillis),
      "-rt", Integer.toString(reduceSleepMillis)
  };
  runTest(jc, conf, "org.apache.hadoop.mapreduce.SleepJob", sleepJobArgs,
      new KillTaskThread(jc, 2, 0.2f, false, 2),
      new KillTrackerThread(jc, 2, 0.4f, false, 1));
  LOG.info("SleepJob done");
}