public ReadIopsCalculator(JobClient jobClient, DynamoDBClient dynamoDBClient, String tableName, int totalSegments, int localSegments) { this.jobConf = (JobConf) jobClient.getConf(); this.jobClient = jobClient; this.dynamoDBClient = dynamoDBClient; this.tableName = tableName; this.totalSegments = totalSegments; this.localSegments = localSegments; this.throughputPercent = Double.parseDouble(jobConf.get(DynamoDBConstants .THROUGHPUT_READ_PERCENT, DynamoDBConstants.DEFAULT_THROUGHPUT_PERCENTAGE)); log.info("Table name: " + tableName); log.info("Throughput percent: " + throughputPercent); }
/**
 * Builds a read-IOPS calculator from the job's configuration.
 *
 * @param jobClient      source of the job configuration
 * @param dynamoDBClient DynamoDB client for table lookups
 * @param tableName      table whose read capacity is rationed
 * @param totalSegments  number of scan segments in the whole job
 * @param localSegments  number of scan segments handled locally
 */
public ReadIopsCalculator(JobClient jobClient, DynamoDBClient dynamoDBClient, String tableName,
    int totalSegments, int localSegments) {
  this.jobConf = (JobConf) jobClient.getConf();
  this.jobClient = jobClient;
  this.dynamoDBClient = dynamoDBClient;
  this.tableName = tableName;
  this.totalSegments = totalSegments;
  this.localSegments = localSegments;
  // Read the allowed throughput share, falling back to the library default.
  this.throughputPercent = Double.parseDouble(
      jobConf.get(DynamoDBConstants.THROUGHPUT_READ_PERCENT,
          DynamoDBConstants.DEFAULT_THROUGHPUT_PERCENTAGE));
  log.info("Table name: " + tableName);
  log.info("Throughput percent: " + throughputPercent);
}
/**
 * Cancel a delegation token from the JobTracker.
 *
 * <p>Simply delegates to {@link Token#cancel} with this client's configuration.
 *
 * @param token the token to cancel
 * @throws InvalidToken if the token is no longer valid
 * @throws IOException if the cancellation fails
 * @throws InterruptedException if the call is interrupted
 * @deprecated Use {@link Token#cancel} instead
 */
@Deprecated
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
                                  ) throws InvalidToken, IOException,
                                           InterruptedException {
  token.cancel(getConf());
}
/**
 * Cancel a delegation token from the JobTracker.
 *
 * <p>Delegates to {@link Token#cancel} using this client's configuration.
 *
 * @param token the token to cancel
 * @throws InvalidToken if the token is no longer valid
 * @throws IOException if the cancellation fails
 * @throws InterruptedException if the call is interrupted
 * @deprecated Use {@link Token#cancel} instead
 */
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
                                  ) throws InvalidToken, IOException, InterruptedException {
  token.cancel(getConf());
}
/**
 * Cancel a delegation token from the JobTracker.
 *
 * <p>Delegates to {@link Token#cancel} using this client's configuration.
 *
 * @param token the token to cancel
 * @throws InvalidToken if the token is no longer valid
 * @throws IOException if the cancellation fails
 * @throws InterruptedException if the call is interrupted
 * @deprecated Use {@link Token#cancel} instead
 */
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
                                  ) throws InvalidToken, IOException, InterruptedException {
  token.cancel(getConf());
}
/**
 * Cancel a delegation token from the JobTracker.
 *
 * <p>Delegates to {@link Token#cancel} using this client's configuration.
 *
 * @param token the token to cancel
 * @throws InvalidToken if the token is no longer valid
 * @throws IOException if the cancellation fails
 * @throws InterruptedException if the call is interrupted
 * @deprecated Use {@link Token#cancel} instead
 */
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
                                  ) throws InvalidToken, IOException, InterruptedException {
  token.cancel(getConf());
}
/**
 * Renew a delegation token.
 *
 * <p>Simply delegates to {@link Token#renew} with this client's configuration.
 *
 * @param token the token to renew
 * @return the new expiration time of the token
 *         (the original javadoc claimed a boolean; the method returns {@code long})
 * @throws InvalidToken if the token is no longer valid
 * @throws IOException if the renewal fails
 * @throws InterruptedException if the call is interrupted
 * @deprecated Use {@link Token#renew} instead
 */
@Deprecated
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
                                 ) throws InvalidToken, IOException,
                                          InterruptedException {
  return token.renew(getConf());
}
/**
 * Get a filesystem handle. We need this to prepare jobs
 * for submission to the MapReduce system.
 *
 * @return the filesystem handle.
 */
public synchronized FileSystem getFs() throws IOException {
  if (fs == null) {
    // Lazily resolve the filesystem that backs the system directory.
    fs = getSystemDir().getFileSystem(getConf());
  }
  return fs;
}
/**
 * Get a filesystem handle. We need this to prepare jobs
 * for submission to the MapReduce system.
 *
 * @return the filesystem handle.
 */
public synchronized FileSystem getFs() throws IOException {
  if (this.fs == null) {
    // First use: derive the filesystem from the system directory's path.
    Path systemDir = getSystemDir();
    this.fs = systemDir.getFileSystem(getConf());
  }
  return this.fs;
}
/**
 * Renew a delegation token.
 *
 * <p>Delegates to {@link Token#renew} using this client's configuration.
 *
 * @param token the token to renew
 * @return the new expiration time of the token
 * @throws InvalidToken if the token is no longer valid
 * @throws IOException if the renewal fails
 * @throws InterruptedException if the call is interrupted
 * @deprecated Use {@link Token#renew} instead
 */
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
                                 ) throws InvalidToken, IOException, InterruptedException {
  return token.renew(getConf());
}
/**
 * Renew a delegation token.
 *
 * <p>Delegates to {@link Token#renew} using this client's configuration.
 *
 * @param token the token to renew
 * @return the new expiration time of the token
 * @throws InvalidToken if the token is no longer valid
 * @throws IOException if the renewal fails
 * @throws InterruptedException if the call is interrupted
 * @deprecated Use {@link Token#renew} instead
 */
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
                                 ) throws InvalidToken, IOException, InterruptedException {
  return token.renew(getConf());
}
/**
 * Renew a delegation token.
 *
 * <p>Delegates to {@link Token#renew} using this client's configuration.
 *
 * @param token the token to renew
 * @return the new expiration time of the token
 * @throws InvalidToken if the token is no longer valid
 * @throws IOException if the renewal fails
 * @throws InterruptedException if the call is interrupted
 * @deprecated Use {@link Token#renew} instead
 */
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
                                 ) throws InvalidToken, IOException, InterruptedException {
  return token.renew(getConf());
}
/**
 * Merges YARN application metrics for the given job into the supplied counter map.
 *
 * @param hdStepStats step stats whose job client supplies the cluster configuration
 * @param jobID       the Hadoop job id; the "job" prefix is rewritten to "application"
 *                    to form the YARN application id
 * @param counters    destination map; YARN counters are added only when available
 */
private void addYarnPerformanceMetrics(HadoopStepStats hdStepStats, String jobID,
                                       TwoNestedMap<String, String, Long> counters) {
  Configuration config = hdStepStats.getJobClient().getConf();
  // Test the type explicitly rather than catching ClassCastException.
  if (config instanceof JobConf) {
    Optional<YarnApiHelper.ApplicationInfo> yarnAppInfo =
        YarnApiHelper.getYarnAppInfo((JobConf) config, jobID.replace("job", "application"));
    if (yarnAppInfo.isPresent()) {
      counters.putAll(yarnAppInfo.get().asCounterMap());
    }
  } else {
    LOG.error("The class of the configuration is not JobConf - instead it is "
        + config.getClass().getCanonicalName());
  }
}
/**
 * Prints the job history found in the given output directory.
 *
 * @param outputDir directory holding the job history files
 * @param all       whether to print full details for every job
 * @throws IOException if the history files cannot be read
 */
private void viewHistory(String outputDir, boolean all) throws IOException {
  // The viewer is single-use; construct and print in one step.
  new HistoryViewer(outputDir, getConf(), all).print();
}
/**
 * Renders the job history stored under the given output directory.
 *
 * @param outputDir directory holding the job history files
 * @param all       whether to print full details for every job
 * @throws IOException if the history files cannot be read
 */
private void viewHistory(String outputDir, boolean all) throws IOException {
  HistoryViewer viewer = new HistoryViewer(outputDir, getConf(), all);
  viewer.print();
}
/**
 * Computes the maximum number of map tasks the cluster can run for this job.
 *
 * @param jobClient client whose configuration describes the cluster topology
 * @return the maximum number of concurrent map tasks
 * @throws IOException if cluster information cannot be retrieved
 */
public static int calcMaxMapTasks(JobClient jobClient) throws IOException {
  JobConf jobConf = (JobConf) jobClient.getConf();
  // Capacity comes from the cluster topology; containers are spread round-robin.
  TaskCalculator calculator = new TaskCalculator(
      jobClient,
      new ClusterTopologyNodeCapacityProvider(jobConf),
      new RoundRobinYarnContainerAllocator());
  return calculator.getMaxMapTasks();
}
/**
 * Derives the cluster-wide cap on concurrent map tasks for the given job.
 *
 * @param jobClient client whose configuration describes the cluster
 * @return the maximum number of map tasks
 * @throws IOException if cluster information cannot be retrieved
 */
public static int calcMaxMapTasks(JobClient jobClient) throws IOException {
  JobConf configuration = (JobConf) jobClient.getConf();
  NodeCapacityProvider capacityProvider = new ClusterTopologyNodeCapacityProvider(configuration);
  YarnContainerAllocator allocator = new RoundRobinYarnContainerAllocator();
  return new TaskCalculator(jobClient, capacityProvider, allocator).getMaxMapTasks();
}
@Before
public void setup() throws IOException {
  conf.setNumReduceTasks(REDUCERS);
  // Stub the job client to report a fixed configuration and cluster size.
  when(client.getConf()).thenReturn(conf);
  when(client.getClusterStatus()).thenReturn(clusterStatus);
  when(clusterStatus.getTaskTrackers()).thenReturn(NODES);
  taskCalculator = new TaskCalculator(client, nodeCapacityProvider, yarnContainerAllocator);
}
@Before
public void setup() {
  // Table reports provisioned billing with a known read capacity.
  TableDescription description = new TableDescription()
      .withBillingModeSummary(new BillingModeSummary()
          .withBillingMode(DynamoDBConstants.BILLING_MODE_PROVISIONED))
      .withProvisionedThroughput(new ProvisionedThroughputDescription()
          .withReadCapacityUnits(READ_CAPACITY_UNITS));
  when(dynamoDBClient.describeTable(TABLE_NAME)).thenReturn(description);

  JobConf jobConf = new JobConf();
  jobConf.set(DynamoDBConstants.THROUGHPUT_READ_PERCENT,
      String.valueOf(THROUGHPUT_READ_PERCENT));
  when(jobClient.getConf()).thenReturn(jobConf);

  readIopsCalculator = new ReadIopsCalculator(jobClient, dynamoDBClient, TABLE_NAME,
      TOTAL_SEGMETNS, LOCAL_SEGMENTS);
}
@Before
public void setup() {
  // Table reports provisioned billing with a known write capacity.
  TableDescription description = new TableDescription()
      .withBillingModeSummary(new BillingModeSummary()
          .withBillingMode(DynamoDBConstants.BILLING_MODE_PROVISIONED))
      .withProvisionedThroughput(new ProvisionedThroughputDescription()
          .withWriteCapacityUnits(WRITE_CAPACITY_UNITS));
  when(dynamoDBClient.describeTable(TABLE_NAME)).thenReturn(description);

  JobConf jobConf = new JobConf();
  jobConf.setNumMapTasks(TOTAL_MAP_TASKS);
  jobConf.set("mapreduce.task.attempt.id", "attempt_m_1");
  jobConf.set(DynamoDBConstants.THROUGHPUT_WRITE_PERCENT,
      String.valueOf(THROUGHPUT_WRITE_PERCENT));
  when(jobClient.getConf()).thenReturn(jobConf);

  // Pin the concurrency calculation so the test does not depend on cluster state.
  writeIopsCalculator = new WriteIopsCalculator(jobClient, dynamoDBClient, TABLE_NAME) {
    @Override
    int calculateMaxMapTasks(int totalMapTasks) {
      return MAX_CONCURRENT_MAP_TASKS;
    }
  };
}