  /**
   * Bridges a mapreduce JobContext to its mapred equivalent. Note that this
   * casts the wrapped Configuration to JobConf, so it assumes the context was
   * built from a JobConf in the first place.
   */
  public static org.apache.hadoop.mapred.JobContext createJobContext(org.apache.hadoop.mapreduce.JobContext context) {
    return createJobContext((JobConf) context.getConfiguration(),
        context.getJobID(),
        Reporter.NULL);
  }
  /**
   * Bridges a mapreduce TaskAttemptContext to its mapred equivalent. Unlike
   * createJobContext() above, this copies the Configuration into a fresh
   * JobConf instead of casting, so any Configuration is accepted.
   */
  public static TaskAttemptContext createTaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext context) {
    return createTaskAttemptContext(new JobConf(context.getConfiguration()),
        org.apache.hadoop.mapred.TaskAttemptID.forName(context.getTaskAttemptID().toString()),
        Reporter.NULL);
  }
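  /*
   * For reference, a minimal sketch of the three-argument overloads the two
   * bridges above delegate to, assuming the shim-based implementation
   * suggested by the ShimLoader/HCatShim usage elsewhere in this section;
   * treat the exact signatures as an assumption rather than the library's
   * confirmed API.
   */
  public static org.apache.hadoop.mapred.JobContext createJobContext(JobConf conf,
      org.apache.hadoop.mapreduce.JobID id, Progressable progressable) {
    // The HCat shim papers over mapred/mapreduce differences across Hadoop versions.
    return ShimLoader.getHadoopShims().getHCatShim().createJobContext(conf, id, progressable);
  }

  public static TaskAttemptContext createTaskAttemptContext(JobConf conf,
      org.apache.hadoop.mapred.TaskAttemptID id, Progressable progressable) {
    return ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(conf, id, progressable);
  }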
// Build a per-partition JobContext (with a no-op reporter derived from a
// fresh task attempt ID) and point the storage handler at this partition's spec.
JobContext currContext = HCatMapRedUtil.createJobContext(
    jobConf,
    context.getJobID(),
    InternalUtil.createReporter(HCatMapRedUtil.createTaskAttemptContext(jobConf,
        ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptID())));
HCatOutputFormat.configureOutputStorageHandler(currContext, jobInfo, fullPartSpec);
// Dynamic-partitioning write path: the converted contexts must be captured,
// not discarded, since the storage handler mutates the task context's
// configuration, the job context has to be created from that updated JobConf,
// and the task context is then refreshed against the job context.
org.apache.hadoop.mapred.TaskAttemptContext currTaskContext =
    HCatMapRedUtil.createTaskAttemptContext(context);
configureDynamicStorageHandler(currTaskContext, dynamicPartValues);
localJobInfo = HCatBaseOutputFormat.getJobInfo(currTaskContext.getConfiguration());
// Create the job context last so it picks up all configuration changes.
org.apache.hadoop.mapred.JobContext currJobContext =
    HCatMapRedUtil.createJobContext(currTaskContext);
// Recreate the task context so its JobConf reflects the job context's.
currTaskContext = HCatMapRedUtil.createTaskAttemptContext(
    currJobContext.getJobConf(), currTaskContext.getTaskAttemptID(),
    currTaskContext.getProgressible());
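// A minimal sketch of how the contexts built above are typically consumed
// next in the dynamic-partitioning path: fetch the base committer from the
// refreshed JobConf and run per-partition setup. `baseOutputCommitter` is
// introduced here purely for illustration.
org.apache.hadoop.mapred.OutputCommitter baseOutputCommitter =
    currTaskContext.getJobConf().getOutputCommitter();
baseOutputCommitter.setupJob(currJobContext);   // per-partition "job" setup
baseOutputCommitter.setupTask(currTaskContext); // per-partition task setup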
  @Override
  public void setupTask(TaskAttemptContext context) throws IOException {
    if (!dynamicPartitioningUsed) {
      getBaseOutputCommitter().setupTask(HCatMapRedUtil.createTaskAttemptContext(context));
    }
    // In the dynamic case, per-partition setup happens in the record writer.
  }
  @Override
  public void setupJob(JobContext context) throws IOException {
    getBaseOutputCommitter().setupJob(HCatMapRedUtil.createJobContext(context));
  }
  @Override
  public void abortTask(TaskAttemptContext context) throws IOException {
    getBaseOutputCommitter().abortTask(HCatMapRedUtil.createTaskAttemptContext(context));
  }
  @Override
  public void abortJob(JobContext jobContext, State state) throws IOException {
    getBaseOutputCommitter().abortJob(HCatMapRedUtil.createJobContext(jobContext), state);
    cleanupJob(jobContext);
  }
  @Override
  public boolean needsTaskCommit(TaskAttemptContext context) throws IOException {
    return getBaseOutputCommitter().needsTaskCommit(HCatMapRedUtil.createTaskAttemptContext(context));
  }
  @Override
  public void setupJob(JobContext context) throws IOException {
    if (getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
      getBaseOutputCommitter().setupJob(HCatMapRedUtil.createJobContext(context));
    }
    // In the dynamic use case, setup is invoked through FileRecordWriterContainer.
  }
  @Override
  public void commitTask(TaskAttemptContext context) throws IOException {
    getBaseOutputCommitter().commitTask(HCatMapRedUtil.createTaskAttemptContext(context));
  }
  @Override
  public void commitJob(JobContext jobContext) throws IOException {
    getBaseOutputCommitter().commitJob(HCatMapRedUtil.createJobContext(jobContext));
    cleanupJob(jobContext);
  }
  @Override
  public void setupTask(TaskAttemptContext context) throws IOException {
    getBaseOutputCommitter().setupTask(HCatMapRedUtil.createTaskAttemptContext(context));
  }
  @Override
  public void cleanupJob(JobContext context) throws IOException {
    getBaseOutputCommitter().cleanupJob(HCatMapRedUtil.createJobContext(context));

    // Cancel HCat and JobTracker tokens
    IMetaStoreClient client = null;
    try {
      HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
      client = HCatUtil.getHiveMetastoreClient(hiveConf);
      String tokenStrForm = client.getTokenStrForm();
      if (tokenStrForm != null
          && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
        client.cancelDelegationToken(tokenStrForm);
      }
    } catch (Exception e) {
      LOG.warn("Failed to cancel delegation token", e);
    } finally {
      HCatUtil.closeHiveClientQuietly(client);
    }
  }
}
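// For context, a hedged sketch of the client-side call that puts the
// metastore delegation token (and HCAT_KEY_TOKEN_SIGNATURE) into the job
// configuration in the first place; cleanupJob() above cancels that token at
// the end of the job. Database and table names are placeholders.
org.apache.hadoop.mapreduce.Job job =
    org.apache.hadoop.mapreduce.Job.getInstance(new org.apache.hadoop.conf.Configuration());
HCatOutputFormat.setOutput(job,
    OutputJobInfo.create("default", "my_table", null /* no static partition spec */));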
  @Override
  public boolean needsTaskCommit(TaskAttemptContext context) throws IOException {
    if (!dynamicPartitioningUsed) {
      FileOutputFormatContainer.setWorkOutputPath(context);
      return getBaseOutputCommitter().needsTaskCommit(HCatMapRedUtil.createTaskAttemptContext(context));
    } else {
      // Dynamic partitioning: per-partition committers are driven through
      // FileRecordWriterContainer/TaskCommitContextRegistry, so always ask the
      // framework to call commitTask() on this container.
      return true;
    }
  }
  @Override
  public void abortTask(TaskAttemptContext context) throws IOException {
    if (!dynamicPartitioningUsed) {
      FileOutputFormatContainer.setWorkOutputPath(context);
      getBaseOutputCommitter().abortTask(HCatMapRedUtil.createTaskAttemptContext(context));
    } else {
      try {
        TaskCommitContextRegistry.getInstance().abortTask(context);
      } finally {
        TaskCommitContextRegistry.getInstance().discardCleanupFor(context);
      }
    }
  }
// Fragment from abortJob(): build the mapred-side context once, then delegate
// to the base committer only when dynamic partitioning is not in use.
org.apache.hadoop.mapred.JobContext mapRedJobContext =
    HCatMapRedUtil.createJobContext(jobContext);
if (getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
  getBaseOutputCommitter().abortJob(mapRedJobContext, state);
}
  @Override
  public void commitTask(TaskAttemptContext context) throws IOException {
    if (!dynamicPartitioningUsed) {
      // See HCATALOG-499
      FileOutputFormatContainer.setWorkOutputPath(context);
      getBaseOutputCommitter().commitTask(HCatMapRedUtil.createTaskAttemptContext(context));
    } else {
      try {
        TaskCommitContextRegistry.getInstance().commitTask(context);
      } finally {
        TaskCommitContextRegistry.getInstance().discardCleanupFor(context);
      }
    }
  }
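// The dynamic branches in commitTask()/abortTask() above only replay work
// that the record writer registered earlier. A minimal sketch of that
// registration, assuming the register()/TaskCommitterProxy API implied by the
// calls above; `baseCommitter` and `currTaskContext` stand in for state
// captured by the enclosing writer.
TaskCommitContextRegistry.getInstance().register(currTaskContext,
    new TaskCommitContextRegistry.TaskCommitterProxy() {
      @Override
      public void commitTask(TaskAttemptContext context) throws IOException {
        baseCommitter.commitTask(currTaskContext); // commit this partition's output
      }

      @Override
      public void abortTask(TaskAttemptContext context) throws IOException {
        baseCommitter.abortTask(currTaskContext); // discard this partition's output
      }
    });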