/**
 * Closes the wrapped Hadoop record reader and releases its resources.
 *
 * @throws IOException if the underlying reader fails to close.
 */
@Override
public void close() throws IOException {
	// Plain pass-through to the wrapped reader.
	this.delegate.close();
}
/**
 * Creates a fresh value instance via the wrapped Hadoop record reader.
 *
 * @return a new value object produced by the delegate.
 */
@Override
public V createValue() {
	return this.delegate.createValue();
}
/**
 * Reports how far the wrapped reader has progressed through its input.
 *
 * @return the delegate's progress, typically in the range [0.0, 1.0].
 * @throws IOException if the delegate cannot compute its progress.
 */
@Override
public float getProgress() throws IOException {
	return this.delegate.getProgress();
}
}
/**
 * Views the given Hadoop {@code Configuration} as a {@code JobConf}.
 *
 * @param conf the configuration to convert.
 * @return {@code conf} itself when it already is a {@code JobConf};
 *         otherwise a new {@code JobConf} copying its entries.
 */
public static JobConf toJobConf(Configuration conf) {
	// Avoid a copy when the instance is already of the requested type.
	return (conf instanceof JobConf) ? (JobConf) conf : new JobConf(conf);
}
}
// Initializes the input format under test with a default (empty) JobConf.
// NOTE(review): if this annotation is JUnit 4's @BeforeClass, the method must
// be static or it will never run — verify whether this is TestNG (instance
// methods allowed) or JUnit before relying on it.
@BeforeClass public void setUp() { inputFormat = new TextInputFormat(); inputFormat.configure(new JobConf()); }
/**
 * Reads the prefix to count from the job configuration.
 *
 * @param c the Hadoop job configuration carrying the {@code my.cntPrefix} entry.
 */
@Override
public void configure(final JobConf c) {
	final String prefix = c.get("my.cntPrefix");
	this.countPrefix = prefix;
}
/**
 * Advances the underlying Hadoop record reader by one record and caches
 * whether a record was produced.
 *
 * @throws IOException if reading the next record fails; in that case
 *         {@code fetched} intentionally remains unset.
 */
protected void fetchNext() throws IOException {
	// Read first, then mark as fetched — a failing read must not flip the flag.
	this.hasNext = this.recordReader.next(this.key, this.value);
	this.fetched = true;
}
@Override public void finalizeGlobal(int parallelism) throws IOException { try { JobContext jobContext = new JobContextImpl(this.jobConf, new JobID()); OutputCommitter outputCommitter = this.jobConf.getOutputCommitter(); // finalize HDFS output format outputCommitter.commitJob(jobContext); } catch (Exception e) { throw new RuntimeException(e); } }
/**
 * Creates a fresh key instance via the wrapped Hadoop record reader.
 *
 * @return a new key object produced by the delegate.
 */
@Override
public K createKey() {
	return this.delegate.createKey();
}
/** * commit the task by moving the output file out from the temporary directory. * @throws java.io.IOException */ @Override public void close() throws IOException { // enforce sequential close() calls synchronized (CLOSE_MUTEX) { this.recordWriter.close(new HadoopDummyReporter()); if (this.outputCommitter.needsTaskCommit(this.context)) { this.outputCommitter.commitTask(this.context); } } }
// Returns the hosts where this split's data resides. Locality is a
// best-effort scheduling hint, so a failure to resolve locations is
// deliberately swallowed and reported as "no preferred hosts".
@Override public String[] getHostnames() { try { return this.hadoopInputSplit.getLocations(); } catch (IOException e) { return new String[0]; } }
/**
 * Writes one record by unpacking the Flink tuple into the key/value pair
 * expected by the Hadoop record writer.
 *
 * @param record tuple whose first field is the key and second field the value.
 * @throws IOException if the underlying writer fails.
 */
@Override
public void writeRecord(Tuple2<K, V> record) throws IOException {
	final K hadoopKey = record.f0;
	final V hadoopValue = record.f1;
	this.recordWriter.write(hadoopKey, hadoopValue);
}
}
/**
 * Returns the current position of the wrapped Hadoop record reader.
 *
 * @return the delegate's current position in the input.
 * @throws IOException if the delegate cannot report its position.
 */
@Override
public long getPos() throws IOException {
	return this.delegate.getPos();
}
/**
 * Maps a Hadoop Mapper (mapred API) to a Flink FlatMapFunction,
 * configuring the mapper with a fresh, empty {@code JobConf}.
 *
 * @param hadoopMapper The Hadoop Mapper to wrap.
 */
public HadoopMapFunction(Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> hadoopMapper) {
	// Delegate to the two-argument constructor with default configuration.
	this(hadoopMapper, new JobConf());
}
/**
 * Reads the filter prefix from the job configuration.
 *
 * @param c the Hadoop job configuration carrying the {@code my.filterPrefix} entry.
 */
@Override
public void configure(JobConf c) {
	this.filterPrefix = c.get("my.filterPrefix");
}
@Override public void close() throws IOException { if (this.recordReader != null) { // enforce sequential close() calls synchronized (CLOSE_MUTEX) { this.recordReader.close(); } } }
/**
 * Maps two Hadoop Reducer (mapred API) to a combinable Flink GroupReduceFunction,
 * configuring both with a fresh, empty {@code JobConf}.
 *
 * @param hadoopReducer The Hadoop Reducer that is mapped to a GroupReduceFunction.
 * @param hadoopCombiner The Hadoop Reducer that is mapped to the combiner function.
 */
public HadoopReduceCombineFunction(Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> hadoopReducer,
		Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN> hadoopCombiner) {
	// Delegate to the three-argument constructor with default configuration.
	this(hadoopReducer, hadoopCombiner, new JobConf());
}
/**
 * Reads the prefix to count from the job configuration.
 *
 * @param c the Hadoop job configuration carrying the {@code my.cntPrefix} entry.
 */
@Override
public void configure(final JobConf c) {
	this.countPrefix = c.get("my.cntPrefix");
}
/**
 * Maps a Hadoop Reducer (mapred API) to a non-combinable Flink GroupReduceFunction,
 * configuring the reducer with a fresh, empty {@code JobConf}.
 *
 * @param hadoopReducer The Hadoop Reducer to wrap.
 */
public HadoopReduceFunction(Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> hadoopReducer) {
	// Delegate to the two-argument constructor with default configuration.
	this(hadoopReducer, new JobConf());
}