@Override
public TupleEntryIterator openForRead( FlowProcess<JobConf> flowProcess, RecordReader input ) throws IOException
  {
  return new HadoopTupleEntrySchemeIterator( flowProcess, this, input );
  }
@Override
protected RecordReader wrapInput( RecordReader recordReader )
  {
  if( measuredRecordReader == null )
    measuredRecordReader = new MeasuredRecordReader( getFlowProcess(), SliceCounters.Read_Duration );

  measuredRecordReader.setRecordReader( super.wrapInput( recordReader ) );

  return measuredRecordReader;
  }
public HadoopTupleEntrySchemeIterator( FlowProcess<? extends Configuration> flowProcess, Tap parentTap, RecordReader recordReader ) throws IOException
  {
  this( flowProcess, parentTap, parentTap.getScheme(), makeIterator( flowProcess, parentTap, recordReader ) );
  }
@Override
public TupleEntryIterator openForRead( FlowProcess<? extends Configuration> flowProcess, RecordReader input ) throws IOException
  {
  // input may be null when this method is called on the client side or cluster side when accumulating
  // for a HashJoin
  return new HadoopTupleEntrySchemeIterator( flowProcess, this, input );
  }
@Override
public TupleEntryIterator openForRead( FlowProcess<JobConf> flowProcess, RecordReader<TupleWrapper, NullWritable> input ) throws IOException
  {
  // input may be null when this method is called on the client side or cluster side when accumulating
  // for a HashJoin
  return new HadoopTupleEntrySchemeIterator( flowProcess, this, input );
  }
@Override
public TupleEntryIterator openForRead( FlowProcess<JobConf> jobConfFlowProcess, RecordReader recordReader ) throws IOException
  {
  return new HadoopTupleEntrySchemeIterator( jobConfFlowProcess, this, recordReader );
  }
@Override
protected TupleEntrySchemeIterator createTupleEntrySchemeIterator( FlowProcess<? extends Configuration> flowProcess, Tap parent, String path, RecordReader recordReader ) throws IOException
  {
  return new HadoopTupleEntrySchemeIterator( flowProcess, new Hfs( parent.getScheme(), path ), recordReader );
  }
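The overrides above are exercised whenever a Hadoop-backed Tap is opened for reading. A minimal client-side sketch follows, assuming the standard Cascading Hfs tap and TextLine scheme and an illustrative HDFS path; calling the single-argument Tap.openForRead( flowProcess ) hands a null RecordReader to the two-argument override, which is exactly the "input may be null" case noted in the comments above.

import org.apache.hadoop.mapred.JobConf;

import cascading.flow.hadoop.HadoopFlowProcess;
import cascading.scheme.hadoop.TextLine;
import cascading.tap.Tap;
import cascading.tap.hadoop.Hfs;
import cascading.tuple.Fields;
import cascading.tuple.TupleEntryIterator;

public class ReadTapExample
  {
  public static void main( String[] args ) throws Exception
    {
    JobConf jobConf = new JobConf();

    // illustrative path; any location resolvable by Hfs works
    Tap tap = new Hfs( new TextLine( new Fields( "offset", "line" ) ), "hdfs:///tmp/input" );

    // openForRead( flowProcess ) delegates with a null RecordReader, and the Tap
    // builds a HadoopTupleEntrySchemeIterator over its input splits
    TupleEntryIterator iterator = tap.openForRead( new HadoopFlowProcess( jobConf ) );

    try
      {
      while( iterator.hasNext() )
        System.out.println( iterator.next().getTuple() );
      }
    finally
      {
      iterator.close();
      }
    }
  }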