@Override
public RecordReader<StaticBuffer, Iterable<Entry>> createRecordReader(final InputSplit inputSplit, final TaskAttemptContext taskAttemptContext)
        throws IOException, InterruptedException {
    // Wrap HBase's TableRecordReader so rows come back in Titan's binary
    // (StaticBuffer key / Entry columns) representation.
    tableReader = (TableRecordReader) tableInputFormat.createRecordReader(inputSplit, taskAttemptContext);
    titanRecordReader = new HBaseBinaryRecordReader(tableReader, inputCFBytes);
    return titanRecordReader;
}
@Override
public RecordReader<NullWritable, FaunusVertex> createRecordReader(final InputSplit inputSplit, final TaskAttemptContext taskAttemptContext)
        throws IOException, InterruptedException {
    // Wrap HBase's TableRecordReader in a reader that materializes FaunusVertex instances.
    return new TitanHBaseRecordReader(
            this.graph,
            this.vertexQuery,
            this.pathEnabled,
            (TableRecordReader) this.tableInputFormat.createRecordReader(inputSplit, taskAttemptContext));
}
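/*
 * For context: a hedged sketch of how a Hadoop driver (or a test harness) consumes
 * input formats like the ones above. Nothing below comes from the Titan/Faunus
 * sources; RecordReaderDriver and countRecords are illustrative names, built only
 * on the standard org.apache.hadoop.mapreduce API.
 */
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public final class RecordReaderDriver {

    /** Drains every record an InputFormat produces; K/V are whatever the format emits. */
    public static <K, V> long countRecords(InputFormat<K, V> format, Configuration conf) throws Exception {
        Job job = Job.getInstance(conf);
        long count = 0;
        for (InputSplit split : format.getSplits(job)) {
            TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
            // The framework always pairs createRecordReader with initialize before reading.
            try (RecordReader<K, V> reader = format.createRecordReader(split, context)) {
                reader.initialize(split, context);
                while (reader.nextKeyValue()) {
                    count++;
                }
            }
        }
        return count;
    }
}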
@Override
public RecordReader<StaticBuffer, Iterable<Entry>> createRecordReader(final InputSplit inputSplit, final TaskAttemptContext taskAttemptContext)
        throws IOException, InterruptedException {
    // Same wrapping pattern, but the reader is kept uncast and the edge store
    // column family is exposed as binary entries.
    tableReader = tableInputFormat.createRecordReader(inputSplit, taskAttemptContext);
    return new HBaseBinaryRecordReader(tableReader, edgeStoreFamily);
}
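/*
 * A minimal sketch of what a binary wrapper like HBaseBinaryRecordReader might look
 * like, assuming Titan's StaticArrayBuffer/StaticArrayEntry helpers for the byte
 * conversions. This illustrates the pattern; it is not the actual Titan class.
 */
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

import com.thinkaurelius.titan.diskstorage.Entry;
import com.thinkaurelius.titan.diskstorage.StaticBuffer;
import com.thinkaurelius.titan.diskstorage.util.StaticArrayBuffer;
import com.thinkaurelius.titan.diskstorage.util.StaticArrayEntry;

public class BinaryRecordReaderSketch extends RecordReader<StaticBuffer, Iterable<Entry>> {

    private final RecordReader<ImmutableBytesWritable, Result> reader;
    private final byte[] columnFamily; // e.g. the edge store family passed in above

    public BinaryRecordReaderSketch(RecordReader<ImmutableBytesWritable, Result> reader, byte[] columnFamily) {
        this.reader = reader;
        this.columnFamily = columnFamily;
    }

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
        reader.initialize(split, context);
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        return reader.nextKeyValue();
    }

    @Override
    public StaticBuffer getCurrentKey() throws IOException, InterruptedException {
        // The HBase row key doubles as the store-level key (the vertex id in Titan).
        return StaticArrayBuffer.of(reader.getCurrentKey().copyBytes());
    }

    @Override
    public Iterable<Entry> getCurrentValue() throws IOException, InterruptedException {
        // Re-shape the cells of the configured column family into column/value entries.
        List<Entry> entries = new ArrayList<>();
        Map<byte[], byte[]> familyMap = reader.getCurrentValue().getFamilyMap(columnFamily);
        for (Map.Entry<byte[], byte[]> cell : familyMap.entrySet()) {
            entries.add(StaticArrayEntry.of(
                    StaticArrayBuffer.of(cell.getKey()),
                    StaticArrayBuffer.of(cell.getValue())));
        }
        return entries;
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
        return reader.getProgress();
    }

    @Override
    public void close() throws IOException {
        reader.close();
    }
}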
/**
 * Execute the given HbaseSplit for a query against an HBase table.
 * <p>
 * Rebuilds the Scan serialized into the split metadata, applies the split's
 * optimizations (row-key range restriction, secondary indexes, column pruning),
 * and returns an initialized reader over the matching rows.
 *
 * @param session       current session
 * @param split         the HbaseSplit to execute
 * @param columnHandles list of HbaseColumnHandle used for column pruning
 * @return an initialized {@link org.apache.hadoop.mapreduce.RecordReader} of
 *         {@code ImmutableBytesWritable}/{@code Result} pairs
 */
public RecordReader<ImmutableBytesWritable, Result> execSplit(ConnectorSession session, HbaseSplit split, List<HbaseColumnHandle> columnHandles)
        throws IllegalAccessException, NoSuchFieldException, IOException, InterruptedException {
    TableName tableName = TableName.valueOf(split.getSchema(), split.getTable());

    // Reconstruct the Scan from the split metadata and apply session-level pruning.
    Scan scan = TabletSplitMetadata.convertStringToScan(split.getSplitMetadata().getScan());
    buildScan(scan, session, columnHandles);

    TableInputFormat tableInputFormat = getNewTableInputFormat(connection, tableName);
    tableInputFormat.setScan(scan);

    RecordReader<ImmutableBytesWritable, Result> resultRecordReader = tableInputFormat.createRecordReader(
            new TableSplit(
                    TableName.valueOf(split.getSplitMetadata().getTableName()),
                    scan,
                    split.getSplitMetadata().getStartRow(),
                    split.getSplitMetadata().getEndRow(),
                    split.getSplitMetadata().getRegionLocation(),
                    split.getSplitMetadata().getLength()),
            null);
    // createRecordReader already applied the split's row range to the Scan;
    // this code path relies on the underlying reader tolerating null arguments here.
    resultRecordReader.initialize(null, null);
    return resultRecordReader;
}
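/*
 * A hedged usage sketch for execSplit: drain the initialized reader, then close it.
 * Here `client` (the object defining execSplit), `session`, `split`, `columnHandles`,
 * and processRow are placeholders for the surrounding Presto connector plumbing.
 */
RecordReader<ImmutableBytesWritable, Result> reader = client.execSplit(session, split, columnHandles);
try {
    while (reader.nextKeyValue()) {
        Result row = reader.getCurrentValue();
        processRow(row); // hypothetical downstream handler for the pruned cells
    }
}
finally {
    reader.close();
}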