/**
 * Creates an empty {@link Result} for the mapred framework to fill in.
 * (The old doc said {@code RowResult}; this method returns {@code Result}.)
 *
 * @return a new, empty Result
 * @see org.apache.hadoop.mapred.RecordReader#createValue()
 */
public Result createValue() {
  return new Result();
}
/**
 * Creates an empty {@link Result} value object for the record reader to populate.
 *
 * @return a new, empty Result
 */
@Override
public Result createValue() {
  return new Result();
}
/**
 * Static factory wrapping a {@link Cursor} in a {@link Result}.
 *
 * @param cursor the cursor to carry in the returned Result
 * @return a Result backed by the given cursor
 */
public static Result createCursorResult(Cursor cursor) {
  return new Result(cursor);
}
/**
 * Creates a {@link ResultWritable} wrapping a fresh, empty {@link Result}
 * for the record reader to populate.
 *
 * @return a new ResultWritable around an empty Result
 */
@Override
public ResultWritable createValue() {
  return new ResultWritable(new Result());
}
class TaskAsCallable implements Callable<Result> { @Override public Result call() { return a new Result() // this is where the work is done. } } ExecutorService executor = Executors.newFixedThreadPool(300); Future<Result> task = executor.submit(new TaskAsCallable()); Result result = task.get(); // this blocks until result is ready
/**
 * Stubs the quota table so every {@code Get} returns an empty {@link Result},
 * i.e. the mocked connection reports no stored snapshot sizes.
 *
 * @throws IOException declared to match the mocked Table API; not thrown here
 */
void mockNoSnapshotSizes() throws IOException {
  Table quotaTable = mock(Table.class);
  // Any lookup of the quota table on this connection yields the mock.
  when(conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)).thenReturn(quotaTable);
  // Every Get against the mock returns an empty Result (no snapshot sizes).
  when(quotaTable.get(any(Get.class))).thenReturn(new Result());
}
}
/**
 * Verifies that the shared {@code Result.EMPTY_RESULT} singleton rejects
 * mutation: both {@code copyFrom} and {@code setExists} must throw
 * {@link UnsupportedOperationException}.
 */
public void testEmptyResultIsReadonly() {
  Result readOnly = Result.EMPTY_RESULT;
  Result mutable = new Result();

  // Attempting to overwrite the singleton's contents must fail.
  try {
    readOnly.copyFrom(mutable);
    fail("UnsupportedOperationException should have been thrown!");
  } catch (UnsupportedOperationException ex) {
    LOG.debug("As expected: " + ex.getMessage());
  }

  // Attempting to flip its exists flag must fail as well.
  try {
    readOnly.setExists(true);
    fail("UnsupportedOperationException should have been thrown!");
  } catch (UnsupportedOperationException ex) {
    LOG.debug("As expected: " + ex.getMessage());
  }
}
/**
 * Executes a single-row Get and returns the first matching row, or an empty
 * {@link Result} when nothing matches. Filters are not supported by this
 * implementation and are ignored with a warning; if the backend hands back
 * more than one row, only the first is returned (also with a warning).
 *
 * @param get the Get describing row, families, time range and max versions
 * @return the first matching Result, or an empty Result if none
 * @throws IOException if the underlying fetch fails
 */
@Override
public Result get(Get get) throws IOException {
  TimeRange range = get.getTimeRange();
  String spec = buildRowSpec(get.getRow(), get.getFamilyMap(),
      range.getMin(), range.getMax(), get.getMaxVersions());
  if (get.getFilter() != null) {
    LOG.warn("filters not supported on gets");
  }
  Result[] results = getResults(spec);
  // No match: callers expect an empty Result rather than null.
  if (results.length == 0) {
    return new Result();
  }
  if (results.length > 1) {
    LOG.warn("too many results for get (" + results.length + ")");
  }
  return results[0];
}
ret.add(new Result(kvs));
if (value == null) value = new Result(); try { try {
/**
 * Create table data and run tests on specified htable using the
 * o.a.h.hbase.mapred API.
 *
 * @param table the table the TableRecordReader scans
 * @throws IOException if reader initialization or iteration fails
 */
static void runTestMapred(Table table) throws IOException {
  // Configure a reader over rows "aaa".."zzz" on the given columns.
  org.apache.hadoop.hbase.mapred.TableRecordReader trr =
      new org.apache.hadoop.hbase.mapred.TableRecordReader();
  trr.setStartRow("aaa".getBytes());
  trr.setEndRow("zzz".getBytes());
  trr.setHTable(table);
  trr.setInputColumns(columns);
  trr.init();
  // Reusable key/value holders, filled in by each next() call.
  Result r = new Result();
  ImmutableBytesWritable key = new ImmutableBytesWritable();
  boolean more = trr.next(key, r);
  assertTrue(more);
  checkResult(r, key, "aaa".getBytes(), "value aaa".getBytes());
  more = trr.next(key, r);
  assertTrue(more);
  checkResult(r, key, "bbb".getBytes(), "value bbb".getBytes());
  // no more data
  more = trr.next(key, r);
  assertFalse(more);
}
@Test public void testGetHRegionInfo() throws IOException { assertNull(MetaTableAccessor.getRegionInfo(new Result())); List<Cell> kvs = new ArrayList<>(); Result r = Result.create(kvs); assertNull(MetaTableAccessor.getRegionInfo(r)); byte [] f = HConstants.CATALOG_FAMILY; // Make a key value that doesn't have the expected qualifier. kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.SERVER_QUALIFIER, f)); r = Result.create(kvs); assertNull(MetaTableAccessor.getRegionInfo(r)); // Make a key that does not have a regioninfo value. kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER, f)); RegionInfo hri = MetaTableAccessor.getRegionInfo(Result.create(kvs)); assertTrue(hri == null); // OK, give it what it expects kvs.clear(); kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER, RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO))); hri = MetaTableAccessor.getRegionInfo(Result.create(kvs)); assertNotNull(hri); assertTrue(RegionInfo.COMPARATOR.compare(hri, RegionInfoBuilder.FIRST_META_REGIONINFO) == 0); }
Result r = new Result(); ImmutableBytesWritable key = new ImmutableBytesWritable();
/**
 * Decodes the next Tuple from the backing byte buffer, advancing
 * {@code offset} past it, or returns null once the buffer is exhausted.
 * The decoded value (or null) is also cached in {@code next}.
 *
 * @return the next tuple, or null at end of input
 * @throws SQLException declared for the Tuple decoding API
 */
private Tuple advance() throws SQLException {
  // Exhausted: record and report end of input.
  if (offset >= bytes.length) {
    return next = null;
  }
  // Each entry is a varint length prefix followed by that many payload bytes.
  int size = ByteUtil.vintFromBytes(bytes, offset);
  offset += WritableUtils.getVIntSize(size);
  ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes, offset, size);
  offset += size;
  return next = new ResultTuple(new Result(ptr));
}
/**
 * Builds a single-cell {@link Result} that carries a sequence error code for
 * the given row: the code is encoded as an INTEGER into the cell value under
 * the sequence family / empty column at the supplied timestamp.
 *
 * @param row the row key the error applies to
 * @param timestamp cell timestamp for the error entry
 * @param errorCode numeric error code to encode into the cell value
 * @return a one-cell Result wrapping the encoded error code
 */
private static Result getErrorResult(byte[] row, long timestamp, int errorCode) {
  byte[] encoded = new byte[PDataType.INTEGER.getByteSize()];
  PDataType.INTEGER.getCodec().encodeInt(errorCode, encoded, 0);
  KeyValue kv = KeyValueUtil.newKeyValue(row,
      PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES,
      QueryConstants.EMPTY_COLUMN_BYTES, timestamp, encoded);
  return new Result(Collections.singletonList(kv));
}
/**