/**
 * Executes the given row operations as one batch, writing per-operation
 * outcomes into {@code results}. Delegates directly to the wrapped table.
 *
 * @param writes Puts/Deletes/Gets to execute as a batch
 * @param results array the underlying table fills with per-op results/errors
 * @throws IOException if the batch fails
 * @throws InterruptedException if the calling thread is interrupted
 */
@Override
public void batch(List<Row> writes, Object[] results) throws IOException, InterruptedException {
  table.batch(writes, results); /* table.flushCommits(); not needed anymore */
}
/**
 * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
 * @param connection connection we're using
 * @param mutations Puts and Deletes to execute on hbase:meta
 * @throws IOException if the batch fails, or wrapped as
 *   {@link InterruptedIOException} if the calling thread is interrupted
 */
public static void mutateMetaTable(final Connection connection, final List<Mutation> mutations)
    throws IOException {
  // try-with-resources guarantees the meta table is closed on every exit path
  // (the original finally-close is subsumed by this).
  try (Table t = getMetaHTable(connection)) {
    debugLogMutations(mutations);
    t.batch(mutations, null);
  } catch (InterruptedException e) {
    // Restore the interrupt flag so callers up the stack can still observe it,
    // then surface the interruption as an IOException subtype.
    Thread.currentThread().interrupt();
    InterruptedIOException ie = new InterruptedIOException(e.getMessage());
    ie.initCause(e);
    throw ie;
  }
}
/**
 * Do the changes and handle the pool.
 * @param tableName table to insert into
 * @param allRows list of actions, batched one inner list at a time
 * @throws IOException on batch failure; a {@link TableNotFoundException}
 *   detected among the causes is re-thrown with the table name, and an
 *   interrupt is surfaced as {@link InterruptedIOException}
 */
private void batch(TableName tableName, Collection<List<Row>> allRows) throws IOException {
  if (allRows.isEmpty()) {
    return;
  }
  Connection connection = getConnection();
  try (Table table = connection.getTable(tableName)) {
    for (List<Row> rows : allRows) {
      table.batch(rows, null);
    }
  } catch (RetriesExhaustedWithDetailsException rewde) {
    for (Throwable ex : rewde.getCauses()) {
      if (ex instanceof TableNotFoundException) {
        // Keep the original exception as the cause instead of dropping it.
        throw (TableNotFoundException)
            new TableNotFoundException("'" + tableName + "'").initCause(rewde);
      }
    }
    throw rewde;
  } catch (InterruptedException ix) {
    // Restore the interrupt flag before translating to an IOException subtype.
    Thread.currentThread().interrupt();
    throw (InterruptedIOException) new InterruptedIOException().initCause(ix);
  }
}
/** Runs the increment once directly, then twice via batch, checking each result. */
private void testAppend(Increment inc) throws Exception {
  // Single-operation path.
  checkResult(table.increment(inc));
  // Batched path: the same increment submitted twice in one call.
  List<Row> batched = Arrays.asList(inc, inc);
  Object[] outcomes = new Object[batched.size()];
  table.batch(batched, outcomes);
  checkResult(outcomes);
}
/** Runs the append once directly, then twice via batch, checking each result. */
private void testAppend(Append append) throws Exception {
  // Single-operation path.
  checkResult(table.append(append));
  // Batched path: the same append submitted twice in one call.
  List<Row> batched = Arrays.asList(append, append);
  Object[] outcomes = new Object[batched.size()];
  table.batch(batched, outcomes);
  checkResult(outcomes);
}
/**
 * On every Put to the observed table, writes a marker row into
 * {@code otherTable} through a connection backed by the custom pool, then
 * records success in {@code completedWithPool} so the test can assert the
 * pool was actually exercised.
 */
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  try (Table table = e.getEnvironment().getConnection().getTable(otherTable, getPool())) {
    Put p = new Put(new byte[] { 'a' });
    p.addColumn(family, null, new byte[] { 'a' });
    try {
      // BUGFIX: batch the locally-built Put 'p'. Previously the incoming
      // 'put' was batched and 'p' was constructed but never used.
      table.batch(Collections.singletonList(p), null);
    } catch (InterruptedException e1) {
      // Restore the interrupt flag before translating the exception.
      Thread.currentThread().interrupt();
      throw new IOException(e1);
    }
    completedWithPool[0] = true;
  }
}
}
@Test public void testHTableBatchWithEmptyPut ()throws Exception { Table table = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), new byte[][] { FAMILY }); try { List actions = (List) new ArrayList(); Object[] results = new Object[2]; // create an empty Put Put put1 = new Put(ROW); actions.add(put1); Put put2 = new Put(ANOTHERROW); put2.addColumn(FAMILY, QUALIFIER, VALUE); actions.add(put2); table.batch(actions, results); fail("Empty Put should have failed the batch call"); } catch (IllegalArgumentException iae) { } finally { table.close(); } }
/**
 * Batches more than 64K operations in a single call to verify the client
 * handles batches larger than 2^16 entries.
 */
@Test
public void testHTableWithLargeBatch() throws Exception {
  Table table =
      TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), new byte[][] { FAMILY });
  int sixtyFourK = 64 * 1024;
  try {
    // Typed, presized list instead of the original raw unsized ArrayList:
    // we know up front there will be (64K + 1) * 2 actions.
    List<Row> actions = new ArrayList<>((sixtyFourK + 1) * 2);
    Object[] results = new Object[(sixtyFourK + 1) * 2];
    for (int i = 0; i < sixtyFourK + 1; i++) {
      Put put1 = new Put(ROW);
      put1.addColumn(FAMILY, QUALIFIER, VALUE);
      actions.add(put1);
      Put put2 = new Put(ANOTHERROW);
      put2.addColumn(FAMILY, QUALIFIER, VALUE);
      actions.add(put2);
    }
    table.batch(actions, results);
  } finally {
    table.close();
  }
}
@Test public void testBadFam() throws Exception { LOG.info("test=testBadFam"); Table table = UTIL.getConnection().getTable(TEST_TABLE); List<Row> actions = new ArrayList<>(); Put p = new Put(Bytes.toBytes("row1")); p.addColumn(Bytes.toBytes("bad_family"), Bytes.toBytes("qual"), Bytes.toBytes("value")); actions.add(p); p = new Put(Bytes.toBytes("row2")); p.addColumn(BYTES_FAMILY, Bytes.toBytes("qual"), Bytes.toBytes("value")); actions.add(p); // row1 and row2 should be in the same region. Object [] r = new Object[actions.size()]; try { table.batch(actions, r); fail(); } catch (RetriesExhaustedWithDetailsException ex) { LOG.debug(ex.toString(), ex); // good! assertFalse(ex.mayHaveClusterIssues()); } assertEquals(2, r.length); assertTrue(r[0] instanceof Throwable); assertTrue(r[1] instanceof Result); table.close(); }
new Put(ROW).addColumn(FAMILY, QUALIFIERS[0], VALUE))); Object[] batchResult = new Object[1]; t.batch(Arrays.asList(arm), batchResult); new Put(ROW).addColumn(FAMILY, QUALIFIERS[1], VALUE), new Delete(ROW).addColumns(FAMILY, QUALIFIERS[0]))); t.batch(Arrays.asList(arm), batchResult); r = t.get(g); assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[1]))); arm = RowMutations.of(Collections.singletonList( new Put(ROW).addColumn(new byte[]{'b', 'o', 'g', 'u', 's'}, QUALIFIERS[0], VALUE))); t.batch(Arrays.asList(arm), batchResult); fail("Expected RetriesExhaustedWithDetailsException with NoSuchColumnFamilyException"); } catch(RetriesExhaustedWithDetailsException e) {
@Test public void testBatchWithDelete() throws Exception { LOG.info("test=testBatchWithDelete"); Table table = UTIL.getConnection().getTable(TEST_TABLE); // Load some data List<Put> puts = constructPutRequests(); Object[] results = new Object[puts.size()]; table.batch(puts, results); validateSizeAndEmpty(results, KEYS.length); // Deletes List<Row> deletes = new ArrayList<>(); for (int i = 0; i < KEYS.length; i++) { Delete delete = new Delete(KEYS[i]); delete.addFamily(BYTES_FAMILY); deletes.add(delete); } results= new Object[deletes.size()]; table.batch(deletes, results); validateSizeAndEmpty(results, KEYS.length); // Get to make sure ... for (byte[] k : KEYS) { Get get = new Get(k); get.addColumn(BYTES_FAMILY, QUALIFIER); Assert.assertFalse(table.exists(get)); } table.close(); }
/**
 * With {@code setReturnResults(false)}, batched Appends must still fill every
 * results slot, but each Result must be empty.
 */
@Test
public void testBatchAppendWithReturnResultFalse() throws Exception {
  LOG.info("Starting testBatchAppendWithReturnResultFalse");
  final TableName tableName = TableName.valueOf(name.getMethodName());
  Table table = TEST_UTIL.createTable(tableName, FAMILY);

  // Two appends to the same row/column, both suppressing result payloads.
  Append first = new Append(Bytes.toBytes("row1"));
  first.setReturnResults(false);
  first.addColumn(FAMILY, Bytes.toBytes("f1"), Bytes.toBytes("value1"));
  Append second = new Append(Bytes.toBytes("row1"));
  second.setReturnResults(false);
  second.addColumn(FAMILY, Bytes.toBytes("f1"), Bytes.toBytes("value2"));

  List<Append> appends = new ArrayList<>();
  appends.add(first);
  appends.add(second);
  Object[] results = new Object[2];
  table.batch(appends, results);

  // Every slot is populated, and every Result is empty.
  assertTrue(results.length == 2);
  for (Object r : results) {
    Result result = (Result) r;
    assertTrue(result.isEmpty());
  }
  table.close();
}
table.batch(actions, multiRes); validateResult(multiRes[1], QUAL1, Bytes.toBytes("abcdef")); validateResult(multiRes[1], QUAL4, Bytes.toBytes("xyz"));
/**
 * With {@code setReturnResults(false)}, batched Increments must still fill
 * every results slot, but each Result must be empty.
 */
@Test
public void testBatchIncrementsWithReturnResultFalse() throws Exception {
  LOG.info("Starting testBatchIncrementsWithReturnResultFalse");
  final TableName tableName = TableName.valueOf(name.getMethodName());
  Table table = TEST_UTIL.createTable(tableName, FAMILY);

  // Two increments on the same cell, both suppressing result payloads.
  Increment first = new Increment(Bytes.toBytes("row2"));
  first.setReturnResults(false);
  first.addColumn(FAMILY, Bytes.toBytes("f1"), 1);
  Increment second = new Increment(Bytes.toBytes("row2"));
  second.setReturnResults(false);
  second.addColumn(FAMILY, Bytes.toBytes("f1"), 1);

  List<Increment> incs = new ArrayList<>();
  incs.add(first);
  incs.add(second);
  Object[] results = new Object[2];
  table.batch(incs, results);

  // Every slot is populated, and every Result is empty.
  assertTrue(results.length == 2);
  for (Object r : results) {
    Result result = (Result) r;
    assertTrue(result.isEmpty());
  }
  table.close();
}
@Test public void testHTableDeleteWithList() throws Exception { LOG.info("test=testHTableDeleteWithList"); Table table = UTIL.getConnection().getTable(TEST_TABLE); // Load some data List<Put> puts = constructPutRequests(); Object[] results = new Object[puts.size()]; table.batch(puts, results); validateSizeAndEmpty(results, KEYS.length); // Deletes ArrayList<Delete> deletes = new ArrayList<>(); for (int i = 0; i < KEYS.length; i++) { Delete delete = new Delete(KEYS[i]); delete.addFamily(BYTES_FAMILY); deletes.add(delete); } table.delete(deletes); Assert.assertTrue(deletes.isEmpty()); // Get to make sure ... for (byte[] k : KEYS) { Get get = new Get(k); get.addColumn(BYTES_FAMILY, QUALIFIER); Assert.assertFalse(table.exists(get)); } table.close(); }
@Test public void testBatchWithPut() throws Exception { LOG.info("test=testBatchWithPut"); Table table = CONNECTION.getTable(TEST_TABLE); // put multiple rows using a batch List<Put> puts = constructPutRequests(); Object[] results = new Object[puts.size()]; table.batch(puts, results); validateSizeAndEmpty(results, KEYS.length); if (true) { int liveRScount = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size(); assert liveRScount > 0; JVMClusterUtil.RegionServerThread liveRS = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().get(0); liveRS.getRegionServer().abort("Aborting for tests", new Exception("testBatchWithPut")); puts = constructPutRequests(); try { results = new Object[puts.size()]; table.batch(puts, results); } catch (RetriesExhaustedWithDetailsException ree) { LOG.info(ree.getExhaustiveDescription()); table.close(); throw ree; } validateSizeAndEmpty(results, KEYS.length); } validateLoadedData(table); table.close(); }
Object[] objs = new Object[batches.size()]; try (Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME)) { table.batch(batches, objs); fail("Where is the exception? We put the malformed cells!!!"); } catch (RetriesExhaustedWithDetailsException e) {
/**
 * This is for testing the active number of threads that were used while
 * doing a batch operation. It inserts one row per region via the batch
 * operation, and then checks the number of active threads.
 * <p/>
 * For HBASE-3553
 */
@Test
public void testActiveThreadsCount() throws Exception {
  // One core thread per slave plus one, so pool growth reflects region count.
  UTIL.getConfiguration().setLong("hbase.htable.threads.coresize", slaves + 1);
  try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration())) {
    // Hand the table an explicit executor so its peak pool size is observable.
    ThreadPoolExecutor executor = HTable.getDefaultExecutor(UTIL.getConfiguration());
    try {
      try (Table t = connection.getTable(TEST_TABLE, executor)) {
        List<Put> puts = constructPutRequests(); // creates a Put for every region
        t.batch(puts, null);
        // Collect the distinct servers the puts were routed to; the batch
        // should have used one thread per target server.
        HashSet<ServerName> regionservers = new HashSet<>();
        try (RegionLocator locator = connection.getRegionLocator(TEST_TABLE)) {
          for (Row r : puts) {
            HRegionLocation location = locator.getRegionLocation(r.getRow());
            regionservers.add(location.getServerName());
          }
        }
        // Peak pool size must equal the number of distinct target servers.
        assertEquals(regionservers.size(), executor.getLargestPoolSize());
      }
    } finally {
      // Always tear the executor down, even if the assertions fail.
      executor.shutdownNow();
    }
  }
}
Object[] results3 = new Object[actions.size()]; Object[] results1 = results3; hTableInterface.batch(actions, results1); assertEquals(MyObserver.tr2.getMin(), range2.getMin()); assertEquals(MyObserver.tr2.getMax(), range2.getMax());