/**
 * Writes {@code numberOfRows} single-cell rows to the table and then flushes memstores.
 * Row keys are the big-endian byte encoding of the loop index.
 */
private void generateRows(int numberOfRows, Table ht, byte[] family, byte[] qf, byte[] value)
  throws IOException {
  for (int rowIndex = 0; rowIndex < numberOfRows; rowIndex++) {
    Put put = new Put(Bytes.toBytes(rowIndex));
    put.addColumn(family, qf, value);
    ht.put(put);
  }
  TEST_UTIL.flush();
}
/** Verifies that a put followed by a flush is still readable when the WAL is disabled. */
@Test
public void testDisabledWAL() throws Exception {
  LOG.info("Writing data to table " + tableName);
  Put put = new Put(Bytes.toBytes("row"));
  put.addColumn(fam, Bytes.toBytes("qual"), Bytes.toBytes("val"));
  table.put(put);

  LOG.info("Flushing table " + tableName);
  TEST_UTIL.flush(tableName);

  LOG.info("Getting data from table " + tableName);
  Result result = table.get(new Get(Bytes.toBytes("row")));
  assertNotNull(result.getValue(fam, Bytes.toBytes("qual")));
}
}
public static void setUp(String regionImpl) { try { CONF.set(HConstants.REGION_IMPL, regionImpl); CONF.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, RefreshHFilesEndpoint.class.getName()); cluster = HTU.startMiniCluster(NUM_RS); // Create table table = HTU.createTable(TABLE_NAME, FAMILY, SPLIT_KEY); // this will create 2 regions spread across slaves HTU.loadNumericRows(table, FAMILY, 1, 20); HTU.flush(TABLE_NAME); } catch (Exception ex) { LOG.error("Couldn't finish setup", ex); } }
/**
 * Loads {@code rows} randomly-keyed rows into every family, repeated {@code flushes}
 * times, flushing memstores after each batch. Each cell's value is its own row key.
 */
private static void loadData(final Table ht, final byte[][] families, final int rows,
  final int flushes) throws IOException {
  final byte[] qualifier = Bytes.toBytes("val");
  final List<Put> batch = new ArrayList<>(rows);
  for (int flush = 0; flush < flushes; flush++) {
    for (int r = 0; r < rows; r++) {
      byte[] row = Bytes.toBytes(random.nextLong());
      Put put = new Put(row);
      for (byte[] family : families) {
        put.addColumn(family, qualifier, row);
      }
      batch.add(put);
    }
    ht.put(batch);
    TEST_UTIL.flush();
    batch.clear();
  }
}
}
/**
 * Writes {@code numberOfRows} single-cell rows (bypassing the WAL) and flushes memstores.
 * Row keys are the big-endian byte encoding of the loop index.
 */
private void generateRows(int numberOfRows, Table ht, byte[] family, byte[] qf, byte[] value)
  throws IOException {
  for (int rowIndex = 0; rowIndex < numberOfRows; rowIndex++) {
    Put put = new Put(Bytes.toBytes(rowIndex));
    put.addColumn(family, qf, value);
    // Deliberately skip the WAL for these writes.
    put.setDurability(Durability.SKIP_WAL);
    ht.put(put);
  }
  TEST_UTIL.flush();
}
/**
 * Loads {@code rows} rows into every family via the async client, repeated
 * {@code flushes} times, flushing memstores after each batch. Row keys combine the
 * row index and the flush index so every batch writes distinct rows.
 */
static void loadData(final TableName tableName, final byte[][] families, final int rows,
  final int flushes) throws IOException {
  AsyncTable<?> table = ASYNC_CONN.getTable(tableName);
  final byte[] qualifier = Bytes.toBytes("val");
  final List<Put> batch = new ArrayList<>(rows);
  for (int flush = 0; flush < flushes; flush++) {
    for (int r = 0; r < rows; r++) {
      byte[] row = Bytes.add(Bytes.toBytes(r), Bytes.toBytes(flush));
      Put put = new Put(row);
      for (byte[] family : families) {
        put.addColumn(family, qualifier, row);
      }
      batch.add(put);
    }
    // Block until the async batch completes before flushing.
    table.putAll(batch).join();
    TEST_UTIL.flush();
    batch.clear();
  }
}
}
/** Writes MAX_SPLIT_KEYS_NUM rows (one cell each, value suffixed with the index) and flushes. */
private static void loadDataAndFlush() throws IOException {
  for (int i = 0; i < MAX_SPLIT_KEYS_NUM; i++) {
    byte[] cellValue = Bytes.add(VALUE, Bytes.toBytes(i));
    TABLE.put(new Put(integerToBytes(i)).addColumn(FAMILY, QUALIFIER, cellValue));
  }
  TEST_UTIL.flush(TABLE_NAME);
}
@Before public void setup() throws Exception { if (cacheType.equals("bucket")) { CONF.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap"); CONF.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 30); } cluster = HTU.startMiniCluster(NUM_RS); rs1 = cluster.getRegionServer(0); rs2 = cluster.getRegionServer(1); // Create table table = HTU.createTable(TABLE_NAME, FAMILY, SPLIT_KEY); HTU.loadNumericRows(table, FAMILY, 1, 10); HTU.flush(TABLE_NAME); }
@Test public void test() throws IOException, InterruptedException { try (Table t = UTIL.getConnection().getTable(NAME1)) { writeData(t); // Flush the data UTIL.flush(NAME1); // Issue a compaction UTIL.compact(NAME1, true); Thread.sleep(2000); } Set<String> jmxMetrics = readJmxMetricsWithRetry(); assertNotNull(jmxMetrics); long name1TableMetricsCount = jmxMetrics.stream().filter(metric -> metric.contains("MetaTable_table_" + NAME1)).count(); assertEquals(5L, name1TableMetricsCount); String putWithClientMetricNameRegex = "MetaTable_client_.+_put_request.*"; long putWithClientMetricsCount = jmxMetrics.stream().filter(metric -> metric.matches(putWithClientMetricNameRegex)) .count(); assertEquals(5L, putWithClientMetricsCount); }
@Test public void testWithColumnDeletes() throws IOException { final TableName tableName = TableName.valueOf(name.getMethodName()); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... Table ht = TEST_UTIL.createTable(tableName, FAMILIES, Integer.MAX_VALUE); // For row:0, col:0: insert versions 1 through 5. putNVersions(ht, FAMILY, 0, 0, 1, 5); TEST_UTIL.flush(tableName); // delete all versions before 4. deleteColumn(ht, FAMILY, 0, 0); // request a bunch of versions including the deleted version. We should // only get back entries for the versions that exist. Cell kvs[] = getNVersions(ht, FAMILY, 0, 0, Arrays.asList(2L, 3L)); assertEquals(0, kvs.length); ht.close(); }
@Test public void testJira6912() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); Table foo = TEST_UTIL.createTable(tableName, new byte[][] {FAMILY}, 10); List<Put> puts = new ArrayList<Put>(); for (int i=0;i !=100; i++){ Put put = new Put(Bytes.toBytes(i)); put.addColumn(FAMILY, FAMILY, Bytes.toBytes(i)); puts.add(put); } foo.put(puts); // If i comment this out it works TEST_UTIL.flush(); Scan scan = new Scan(); scan.setStartRow(Bytes.toBytes(1)); scan.setStopRow(Bytes.toBytes(3)); scan.addColumn(FAMILY, FAMILY); scan.setFilter(new RowFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes(1)))); ResultScanner scanner = foo.getScanner(scan); Result[] bar = scanner.next(100); assertEquals(1, bar.length); }
@Test public void testWithMultipleVersionDeletes() throws IOException { LOG.info(name.getMethodName()); final TableName tableName = TableName.valueOf(name.getMethodName()); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... Table ht = TEST_UTIL.createTable(tableName, FAMILIES, Integer.MAX_VALUE); // For row:0, col:0: insert versions 1 through 5. putNVersions(ht, FAMILY, 0, 0, 1, 5); TEST_UTIL.flush(tableName); // delete all versions before 4. deleteAllVersionsBefore(ht, FAMILY, 0, 0, 4); // request a bunch of versions including the deleted version. We should // only get back entries for the versions that exist. Cell kvs[] = getNVersions(ht, FAMILY, 0, 0, Arrays.asList(2L, 3L)); assertEquals(0, kvs.length); ht.close(); }
@Test public void testWithFamilyDeletes() throws IOException { final TableName tableName = TableName.valueOf(name.getMethodName()); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... Table ht = TEST_UTIL.createTable(tableName, FAMILIES, Integer.MAX_VALUE); // For row:0, col:0: insert versions 1 through 5. putNVersions(ht, FAMILY, 0, 0, 1, 5); TEST_UTIL.flush(tableName); // delete all versions before 4. deleteFamily(ht, FAMILY, 0); // request a bunch of versions including the deleted version. We should // only get back entries for the versions that exist. Cell kvs[] = getNVersions(ht, FAMILY, 0, 0, Arrays.asList(2L, 3L)); assertEquals(0, kvs.length); ht.close(); }
/**
 * Incrementing a counter after its row has been deleted must start the counter fresh
 * rather than resurrecting the flushed pre-delete value.
 */
@Test
public void testIncrementWithDeletes() throws Exception {
  LOG.info("Starting " + this.name.getMethodName());
  final TableName TABLENAME =
    TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
  Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
  final byte[] COLUMN = Bytes.toBytes("column");

  // Increment to 5 and flush so the value lands in a store file.
  ht.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
  TEST_UTIL.flush(TABLENAME);

  // Delete the row, then increment again; the counter restarts at 5.
  ht.delete(new Delete(ROW));
  ht.incrementColumnValue(ROW, FAMILY, COLUMN, 5);

  Result r = ht.get(new Get(ROW));
  assertEquals(1, r.size());
  assertEquals(5, Bytes.toLong(r.getValue(FAMILY, COLUMN)));
}
/**
 * Splits the single region of NAME at row 1 and verifies that both daughter regions
 * report the parent's max sequence id + 1 as their location sequence number.
 * <p>
 * Fixed a resource leak: the RegionLocator (a Closeable) was never closed.
 */
@Test
public void testSplit()
  throws IOException, InterruptedException, ExecutionException, TimeoutException {
  try (Table table = createTable(false)) {
    table.put(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, Bytes.toBytes(0)));
    table.put(new Put(Bytes.toBytes(1)).addColumn(CF, CQ, Bytes.toBytes(0)));
  }
  UTIL.flush(NAME);
  HRegionServer rs = UTIL.getRSForFirstRegionInTable(NAME);
  RegionInfo region = UTIL.getMiniHBaseCluster().getRegions(NAME).get(0).getRegionInfo();
  UTIL.getAdmin().splitRegionAsync(region.getRegionName(), Bytes.toBytes(1))
    .get(1, TimeUnit.MINUTES);
  long maxSeqId = getMaxSeqId(rs, region);
  // Close the locator when done instead of leaking it.
  try (RegionLocator locator = UTIL.getConnection().getRegionLocator(NAME)) {
    HRegionLocation locA = locator.getRegionLocation(Bytes.toBytes(0), true);
    HRegionLocation locB = locator.getRegionLocation(Bytes.toBytes(1), true);
    // Both daughters open with the parent's max sequence id + 1.
    assertEquals(maxSeqId + 1, locA.getSeqNum());
    assertEquals(maxSeqId + 1, locB.getSeqNum());
  }
}
@Test public void testFlushWithTableCompactionDisabled() throws Exception { HTableDescriptor htd = new HTableDescriptor(tableName); htd.setCompactionEnabled(false); TEST_UTIL.createTable(htd, new byte[][] { family }, null); // load the table for (int i = 0; i < blockingStoreFiles + 1; i ++) { TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(tableName), family); TEST_UTIL.flush(tableName); } // Make sure that store file number is greater than blockingStoreFiles + 1 Path tableDir = FSUtils.getTableDir(rootDir, tableName); Collection<String> hfiles = SnapshotTestingUtils.listHFileNames(fs, tableDir); assert(hfiles.size() > blockingStoreFiles + 1); } }
private void testWithVersionDeletes(boolean flushTables) throws IOException { final byte [] TABLE = Bytes.toBytes(name.getMethodName() + "_" + (flushTables ? "flush" : "noflush")); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... Table ht = TEST_UTIL.createTable(TableName.valueOf(TABLE), FAMILIES, Integer.MAX_VALUE); // For row:0, col:0: insert versions 1 through 5. putNVersions(ht, FAMILY, 0, 0, 1, 5); // delete version 4. deleteOneVersion(ht, FAMILY, 0, 0, 4); if (flushTables) { TEST_UTIL.flush(); } // request a bunch of versions including the deleted version. We should // only get back entries for the versions that exist. Cell kvs[] = getNVersions(ht, FAMILY, 0, 0, Arrays.asList(2L, 3L, 4L, 5L)); assertEquals(3, kvs.length); checkOneCell(kvs[0], FAMILY, 0, 0, 5); checkOneCell(kvs[1], FAMILY, 0, 0, 3); checkOneCell(kvs[2], FAMILY, 0, 0, 2); ht.close(); }
public void testWithVersionDeletes(boolean flushTables) throws IOException { LOG.info(name.getMethodName() + "_"+ (flushTables ? "flush" : "noflush")); final TableName tableName = TableName.valueOf(name.getMethodName() + "_" + (flushTables ? "flush" : "noflush")); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... Table ht = TEST_UTIL.createTable(tableName, FAMILIES, Integer.MAX_VALUE); // For row:0, col:0: insert versions 1 through 5. putNVersions(ht, FAMILY, 0, 0, 1, 5); if (flushTables) { TEST_UTIL.flush(tableName); } // delete version 4. deleteOneVersion(ht, FAMILY, 0, 0, 4); // request a bunch of versions including the deleted version. We should // only get back entries for the versions that exist. Cell kvs[] = getNVersions(ht, FAMILY, 0, 0, Arrays.asList(2L, 3L, 4L, 5L)); assertEquals(3, kvs.length); checkOneCell(kvs[0], FAMILY, 0, 0, 5); checkOneCell(kvs[1], FAMILY, 0, 0, 3); checkOneCell(kvs[2], FAMILY, 0, 0, 2); ht.close(); }
@Before public void setup() throws IOException, InterruptedException { // Create a table of three families. This will assign a region. TEST_UTIL.createTable(TABLENAME, FAMILIES); Table t = TEST_UTIL.getConnection().getTable(TABLENAME); TEST_UTIL.waitUntilNoRegionsInTransition(); // Load the table with data for all families TEST_UTIL.loadTable(t, FAMILIES); TEST_UTIL.flush(); t.close(); TEST_UTIL.ensureSomeRegionServersAvailable(2); }
/** * HBASE-52 * Add a means of scanning over all versions */ @Test public void testJiraTest52() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); byte [][] VALUES = makeNAscii(VALUE, 7); long [] STAMPS = makeStamps(7); Table ht = TEST_UTIL.createTable(tableName, FAMILY, 10); // Insert lots versions Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); put.addColumn(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); put.addColumn(FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); put.addColumn(FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); put.addColumn(FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); ht.put(put); getAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); scanAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); // Try same from storefile TEST_UTIL.flush(); getAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); scanAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); }