/**
 * Installs an incrementing clock before each test.
 *
 * HBASE-6832: [WINDOWS] tests should use explicit timestamps for Puts rather than
 * rely on implicit region-server timing. An IncrementingEnvironmentEdge guarantees
 * that the put, delete, and compact timestamps all differ; otherwise a forced major
 * compaction will not purge Deletes carrying the same timestamp — see
 * ScanQueryMatcher.match():
 *   if (retainDeletesInOutput
 *       || (!isUserScan && (EnvironmentEdgeManager.currentTime() - timestamp)
 *           <= timeToPurgeDeletes) ... )
 */
@Before
public void setUp() throws Exception {
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
/**
 * Wipes and recreates the root test directory after each test, and restores the
 * default environment edge.
 *
 * Fix: the original called {@code EnvironmentEdgeManagerTestHelper.reset()} only
 * after the filesystem cleanup succeeded, so a failed delete/mkdirs leaked the
 * injected clock into every subsequent test. The reset now runs in a finally block.
 *
 * @throws IOException if the root test dir cannot be deleted or recreated
 */
@After
public void cleanupFS() throws Exception {
  try {
    if (fs.exists(root)) {
      if (!fs.delete(root, true)) {
        throw new IOException("Failed to delete root test dir: " + root);
      }
      if (!fs.mkdirs(root)) {
        throw new IOException("Failed to create root test dir: " + root);
      }
    }
  } finally {
    // Must run unconditionally: a leaked injected edge would skew timestamps
    // observed by later tests.
    EnvironmentEdgeManagerTestHelper.reset();
  }
}
try { final long now = System.currentTimeMillis(); EnvironmentEdgeManagerTestHelper.injectEdge(new EnvironmentEdge() { @Override public long currentTime() { EnvironmentEdgeManagerTestHelper.reset();
try { final long now = System.currentTimeMillis(); EnvironmentEdgeManagerTestHelper.injectEdge(new EnvironmentEdge() { @Override public long currentTime() { EnvironmentEdgeManagerTestHelper.reset();
@BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.thrift.http", true); TEST_UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false); TEST_UTIL.startMiniCluster(); //ensure that server time increments every time we do an operation, otherwise //successive puts having the same timestamp will override each other EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge()); }
/** Restores the default environment edge once the whole test class has finished. */
@AfterClass
public static void tearDown() throws Exception {
  EnvironmentEdgeManagerTestHelper.reset();
}
@BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false); TEST_UTIL.startMiniCluster(); //ensure that server time increments every time we do an operation, otherwise //successive puts having the same timestamp will override each other EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge()); }
/** Restores the default clock and releases the test region, if one was opened. */
@After
public void tearDown() throws Exception {
  EnvironmentEdgeManagerTestHelper.reset();
  if (region != null) {
    region.close(true);
  }
}
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
/** Resets the injected clock and removes the on-disk test directory. */
@After
public void tearDown() throws Exception {
  EnvironmentEdgeManagerTestHelper.reset();
  LOG.info("Cleaning test directory: " + TEST_UTIL.getDataTestDir());
  TEST_UTIL.cleanupTestDir();
}
/**
 * Boots a single-node cluster with quotas enabled and an aggressive cache-refresh
 * interval, creates the test tables, and installs a manually driven clock so tests
 * can advance time deterministically.
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
  TEST_UTIL.getConfiguration().setInt(QuotaCache.REFRESH_CONF_KEY, REFRESH_TIME);
  TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
  TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
  TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
  TEST_UTIL.startMiniCluster(1);
  TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
  QuotaCache.TEST_FORCE_REFRESH = true;

  tables = new Table[TABLE_NAMES.length];
  for (int idx = 0; idx < TABLE_NAMES.length; ++idx) {
    tables[idx] = TEST_UTIL.createTable(TABLE_NAMES[idx], FAMILY);
  }

  // Manual edge: tests set the clock value explicitly instead of reading wall time.
  envEdge = new ManualEnvironmentEdge();
  envEdge.setValue(EnvironmentEdgeManager.currentTime());
  EnvironmentEdgeManagerTestHelper.injectEdge(envEdge);
}
/** Resets the injected clock and removes the on-disk test directory. */
@After
public void tearDown() throws Exception {
  EnvironmentEdgeManagerTestHelper.reset();
  LOG.info("Cleaning test directory: " + TEST_UTIL.getDataTestDir());
  TEST_UTIL.cleanupTestDir();
}
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
/** Resets the injected clock and removes the on-disk test directory. */
@After
public void tearDown() throws Exception {
  EnvironmentEdgeManagerTestHelper.reset();
  LOG.info("Cleaning test directory: " + test_util.getDataTestDir());
  test_util.cleanupTestDir();
}
byte[][] families = { fam }; this.region = initHRegion(tableName, method, CONF, families); EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
@After public void tearDown() throws IOException { // Region may have been closed, but it is still no harm if we close it again here using HTU. HBaseTestingUtility.closeRegionAndWAL(region); EnvironmentEdgeManagerTestHelper.reset(); LOG.info("Cleaning test directory: " + TEST_UTIL.getDataTestDir()); TEST_UTIL.cleanupTestDir(); }
public void doTestDelete_AndPostInsert(Delete delete) throws IOException, InterruptedException { this.region = initHRegion(tableName, method, CONF, fam1); EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge()); Put put = new Put(row); put.addColumn(fam1, qual1, value1);
/**
 * Restores the default clock, then releases the store and region opened by the
 * test (best effort for the store).
 */
@After
public void tearDown() throws Exception {
  EnvironmentEdgeManagerTestHelper.reset();
  if (store != null) {
    try {
      store.close();
    } catch (IOException ignored) {
      // Best effort: a failed store close must not mask the test outcome.
    }
    store = null;
  }
  if (region != null) {
    region.close();
    region = null;
  }
}
int ttl = 4; IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge(); EnvironmentEdgeManagerTestHelper.injectEdge(edge);
/**
 * Closes the reader, both region replicas, and restores the default clock.
 *
 * Fix: the original ran these cleanups sequentially, so an exception from
 * {@code reader.close()} (or from closing the primary region) skipped every
 * later step, leaking the remaining region/WAL and the injected environment
 * edge into subsequent tests. Each step is now guarded by a finally block; the
 * first exception still propagates to fail the teardown.
 */
@After
public void tearDown() throws Exception {
  try {
    if (reader != null) {
      reader.close();
    }
  } finally {
    try {
      if (primaryRegion != null) {
        HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
      }
    } finally {
      try {
        if (secondaryRegion != null) {
          HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
        }
      } finally {
        // Always restore the real clock, whatever happened above.
        EnvironmentEdgeManagerTestHelper.reset();
      }
    }
  }
}