/**
 * Verifies that a RuntimeException escaping an event handler's
 * {@code process()} causes the owning server to be aborted.
 */
@Test
public void testAborting() throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  final Server mockServer = mock(Server.class);
  when(mockServer.getConfiguration()).thenReturn(conf);

  // Single worker thread so the failing handler runs promptly.
  ExecutorService svc = new ExecutorService("unit_test");
  svc.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS, 1);

  // Submit a handler that always blows up; the executor is expected to
  // translate the failure into Server#abort(...).
  svc.submit(new EventHandler(mockServer, EventType.M_SERVER_SHUTDOWN) {
    @Override
    public void process() throws IOException {
      throw new RuntimeException("Should cause abort");
    }
  });

  // Poll (up to 30s) until Mockito has observed exactly one abort call.
  Waiter.waitFor(conf, 30000, new Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      try {
        verify(mockServer, times(1)).abort(anyString(), (Throwable) anyObject());
        return true;
      } catch (Throwable t) {
        // Not aborted yet; keep waiting.
        return false;
      }
    }
  });

  svc.shutdown();
}
// Fragment of a larger try/catch not visible here: creates this server's
// ExecutorService (named via getName()) and then brings up the web UI.
// NOTE(review): the trailing "} catch (Throwable t) {" belongs to the
// enclosing method — do not read this line as a complete statement list.
this.executorService = new ExecutorService(getName()); putUpWebUI(); } catch (Throwable t) {
// Fragment: starts a MASTER_SERVER_OPERATIONS executor named "unit_test"
// with maxThreads workers. maxThreads is defined outside this fragment —
// presumably a test parameter; verify against the enclosing method.
ExecutorService executorService = new ExecutorService("unit_test"); executorService.startExecutorService( ExecutorType.MASTER_SERVER_OPERATIONS, maxThreads);
// Fragment: builds an executor named after the RS_COMPACTED_FILES_DISCHARGER
// event type. NOTE(review): startExecutorService is invoked with a String
// ("name-name") rather than an ExecutorType — this relies on a String-based
// overload; confirm it exists in this ExecutorService version.
String string = org.apache.hadoop.hbase.executor.EventType.RS_COMPACTED_FILES_DISCHARGER .toString(); ExecutorService es = new ExecutorService(string); es.startExecutorService( string+"-"+string, 1);
/**
 * Per-test setup: starts a mini ZooKeeper cluster, rebuilds a clean znode
 * layout (base, splitLog and rs znodes) and starts the executor that the
 * split-log-worker tests submit work to.
 *
 * @throws Exception if ZK setup or znode creation fails
 */
@Before
public void setup() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  // Use the local conf consistently instead of re-fetching it from TEST_UTIL.
  zkw = new ZKWatcher(conf, "split-log-worker-tests", null);
  ds = new DummyServer(zkw, conf);

  // Wipe any children left over from a previous test, then (re)create the
  // required znodes. checkExists returns -1 when a node is absent, so the
  // not(is(-1)) assertions prove each node was actually created.
  ZKUtil.deleteChildrenRecursively(zkw, zkw.getZNodePaths().baseZNode);
  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().baseZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().baseZNode), not(is(-1)));
  LOG.debug(zkw.getZNodePaths().baseZNode + " created");

  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().splitLogZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().splitLogZNode), not(is(-1)));
  LOG.debug(zkw.getZNodePaths().splitLogZNode + " created");

  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().rsZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().rsZNode), not(is(-1)));

  SplitLogCounters.resetCounters();
  // Executor the tests use to run log-replay operations, 10 worker threads.
  executorService = new ExecutorService("TestSplitLogWorker");
  executorService.startExecutorService(ExecutorType.RS_LOG_REPLAY_OPS, 10);
}
// Fragment: no-arg construction. NOTE(review): the underscore-suffixed field
// name and missing name argument suggest this is a DIFFERENT ExecutorService
// class (possibly generated code) than the HBase one used elsewhere — verify.
service_ = new ExecutorService();
/**
 * Verifies that a RuntimeException escaping an event handler's
 * {@code process()} causes the owning server to be aborted.
 */
@Test
public void testAborting() throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  final Server mockServer = mock(Server.class);
  when(mockServer.getConfiguration()).thenReturn(conf);

  // Single worker thread so the failing handler runs promptly.
  ExecutorService svc = new ExecutorService("unit_test");
  svc.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS, 1);

  // Submit a handler that always blows up; the executor is expected to
  // translate the failure into Server#abort(...).
  svc.submit(new EventHandler(mockServer, EventType.M_SERVER_SHUTDOWN) {
    @Override
    public void process() throws IOException {
      throw new RuntimeException("Should cause abort");
    }
  });

  // Poll (up to 30s) until Mockito has observed exactly one abort call.
  Waiter.waitFor(conf, 30000, new Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      try {
        verify(mockServer, times(1)).abort(anyString(), (Throwable) anyObject());
        return true;
      } catch (Throwable t) {
        // Not aborted yet; keep waiting.
        return false;
      }
    }
  });

  svc.shutdown();
}
// Fragment: creates the region server's executor (named by the server name)
// and starts the open-region pool; thread count comes from
// "hbase.regionserver.executor.openregion.threads" (default 3).
this.service = new ExecutorService(getServerName().toString()); this.service.startExecutorService(ExecutorType.RS_OPEN_REGION, conf.getInt("hbase.regionserver.executor.openregion.threads", 3));
// Fragment: starts a MASTER_SERVER_OPERATIONS executor named "unit_test"
// with maxThreads workers. maxThreads is defined outside this fragment —
// presumably a test parameter; verify against the enclosing method.
ExecutorService executorService = new ExecutorService("unit_test"); executorService.startExecutorService( ExecutorType.MASTER_SERVER_OPERATIONS, maxThreads);
// Fragment: the leading argument list belongs to a constructor/method call
// that starts before this line. Then: creates the server's ExecutorService
// (short server name) and initializes tracing via SpanReceiverHost.
this.conf, this.fs, this.rootDir, !canUpdateTableDescriptor(), false); service = new ExecutorService(getServerName().toShortString()); spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());
// Fragment: builds an executor named after the RS_COMPACTED_FILES_DISCHARGER
// event type. NOTE(review): startExecutorService is invoked with a String
// ("name-name") rather than an ExecutorType — this relies on a String-based
// overload; confirm it exists in this ExecutorService version.
String string = org.apache.hadoop.hbase.executor.EventType.RS_COMPACTED_FILES_DISCHARGER .toString(); ExecutorService es = new ExecutorService(string); es.startExecutorService( string+"-"+string, 1);
/**
 * Per-test setup: starts a mini ZooKeeper cluster, rebuilds a clean znode
 * layout (base, splitLog and rs znodes) and starts the executor that the
 * split-log-worker tests submit work to.
 *
 * @throws Exception if ZK setup or znode creation fails
 */
@Before
public void setup() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  // Use the local conf consistently instead of re-fetching it from TEST_UTIL.
  zkw = new ZKWatcher(conf, "split-log-worker-tests", null);
  ds = new DummyServer(zkw, conf);

  // Wipe any children left over from a previous test, then (re)create the
  // required znodes. checkExists returns -1 when a node is absent, so the
  // not(is(-1)) assertions prove each node was actually created.
  ZKUtil.deleteChildrenRecursively(zkw, zkw.getZNodePaths().baseZNode);
  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().baseZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().baseZNode), not(is(-1)));
  LOG.debug(zkw.getZNodePaths().baseZNode + " created");

  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().splitLogZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().splitLogZNode), not(is(-1)));
  LOG.debug(zkw.getZNodePaths().splitLogZNode + " created");

  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().rsZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().rsZNode), not(is(-1)));

  SplitLogCounters.resetCounters();
  // Executor the tests use to run log-replay operations, 10 worker threads.
  executorService = new ExecutorService("TestSplitLogWorker");
  executorService.startExecutorService(ExecutorType.RS_LOG_REPLAY_OPS, 10);
}
// Fragment of master initialization: publishes the cluster id to ZooKeeper,
// then — only when not recovering an existing master (masterRecovery false) —
// creates the master's ExecutorService and ServerManager. The closing brace
// of the if-block is outside this fragment.
ClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId()); if (!masterRecovery) { this.executorService = new ExecutorService(getServerName().toString()); this.serverManager = new ServerManager(this, this);