@Override protected void handleException(Throwable t) { if (t instanceof InterruptedIOException || t instanceof InterruptedException) { LOG.error("Caught throwable while processing event " + eventType, t); } else if (t instanceof RuntimeException) { server.abort("ServerAborting because a runtime exception was thrown", t); } else { // something fishy since we cannot flush the primary region until all retries (retries from // rpc times 35 trigger). We cannot close the region since there is no such mechanism to // close a region without master triggering it. We just abort the server for now. server.abort("ServerAborting because an exception was thrown", t); } }
/**
 * Builds the ZooKeeper-backed coordinated state manager for the given server.
 * Both split-log coordinations share the server's ZooKeeper watcher.
 * @param server the server whose watcher and configuration back the coordinations
 */
public ZkCoordinatedStateManager(Server server) {
  this.watcher = server.getZooKeeper();
  this.splitLogWorkerCoordination =
      new ZkSplitLogWorkerCoordination(server.getServerName(), this.watcher);
  this.splitLogManagerCoordination =
      new ZKSplitLogManagerCoordination(server.getConfiguration(), this.watcher);
}
/**
 * @return true only while the owning server is up and this source is still running
 */
@Override
public boolean isSourceActive() {
  // A stopped server makes the source inactive regardless of its own flag.
  if (this.server.isStopped()) {
    return false;
  }
  return this.sourceRunning;
}
// Chore that periodically runs a pluggable HeapMemoryTuner. The tuner class is loaded
// reflectively from the HBASE_RS_HEAP_MEMORY_TUNER_CLASS config key, defaulting to
// DefaultHeapMemoryTuner, and the tuner context records whether the memstore is off-heap.
// NOTE(review): relies on the enclosing class's `server`, `defaultChorePeriod`,
// `regionServerAccounting` and `tunerContext` being initialized before this runs — confirm.
public HeapMemoryTunerChore() { super(server.getServerName() + "-HeapMemoryTunerChore", server, defaultChorePeriod); Class<? extends HeapMemoryTuner> tunerKlass = server.getConfiguration().getClass( HBASE_RS_HEAP_MEMORY_TUNER_CLASS, DefaultHeapMemoryTuner.class, HeapMemoryTuner.class); heapMemTuner = ReflectionUtils.newInstance(tunerKlass, server.getConfiguration()); tunerContext .setOffheapMemStore(regionServerAccounting.isOffheap()); }
/**
 * Test worker that will claim the replication queues of a dead region server.
 * @param deadRS the region server whose queues will be claimed
 * @param s the live server providing ZooKeeper and configuration
 * @throws Exception if the replication queue storage cannot be created
 */
public DummyNodeFailoverWorker(ServerName deadRS, Server s) throws Exception {
  this.deadRS = deadRS;
  this.server = s;
  // Queue storage is backed by the live server's own ZooKeeper and configuration.
  this.rq = ReplicationStorageFactory.getReplicationQueueStorage(
      s.getZooKeeper(), s.getConfiguration());
}
// NOTE(review): incomplete Mockito test fragment — braces are unbalanced in this view,
// so it is left byte-identical. It stubs a mock Server (configuration, not stopped, not
// aborted), creates a mock RegionServerServices, later flips isStopped() to true, and
// interrupts the log roller — presumably setup plus teardown of one test; confirm in
// the full file.
Mockito.when(server.getConfiguration()).thenReturn(CONF); Mockito.when(server.isStopped()).thenReturn(false); Mockito.when(server.isAborted()).thenReturn(false); RegionServerServices services = Mockito.mock(RegionServerServices.class); Mockito.when(server.isStopped()).thenReturn(true); if (logRoller != null) { logRoller.interrupt();
// NOTE(review): incomplete fragment of the active-master election loop — unbalanced
// braces, left byte-identical. It builds this master's backup znode path, loops until
// the master is aborted/stopped trying to register as active in ZK, aborts on an
// unexpected KeeperException, and waits (with checkInterval) while another master is
// active; stops the master if the cluster goes down first. Confirm against the full file.
String backupZNode = ZNodePaths.joinZNode( this.watcher.getZNodePaths().backupMasterAddressesZNode, this.sn.toString()); while (!(master.isAborted() || master.isStopped())) { startupStatus.setStatus("Trying to register in ZK as active master"); startupStatus.setStatus(msg); } catch (KeeperException ke) { master.abort("Received an unexpected KeeperException, aborting", ke); return false; while (clusterHasActiveMaster.get() && !master.isStopped()) { try { clusterHasActiveMaster.wait(checkInterval); this.master.stop( "Cluster went down before this master became active");
// NOTE(review): incomplete Mockito test fragment — unbalanced braces, left byte-identical.
// Mixes statically-imported `when`/`mock` with qualified `Mockito.*` calls; it stubs the
// mock server, then verifies abort(...) was called at least 0 / at least 1 times (the
// atLeast(0) form is a no-op check — presumably a polling loop; confirm), and closes the
// log roller during teardown.
when(server.getConfiguration()).thenReturn(CONF); when(server.isStopped()).thenReturn(false); when(server.isAborted()).thenReturn(false); RegionServerServices services = mock(RegionServerServices.class); Mockito.verify(server, Mockito.atLeast(0)).abort(Mockito.anyString(), Mockito.any(Throwable.class)); try { Mockito.verify(server, Mockito.atLeast(1)).abort(Mockito.anyString(), Mockito.any(Throwable.class)); break; Mockito.when(server.isStopped()).thenReturn(true); if (logRoller != null) logRoller.close(); if (region != null) {
// NOTE(review): incomplete test fragment — left byte-identical. It creates a dummy
// server, registers a WAL in replication queue storage, and spins up three
// DummyNodeFailoverWorkers against the same dead server name.
// NOTE(review): the stray leading '+' before `w3.isLogZnodesMapPopulated();` looks like
// a leftover diff/patch marker — it is not valid Java and should be removed; also the
// call's boolean result is discarded, and `populatedMap` is not defined in this view.
Server server = new DummyServer("hostname0.example.org"); ReplicationQueueStorage rq = ReplicationStorageFactory .getReplicationQueueStorage(server.getZooKeeper(), server.getConfiguration()); rq.addWAL(server.getServerName(), "1", file); DummyNodeFailoverWorker w1 = new DummyNodeFailoverWorker(server.getServerName(), s1); DummyNodeFailoverWorker w2 = new DummyNodeFailoverWorker(server.getServerName(), s2); DummyNodeFailoverWorker w3 = new DummyNodeFailoverWorker(server.getServerName(), s3); + w3.isLogZnodesMapPopulated(); assertEquals(1, populatedMap); server.abort("", null);
// NOTE(review): incomplete test fragment — left byte-identical. It registers one WAL for
// a dummy server, then hands the queue down a chain (server -> s1 -> s2 -> s3) via
// claimQueue, removing each emptied replicator node along the way, and finally asserts
// which server names remain in `result` (defined outside this view — confirm) before
// stopping the server.
Server server = new DummyServer("ec2-54-234-230-108.compute-1.amazonaws.com"); ReplicationQueueStorage queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(server.getZooKeeper(), conf); queueStorage.addWAL(server.getServerName(), "1", file); ServerName serverName = server.getServerName(); List<String> unclaimed = queueStorage.getAllQueues(serverName); queueStorage.claimQueue(serverName, unclaimed.get(0), s1.getServerName()); queueStorage.removeReplicatorIfQueueIsEmpty(serverName); serverName = s1.getServerName(); unclaimed = queueStorage.getAllQueues(serverName); queueStorage.claimQueue(serverName, unclaimed.get(0), s2.getServerName()); queueStorage.removeReplicatorIfQueueIsEmpty(serverName); serverName = s2.getServerName(); unclaimed = queueStorage.getAllQueues(serverName); String queue3 = queueStorage.claimQueue(serverName, unclaimed.get(0), s3.getServerName()).getFirst(); queueStorage.removeReplicatorIfQueueIsEmpty(serverName); assertTrue(result.contains(server.getServerName())); assertTrue(result.contains(s1.getServerName())); assertTrue(result.contains(s2.getServerName())); server.stop("");
// NOTE(review): incomplete fragment of ServerManager.expireServer — the body is chopped
// (dangling string concat, unbalanced braces), so it is left byte-identical. Visible
// logic: if the expiring server is the master itself and the master is still live, stop
// it ("We lost our znode?"); and when the online-server set becomes empty during cluster
// shutdown, stop the master. Confirm the elided middle against the full file.
public synchronized void expireServer(final ServerName serverName) { if (serverName.equals(master.getServerName())) { if (!(master.isAborted() || master.isStopped())) { master.stop("We lost our znode?"); " expired; onlineServers=" + this.onlineServers.size()); if (this.onlineServers.isEmpty()) { master.stop("Cluster shutdown set; onlineServer=0");
// NOTE(review): incomplete fragment of a replication queue failover routine — unbalanced
// braces, left byte-identical. Visible logic: skip the transfer if this server is
// shutting down; otherwise repeatedly claim a randomly chosen queue of the dead RS for
// this server; abort the server if claiming fails; skip (and remove the claimed queue)
// when the peer no longer exists. The elided middle should be confirmed in the full file.
if (server.isStopped()) { LOG.info("Not transferring queue since we are shutting down"); return; while (!queues.isEmpty()) { Pair<String, SortedSet<String>> peer = queueStorage.claimQueue(deadRS, queues.get(ThreadLocalRandom.current().nextInt(queues.size())), server.getServerName()); long sleep = sleepBeforeFailover / 2; if (!peer.getSecond().isEmpty()) { " If so, increase it for both client and server side." + e), deadRS, queueStorage.getRsNode(deadRS)); server.abort("Failed to claim queue from dead regionserver.", e); return; LOG.warn("Skipping failover for peer {} of node {}, peer is null", actualPeerId, deadRS); abortWhenFail(() -> queueStorage.removeQueue(server.getServerName(), queueId)); continue;
// NOTE(review): incomplete fragment — a second copy of the active-master wait loop
// (compare the similar fragment elsewhere in this file), unbalanced braces, left
// byte-identical. Aborts on an unexpected KeeperException, waits (untimed here) while
// another master holds the active slot, stops if the cluster goes down, and returns
// false once this master is stopped. Confirm against the full file.
startupStatus.setStatus(msg); } catch (KeeperException ke) { master.abort("Received an unexpected KeeperException, aborting", ke); return false; while (this.clusterHasActiveMaster.get() && !this.master.isStopped()) { try { this.clusterHasActiveMaster.wait(); this.master.stop("Cluster went down before this master became active"); if (this.master.isStopped()) { return false;
/**
 * @return the name of the server that owns the WALs handled here
 */
@Override
public ServerName getServerWALsBelongTo() {
  return this.server.getServerName();
}
/**
 * Builds the catalog-janitor chore for the given master.
 * @param server the server this chore runs on; supplies name, configuration and connection
 * @param services master services used by the janitor
 */
CatalogJanitor(final Server server, final MasterServices services) {
  // Chore period defaults to 300000 ms (5 minutes) unless overridden by
  // "hbase.catalogjanitor.interval".
  super("CatalogJanitor-" + server.getServerName().toShortString(), server,
      server.getConfiguration().getInt("hbase.catalogjanitor.interval", 300000));
  this.services = services;
  this.server = server;
  this.connection = server.getConnection();
}
/**
 * Reacts to membership changes under the region-server znode. Ignored unless the event
 * is for the RS znode and this server is still alive; any ZK/IO failure aborts the
 * server since the tracker cannot recover a consistent view.
 * @param path the znode whose children changed
 */
@Override
public void nodeChildrenChanged(String path) {
  if (path.equals(watcher.rsZNode) && !server.isAborted() && !server.isStopped()) {
    try {
      // Re-list children and re-set watches so subsequent changes keep firing.
      List<String> servers = ZKUtil.listChildrenAndWatchThem(watcher, watcher.rsZNode);
      add(servers);
    } catch (IOException | KeeperException e) {
      // The two original handlers were byte-identical; multi-catch collapses them.
      server.abort("Unexpected zk exception getting RS nodes", e);
    }
  }
}
/**
 * Creates the WAL-rolling thread for a region server.
 * @param server the server whose configuration supplies the roll settings
 * @param services region-server services used while rolling
 */
public LogRoller(final Server server, final RegionServerServices services) {
  super("LogRoller");
  this.server = server;
  this.services = services;
  // Roll WALs at least once per hour by default.
  this.rollperiod =
      this.server.getConfiguration().getLong("hbase.regionserver.logroll.period", 3600000);
  // Wake every 10 seconds by default to check whether a roll was requested.
  this.threadWakeFrequency =
      this.server.getConfiguration().getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
  // Check for WAL low-replication every 30 seconds by default.
  this.checkLowReplicationInterval = this.server.getConfiguration()
      .getLong("hbase.regionserver.hlog.check.lowreplication.interval", 30 * 1000);
}
// NOTE(review): incomplete fragment — unbalanced braces, left byte-identical. Chooses how
// to list regions for a snapshot: meta-table regions come from the ZooKeeper meta
// locator, any other table's regions from a meta-table scan (offline regions excluded by
// the `false` flag — confirm the parameter meaning against MetaTableAccessor's javadoc).
if (TableName.META_TABLE_NAME.equals(snapshotTable)) { regionsAndLocations = MetaTableLocator.getMetaRegionsAndLocations( server.getZooKeeper()); } else { regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations( server.getConnection(), snapshotTable, false);
/**
 * Claims every replication queue the dead region server left behind, recording the
 * claimed queue ids and their WALs, then aborts the test server. The latch is always
 * released so waiting test threads are never left hanging.
 */
@Override
public void run() {
  try {
    logZnodesMap = new HashMap<>();
    for (String queue : rq.getAllQueues(deadRS)) {
      Pair<String, SortedSet<String>> claimed =
          rq.claimQueue(deadRS, queue, server.getServerName());
      if (claimed != null) {
        logZnodesMap.put(claimed.getFirst(), claimed.getSecond());
      }
    }
    server.abort("Done with testing", null);
  } catch (Exception e) {
    LOG.error("Got exception while running NodeFailoverWorker", e);
  } finally {
    // Release waiters even when claiming failed.
    latch.countDown();
  }
}