/**
 * Determines whether the distributed system owned by the given distribution
 * manager is in the process of shutting down (or already gone).
 *
 * @return true if the distributed system is closing
 */
private boolean checkDSClosing(ClusterDistributionManager dm) {
  InternalDistributedSystem system = dm.getSystem();
  if (system == null) {
    // No system at all counts as "closing" for callers' purposes.
    return true;
  }
  return system.isDisconnecting();
}
/**
 * Reports whether the distributed system behind the supplied distribution
 * manager is disconnecting or has already been torn down.
 *
 * @return true if the distributed system is closing
 */
public boolean checkDSClosing(ClusterDistributionManager dm) {
  InternalDistributedSystem system = dm.getSystem();
  // A missing system is treated the same as one that is disconnecting.
  return system == null || system.isDisconnecting();
}
/**
 * Returns true if this DM or the DistributedSystem owned by it is closing or is closed.
 */
protected boolean isCloseInProgress() {
  if (closeInProgress) {
    return true;
  }
  InternalDistributedSystem system = getSystem();
  if (system == null) {
    // No system reference: this DM's own flag above is the only signal.
    return false;
  }
  return system.isDisconnecting();
}
/**
 * Returns the configuration of the distributed system this manager belongs to,
 * or {@code null} if no system is currently attached.
 */
@Override
public DistributionConfig getConfig() {
  // Bug fix: the original null-checked the local `sys` but then read the
  // `system` field (`system.getConfig()`), which could race with a concurrent
  // disconnect and NPE. Use the checked local reference throughout.
  InternalDistributedSystem sys = getSystem();
  return sys == null ? null : sys.getConfig();
}
/**
 * Checks whether the distributed system reachable through {@code dm} is
 * shutting down.
 *
 * @return true if the distributed system is closing
 */
private boolean checkDSClosing(ClusterDistributionManager dm) {
  InternalDistributedSystem system = dm.getSystem();
  // Treat an absent system exactly like a disconnecting one.
  return (system == null) || system.isDisconnecting();
}
/**
 * Records a fatal membership failure and disconnects the distributed system.
 * The failure cause is stashed on the enclosing manager before the disconnect
 * so later diagnostics can report it.
 */
@Override
public void membershipFailure(String reason, Throwable t) {
  // Flag that a thread died abnormally before tearing anything down.
  exceptionInThreads = true;
  ClusterDistributionManager.this.rootCause = t;
  InternalDistributedSystem system = getSystem();
  system.disconnect(reason, t, true);
}
/**
 * Tells whether the distributed system owned by {@code dm} is on its way down.
 *
 * @return true if the distributed system is closing
 */
public boolean checkDSClosing(ClusterDistributionManager dm) {
  InternalDistributedSystem system = dm.getSystem();
  if (system != null) {
    return system.isDisconnecting();
  }
  // No system present — report it as closing.
  return true;
}
/**
 * Removes the proxied client from every cache server's deny list, since its
 * queue has already been destroyed. Logs (at debug) each successful removal.
 */
@Override
protected void process(ClusterDistributionManager dm) {
  Cache cache = dm.getCache();
  if (cache == null) {
    return;
  }
  List servers = cache.getCacheServers();
  if (servers == null) {
    return;
  }
  for (Iterator it = servers.iterator(); it.hasNext();) {
    CacheServerImpl server = (CacheServerImpl) it.next();
    CacheClientNotifier notifier = server.getAcceptor().getCacheClientNotifier();
    Set denylisted = notifier.getDenylistedClient();
    if (denylisted != null && denylisted.remove(proxyID)) {
      DistributedSystem system = dm.getSystem();
      if (system != null && logger.isDebugEnabled()) {
        logger.debug(
            "Remove the client from deny list as its queue is already destroyed: {}",
            proxyID);
      }
    }
  }
}
/**
 * Handles a remote elder-recovery request: waits until this member has
 * processed the sender's elder transition, then replies with the DLock
 * services this member is (and is not) grantor for.
 * NOTE(review): assumes reply() is safe to call with empty lists — confirm.
 */
@Override
protected void process(ClusterDistributionManager dm) {
  ArrayList grantors = new ArrayList(); // svc names grantor for
  ArrayList grantorVersions = new ArrayList(); // grantor versions
  ArrayList grantorSerialNumbers = new ArrayList(); // serial numbers of grantor svcs
  ArrayList nonGrantors = new ArrayList(); // svc names non-grantor for
  if (dm.waitForElder(this.getSender())) {
    // Sender is the (new) elder: prepare grantor requests for recovery,
    // collect this member's grantor state, and send it back.
    GrantorRequestProcessor.readyForElderRecovery(dm.getSystem(), this.getSender(), null);
    DLockService.recoverRmtElder(grantors, grantorVersions, grantorSerialNumbers, nonGrantors);
    reply(dm, grantors, grantorVersions, grantorSerialNumbers, nonGrantors);
  } else if (dm.getOtherNormalDistributionManagerIds().isEmpty()) {
    // Either we're alone (and received a message from an unknown member) or else we haven't
    // yet processed a view. In either case, we clearly don't have any grantors,
    // so we return empty lists.
    logger.info(LogMarker.DLS_MARKER,
        "{}: returning empty lists because I know of no other members.", this);
    reply(dm, grantors, grantorVersions, grantorSerialNumbers, nonGrantors);
  } else {
    // Sender has left the distributed system; a reply would go nowhere.
    logger.info(LogMarker.DLS_MARKER,
        "{}: disregarding request from departed member.", this);
  }
}
/**
 * Wires up the mock distribution manager so it serves a mock cache, a mock
 * system, and a region reachable by {@code path}.
 */
@Before
public void setUp() {
  path = "path";
  region = mock(LocalRegion.class);
  txCommitMessage = mock(TXCommitMessage.class);
  dm = mock(ClusterDistributionManager.class);
  InternalCache cache = mock(InternalCache.class);
  when(dm.getCache()).thenReturn(cache);
  when(dm.getSystem()).thenReturn(mock(InternalDistributedSystem.class));
  when(cache.getRegionByPath(path)).thenReturn(region);
}
/**
 * Creates the collaborating mocks and stubs the distribution manager to hand
 * out its cancel criterion, system, and membership manager.
 */
@Before
public void before() {
  member0 = mock(InternalDistributedMember.class);
  cancelCriterion = mock(CancelCriterion.class);
  systemCancelCriterion = mock(CancelCriterion.class);
  system = mock(InternalDistributedSystem.class);
  memberManager = mock(MembershipManager.class);
  clusterDistributionManager = mock(ClusterDistributionManager.class);
  when(system.getCancelCriterion()).thenReturn(systemCancelCriterion);
  when(clusterDistributionManager.getCancelCriterion()).thenReturn(cancelCriterion);
  when(clusterDistributionManager.getSystem()).thenReturn(system);
  when(clusterDistributionManager.getMembershipManager()).thenReturn(memberManager);
}
/**
 * Starts the periodic cleanup task (e.g. expiring surprise members). Does
 * nothing when no listener or distribution manager is available. Runs every
 * surpriseMemberTimeout/3 ms after an initial surpriseMemberTimeout delay.
 */
private void startCleanupTimer() {
  if (this.listener == null || this.listener.getDM() == null) {
    return;
  }
  DistributedSystem system = this.listener.getDM().getSystem();
  this.cleanupTimer = new SystemTimer(system, true);
  SystemTimer.SystemTimerTask task = new SystemTimer.SystemTimerTask() {
    @Override
    public void run2() {
      cleanUpSurpriseMembers();
    }
  };
  this.cleanupTimer.scheduleAtFixedRate(task, surpriseMemberTimeout,
      surpriseMemberTimeout / 3);
}
@Override protected void process(ClusterDistributionManager dm) { boolean isToShutdown = hasCache(dm); super.process(dm); if (isToShutdown) { // Do the disconnect in an async thread. The thread we are running // in is one in the dm threadPool so we do not want to call disconnect // from this thread because it prevents dm from cleaning up all its threads // and causes a 20 second delay. final InternalDistributedSystem ids = dm.getSystem(); if (ids.isConnected()) { Thread t = new LoggingThread("ShutdownAllRequestDisconnectThread", false, () -> { try { Thread.sleep(SLEEP_TIME_BEFORE_DISCONNECT_DS); } catch (InterruptedException ignore) { } ConnectionTable.threadWantsSharedResources(); if (ids.isConnected()) { ids.disconnect(); } }); t.start(); } } }
/**
 * Returns the current elder candidate.
 *
 * @throws DistributedSystemDisconnectedException if the system is shutting
 *         down (carrying the manager's root cause, if any)
 */
public InternalDistributedMember getElderId() throws DistributedSystemDisconnectedException {
  // Fail fast while the manager itself is closing.
  if (clusterDistributionManager.isCloseInProgress()) {
    throw new DistributedSystemDisconnectedException(
        "no valid elder when system is shutting down",
        clusterDistributionManager.getRootCause());
  }
  // Also honor any system-level cancellation before electing.
  clusterDistributionManager.getSystem().getCancelCriterion().checkCancelInProgress(null);
  return getElderCandidate();
}
/**
 * Handles a console (admin) shutdown: detaches its alert listener and stat
 * sampler listeners, then notifies the distribution manager.
 */
@Override
public void process(ClusterDistributionManager dm) {
  InternalDistributedSystem system = dm.getSystem();
  if (alertListenerExpected) {
    boolean removed = dm.getAlertingService().removeAlertListener(getSender());
    if (!removed && !ignoreAlertListenerRemovalFailure) {
      logger.warn("Unable to remove console with id {} from alert listeners.", getSender());
    }
  }
  GemFireStatSampler sampler = system.getStatSampler();
  if (sampler != null) {
    sampler.removeListenersByRecipient(getSender());
  }
  dm.handleConsoleShutdown(getSender(), crashed,
      String.format("Reason for automatic admin disconnect : %s", reason));
}
@Override protected void process(ClusterDistributionManager dm) { Region r = getRegion(dm.getSystem()); if (r != null) { try { // LinkedList entries = getEntriesForRegion(r, this.getSender()); // new ResponseThread(this.getSender(), numResults, dm, this.snapshotId).start(); SnapshotResultMessage m = SnapshotResultMessage.create(r, snapshotId); m.setRecipient(this.getSender()); dm.putOutgoing(m); } catch (CacheException ex) { throw new GemFireCacheException(ex); } } }
/**
 * Applies the requested expiration action to a single entry of the target
 * region, if the region exists on this member.
 */
@Override
public void process(ClusterDistributionManager dm) {
  Region r = getRegion(dm.getSystem());
  if (r != null) {
    try {
      if (action == ExpirationAction.LOCAL_DESTROY) {
        r.localDestroy(key);
      } else if (action == ExpirationAction.DESTROY) {
        r.destroy(key);
      } else if (action == ExpirationAction.INVALIDATE) {
        r.invalidate(key);
      } else if (action == ExpirationAction.LOCAL_INVALIDATE) {
        r.localInvalidate(key);
      }
    } catch (Exception e) {
      // Fix: the original swallowed the exception (only the message template
      // was logged). Pass it as the trailing argument so Log4j2 records the
      // stack trace alongside the parameterized message.
      logger.warn("Failed attempt to destroy or invalidate entry {} {} from console at {}",
          r.getFullPath(), key, this.getSender(), e);
    }
  }
}
/**
 * Applies the requested expiration action to the whole target region, if the
 * region exists on this member.
 */
@Override
public void process(ClusterDistributionManager dm) {
  Region r = getRegion(dm.getSystem());
  if (r != null) {
    try {
      if (action == ExpirationAction.LOCAL_DESTROY) {
        r.localDestroyRegion();
      } else if (action == ExpirationAction.DESTROY) {
        r.destroyRegion();
      } else if (action == ExpirationAction.INVALIDATE) {
        r.invalidateRegion();
      } else if (action == ExpirationAction.LOCAL_INVALIDATE) {
        r.localInvalidateRegion();
      }
    } catch (Exception e) {
      // Fix: the original dropped the exception. Supply it as the trailing
      // argument so Log4j2 logs the cause and stack trace.
      logger.warn("Failed attempt to destroy or invalidate region {} from console at {}",
          r.getFullPath(), this.getSender(), e);
    }
  }
}
/** * Sends this request, waits for the AdminReponse, and returns it */ public AdminResponse sendAndWait(ClusterDistributionManager dm) { InternalDistributedMember recipient = this.getRecipient(); if (dm.getId().equals(recipient)) { // We're sending this message to ourselves, we won't need a // reply process. Besides, if we try to create one, we'll get // an assertion failure. this.msgId = -1; } else { this.processor = new AdminReplyProcessor(dm.getSystem(), recipient); this.msgId = this.processor.getProcessorId(); } return AdminWaiters.sendAndWait(this, dm); }
private synchronized void rescheduleTimer() { // cancel the old timer. Although cancelled, old task might execute one last // time if (timer != null) timer.cancel(); // Get the swarm. Currently rather UGLY. InternalDistributedSystem system = dm.getSystem(); if (system == null || system.getDistributionManager() != dm) { throw new org.apache.geode.distributed.DistributedSystemDisconnectedException( "This manager has been cancelled"); } // start and schedule new timer timer = new SystemTimer(system /* swarm */, true); EvaluateAlertDefnsTask task = new EvaluateAlertDefnsTask(); if (refreshAtFixedRate) { timer.scheduleAtFixedRate(task, 0, refreshInterval); } else { timer.schedule(task, 0, refreshInterval); } }