@Override
public ModelNode execute(EmbeddedCacheManager manager) {
    // Expose whether the given cache manager node is the cluster coordinator
    // as a management ModelNode (boolean payload).
    return new ModelNode(manager.isCoordinator());
}
// NOTE(review): the trailing "}," closes an enclosing anonymous class whose
// declaration starts outside this chunk.
},
/**
 * Reports whether the local node is currently the cluster coordinator.
 *
 * @return {@code true} when the underlying cache container holds the
 *         coordinator role, {@code false} otherwise
 */
@Override
public boolean isCoordinator() {
    // Pure delegation: the container owns the cluster-membership state.
    return container.isCoordinator();
}
@Override public boolean amIStandby() { EmbeddedCacheManager manager = this.cm; if (manager == null) { // In case we cannot fetch the information, lets assume we // are standby, so to have less responsibility. return true; } return (!manager.isCoordinator()); }
@Override
public ModelNode execute(EmbeddedCacheManager manager) {
    // Expose whether the given cache manager node is the cluster coordinator
    // as a management ModelNode (boolean payload).
    return new ModelNode(manager.isCoordinator());
}
// NOTE(review): the trailing "}," closes an enclosing anonymous class whose
// declaration starts outside this chunk.
},
/**
 * Snapshots cluster facts once the cache has started: this node's logical
 * address and whether it currently holds the coordinator role.
 */
@CacheStarted
public void cacheStarted(Event e) {
    this.localAddress = this.cacheManager.getAddress();
    this.coordinator = this.cacheManager.isCoordinator();
}
/**
 * Get the mode handler, creating it lazily on first call.
 *
 * The handler is created in READ_WRITE mode when this node is the cluster
 * coordinator or the cache has no RpcManager (i.e. it is not clustered);
 * otherwise READ_ONLY.
 *
 * @return the lazily-initialized {@code IndexerIoModeHandler}
 * @throws IllegalStateException if the cache is not RUNNING when the handler
 *         has not been created yet
 */
// NOTE(review): this is double-checked locking; it is only safe under the Java
// memory model if the modeHandler field is declared volatile. The field
// declaration is outside this chunk — confirm it is volatile, otherwise the
// unsynchronized fast-path read may observe a partially constructed handler.
public IndexerIoModeHandler getModeHandler() {
    if (modeHandler == null) {
        // Fail fast rather than initializing against a stopped cache.
        if (cache.getStatus() != ComponentStatus.RUNNING) {
            throw new IllegalStateException("The cache should be started first");
        }
        synchronized (this) {
            // Re-check under the lock: another thread may have initialized it.
            if (modeHandler == null) {
                this.modeHandler = new IndexerIoModeHandler(cacheManager.isCoordinator()
                    || cache.getAdvancedCache().getRpcManager() == null
                        ? IndexerIoMode.READ_WRITE
                        : IndexerIoMode.READ_ONLY);
            }
        }
    }
    return modeHandler;
}
/**
 * Verifies coordinator hand-off: the first manager is coordinator, a second
 * joining manager is not, and after the first is killed the second takes over.
 */
public void testIsCoordinator() throws Exception {
    cm1.getCache("cache"); // this will make sure any lazy components are started.
    assert cm1.isCoordinator() : "Should be coordinator!";
    cm2 = addClusterEnabledCacheManager();
    cm2.defineConfiguration("cache", cfg.build());
    cm2.getCache("cache"); // this will make sure any lazy components are started.
    // With two members, only the first (oldest) node is coordinator.
    assert cm1.isCoordinator();
    assert !cm2.isCoordinator();
    TestingUtil.killCacheManagers(cm1);
    // wait till cache2 gets the view change notification
    TestingUtil.blockUntilViewsReceived(50000, false, cm2);
    // The surviving node must now hold the coordinator role.
    assert cm2.isCoordinator();
}
// NOTE(review): the trailing "}" closes an enclosing class whose declaration
// starts outside this chunk.
}
/**
 * Logs cluster configuration and membership details when a cache starts.
 *
 * For a non-clustered manager only a single "not configured" line is emitted.
 * For a clustered manager the full cluster summary is logged once (guarded by
 * an atomic flag) so repeated cache-start events do not duplicate it. Every
 * invocation logs the per-cache startup line with the elapsed time since
 * {@code initiationTimestamp}.
 */
@CacheStarted
public void logCacheManagerStart(final CacheStartedEvent event) {
    Logger log = LogManager.getLogger("MAIN");
    EmbeddedCacheManager cacheManager = event.getCacheManager();
    if (!cacheManager.getCacheManagerConfiguration().isClustered()) {
        // Local Infinispan (no cluster)
        log.info("[CM8008] Infinispan clustering: not configured");
    } else if (clusterInfoLogged.compareAndSet(false, true)) {
        // Clustered Infinispan, log cluster conf for the first start cache event only
        // to prevent duplicate log lines with same cluster info
        log.info("[CM8007] Infinispan status: {}", cacheManager.getStatus());
        log.info("[CM8009] Infinispan cluster name: {}",
            cacheManager.getCacheManagerConfiguration().transport().clusterName());
        log.info("[CM8010] Infinispan cluster local node logical address: {}",
            cacheManager.getAddress());
        log.info("[CM8019] Infinispan cluster local node physical address(es): {}",
            cacheManager.getCacheManagerConfiguration().transport().transport().getPhysicalAddresses());
        log.info("[CM8011] Infinispan cluster coordinator logical address: {}",
            cacheManager.getCoordinator());
        log.info("[CM8016] Infinispan cluster local node is coordinator: {}",
            cacheManager.isCoordinator());
        log.info("[CM8012] Infinispan cluster members: {}", cacheManager.getMembers());
        log.info("[CM8013] Infinispan cluster distributed sync timeout: {}",
            cacheManager.getCacheManagerConfiguration().transport().distributedSyncTimeout());
        log.info("[CM8014] Infinispan cluster JGroups configuration file: {}",
            cacheManager.getCacheManagerConfiguration().transport().properties().getProperty("configurationFile"));
    }
    // Per-cache startup line, logged on every event.
    final Date now = new Date();
    log.info("[CM8006] Started Infinispan {} cache {} in {} ms",
        cacheManager.getCacheConfiguration(event.getCacheName()).clustering().cacheModeString(),
        event.getCacheName(),
        now.getTime() - initiationTimestamp.getTime());
}
// NOTE(review): the trailing "}" closes an enclosing class whose declaration
// starts outside this chunk.
}
/**
 * Simulates the crash of a non-coordinator node and verifies the remaining
 * members re-form the view, finish rebalancing, and keep all data intact.
 */
public void testNodeCrash() {
    List<MagicKey> insertedKeys = init();
    // Sanity check: node 2 must not be the coordinator, since it is the one
    // we are about to crash.
    assertFalse(c2.getCacheManager().isCoordinator());
    // Simulate the crash by discarding all of node 2's traffic.
    d2.setDiscardAll(true);
    TestingUtil.blockUntilViewsReceived(30000, false, c1, c3);
    TestingUtil.waitForNoRebalance(c1, c3);
    // The survivors must still hold every inserted key.
    checkValuesInDC(insertedKeys, c1, c3);
}
/**
 * Simulates the crash of the coordinator node and verifies the remaining
 * members re-form the view, finish rebalancing, and keep all data intact.
 */
public void testCoordCrash() {
    List<MagicKey> insertedKeys = init();
    // Sanity check: node 1 must currently be the coordinator, since it is the
    // one we are about to crash.
    assertTrue(c1.getCacheManager().isCoordinator());
    // Simulate the crash by discarding all of node 1's traffic.
    d1.setDiscardAll(true);
    TestingUtil.blockUntilViewsReceived(30000, false, c2, c3);
    TestingUtil.waitForNoRebalance(c2, c3);
    // The survivors must still hold every inserted key.
    checkValuesInDC(insertedKeys, c2, c3);
}
/**
 * The cluster formation changed, so determine whether the current cache stopped
 * being the coordinator or became the coordinator. This method can lead to an
 * optional in memory to cache loader state push, if the current cache became
 * the coordinator. This method will report any issues that could potentially
 * arise from this push.
 */
@ViewChanged
@Merged
public void viewChange(ViewChangedEvent event) {
    LOG.info("The intercepted EventType is : " + event.getType());
    LOG.info("The old list of members : " + event.getOldMembers());
    // FIX: removed the stray duplicated colon ("members: :") so the message
    // matches the format of the two log lines above.
    LOG.info("The new list of members : " + event.getNewMembers());
    boolean nowCoordinator = isCoordinator(event.getNewMembers(), event.getLocalAddress());
    // React when our coordinator role flipped, or when a merge view arrived
    // while we (still) hold the coordinator role — a merge may require the
    // state push even though the role itself did not change.
    if (coordinator != nowCoordinator || (nowCoordinator && event.isMergeView())) {
        activeStatusChanged(nowCoordinator);
    }
}
/**
 * Bootstraps a three-node scattered-sync cluster with three segments whose
 * ownership is pinned by a controlled consistent-hash factory. View delivery
 * on nodes 1 and 2 is delayed behind a latch so the test can control when
 * topology updates from the (possibly departing) old coordinator are observed.
 */
@Override
protected void createCacheManagers() throws Throwable {
    ConfigurationBuilder cb = new ConfigurationBuilder();
    // Pin segment ownership deterministically: segment i primary-owned by node i.
    chf = new ControlledConsistentHashFactory.Scattered(new int[] {0, 1, 2});
    cb.clustering().cacheMode(CacheMode.SCATTERED_SYNC).hash().numSegments(3).consistentHashFactory(chf);
    if (biasAcquisition != null) {
        cb.clustering().biasAcquisition(biasAcquisition);
    }
    addClusterEnabledCacheManager(cb);
    // If the updated topologies from old coord come when it's no longer in the view these are ignored.
    // Therefore we have to delay the view.
    viewLatch = new CountDownLatch(1);
    GlobalConfigurationBuilder gcb = new GlobalConfigurationBuilder();
    gcb.transport().transport(new DelayedViewJGroupsTransport(viewLatch));
    addClusterEnabledCacheManager(gcb, cb);
    // we need distinct transport instances on manager(1) and (2)
    gcb.transport().transport(new DelayedViewJGroupsTransport(viewLatch));
    addClusterEnabledCacheManager(gcb, cb);
    // The first (oldest) manager must hold the coordinator role.
    assertTrue(cache(0).getCacheManager().isCoordinator());
    // start other caches
    cache(1);
    cache(2);
    waitForClusterToForm();
}