/**
 * Creates a consistent hash factory that manages exactly one segment, owned by the
 * given primary owner and the optional backup owners.
 *
 * @param trait              strategy object for the concrete consistent hash type
 * @param primaryOwnerIndex  index of the node acting as primary owner of the segment
 * @param backupOwnerIndexes indexes of the backup owner nodes, in backup order
 */
public ControlledConsistentHashFactory(Trait<CH> trait, int primaryOwnerIndex, int... backupOwnerIndexes) {
   super(trait, 1);
   setOwnerIndexes(primaryOwnerIndex, backupOwnerIndexes);
}
/**
 * Replaces the owner assignment for every segment at once.
 *
 * @param segmentOwners one array of owner indexes per segment, primary owner first
 */
@Override
public void setOwnerIndexes(int[][] segmentOwners) {
   super.setOwnerIndexes(segmentOwners);
}
}
/**
 * Sets the primary owner of the segment.
 * <p>
 * NOTE(review): this override deliberately forwards only {@code primaryOwnerIndex} —
 * presumably this factory variant supports a single owner per segment. The original
 * silently discarded any {@code backupOwnerIndexes} passed in; assert they are empty
 * so accidental misuse is surfaced (when assertions are enabled) instead of masked.
 *
 * @param primaryOwnerIndex  index of the node that owns the segment
 * @param backupOwnerIndexes must be empty; this variant has no backup owners
 */
@Override
public void setOwnerIndexes(int primaryOwnerIndex, int... backupOwnerIndexes) {
   assert backupOwnerIndexes == null || backupOwnerIndexes.length == 0
         : "This consistent hash factory supports a primary owner only";
   super.setOwnerIndexes(primaryOwnerIndex);
}
/**
 * After each test, points the single segment back at node 0 (primary) and
 * node 1 (backup) so ownership changes made by one test cannot leak into the next.
 */
@AfterMethod
public void resetFactory() {
   factory.setOwnerIndexes(0, 1);
}
/**
 * Before each test, assigns the single segment to node 1 (primary) and node 2 (backup).
 */
@BeforeMethod
protected void beforeMethod() {
   factory.setOwnerIndexes(1, 2);
}
/**
 * Creates a consistent hash factory with one segment per entry of {@code segmentOwners}.
 *
 * @param trait         strategy object for the concrete consistent hash type
 * @param segmentOwners one array of owner indexes per segment, primary owner first;
 *                      must contain at least one segment
 * @throws IllegalArgumentException if {@code segmentOwners} is null or empty
 */
public ControlledConsistentHashFactory(Trait<CH> trait, int[][] segmentOwners) {
   // Validate inside the super() argument: Java requires super() to be the first
   // statement, and the original checked emptiness only AFTER the superclass had
   // already been initialized with 0 segments (and NPE'd on a null argument).
   super(trait, requireSegmentCount(segmentOwners));
   setOwnerIndexes(segmentOwners);
}

// Returns the segment count after rejecting null/empty owner assignments.
private static int requireSegmentCount(int[][] segmentOwners) {
   if (segmentOwners == null || segmentOwners.length == 0)
      throw new IllegalArgumentException("Need at least one set of owners");
   return segmentOwners.length;
}
/**
 * Convenience overload for single-segment factories: assigns the given primary and
 * backup owners to the first (and only) segment.
 *
 * @param primaryOwnerIndex  index of the primary owner node
 * @param backupOwnerIndexes indexes of the backup owner nodes, in backup order
 */
public void setOwnerIndexes(int primaryOwnerIndex, int... backupOwnerIndexes) {
   int[] owners = concatOwners(primaryOwnerIndex, backupOwnerIndexes);
   setOwnerIndexes(new int[][]{owners});
}
// Reassign the owners of all four segments (primary owner listed first per segment).
// NOTE(review): fragment — the enclosing method is outside this view; the new topology
// presumably takes effect on the next triggered rebalance.
consistentHashFactory.setOwnerIndexes(new int[][]{{0, 1}, {0, 2}, {2, 1}, {1, 0}});
/**
 * Verifies that a cluster listener registered on cache2 sees no events when cache0 goes
 * from non-owner to primary owner of KEY via a rebalance: the entry arrives on cache0
 * through state transfer (it eventually exists locally, checked with CACHE_MODE_LOCAL),
 * which must not be reported as a cache event.
 * NOTE(review): assumes the initial owners exclude cache0 (set in a @BeforeMethod
 * elsewhere) so that setOwnerIndexes(0, 1) actually changes ownership — confirm.
 */
public void testOtherNodeBecomingPrimaryFromNotAnOwner() throws Exception { final Cache<Object, String> cache0 = cache(0, CACHE_NAME); Cache<Object, String> cache1 = cache(1, CACHE_NAME); Cache<Object, String> cache2 = cache(2, CACHE_NAME); cache1.put(KEY, VALUE); ClusterListener listener = new ClusterListener(); cache2.addListener(listener); factory.setOwnerIndexes(0, 1); log.trace("Triggering rebalance to cause segment ownership to change"); factory.triggerRebalance(cache0); eventually(new Condition() { @Override public boolean isSatisfied() throws Exception { return cache0.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).containsKey(KEY); } }); TestingUtil.waitForNoRebalance(cache0, cache1, cache2); assertEquals(listener.events.size(), 0); }
/**
 * Same scenario as the "other node" variant, but the listener is registered on cache0 —
 * the very node that becomes primary owner through the rebalance. Receiving KEY via
 * state transfer (verified locally with CACHE_MODE_LOCAL) must not fire any event on
 * the listener-owning node either.
 * NOTE(review): assumes the initial owners exclude cache0 (set up elsewhere) — confirm.
 */
public void testClusterListenerNodeBecomingPrimaryFromNotAnOwner() throws Exception { final Cache<Object, String> cache0 = cache(0, CACHE_NAME); Cache<Object, String> cache1 = cache(1, CACHE_NAME); Cache<Object, String> cache2 = cache(2, CACHE_NAME); cache1.put(KEY, VALUE); ClusterListener listener = new ClusterListener(); cache0.addListener(listener); factory.setOwnerIndexes(0, 1); log.trace("Triggering rebalance to cause segment ownership to change"); factory.triggerRebalance(cache0); eventually(new Condition() { @Override public boolean isSatisfied() throws Exception { return cache0.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).containsKey(KEY); } }); TestingUtil.waitForNoRebalance(cache0, cache1, cache2); assertEquals(listener.events.size(), 0); }
/**
 * Verifies that a cluster listener on cache2 sees no events when cache0 goes from
 * non-owner to BACKUP owner (owners become 1 primary / 0 backup) via a rebalance.
 * The local containsKey check adds SKIP_OWNERSHIP_CHECK because cache0 is only a
 * backup, not the primary, after the topology change.
 * NOTE(review): assumes the initial owners exclude cache0 (set up elsewhere) — confirm.
 */
public void testOtherNodeBecomingBackupFromNotAnOwner() throws Exception { final Cache<Object, String> cache0 = cache(0, CACHE_NAME); Cache<Object, String> cache1 = cache(1, CACHE_NAME); Cache<Object, String> cache2 = cache(2, CACHE_NAME); cache1.put(KEY, VALUE); ClusterListener listener = new ClusterListener(); cache2.addListener(listener); factory.setOwnerIndexes(1, 0); log.trace("Triggering rebalance to cause segment ownership to change"); factory.triggerRebalance(cache0); eventually(new Condition() { @Override public boolean isSatisfied() throws Exception { return cache0.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL, Flag.SKIP_OWNERSHIP_CHECK).containsKey(KEY); } }); TestingUtil.waitForNoRebalance(cache0, cache1, cache2); assertEquals(listener.events.size(), 0); }
/**
 * DIST_SYNC-only variant: the listener lives on cache0, which becomes a BACKUP owner
 * (owners become 1 primary / 0 backup) through the rebalance. Receiving the entry via
 * state transfer (checked locally with CACHE_MODE_LOCAL) must not fire any event.
 * NOTE(review): assumes the initial owners exclude cache0 (set up elsewhere) — confirm.
 */
@InCacheMode(CacheMode.DIST_SYNC) public void testClusterListenerNodeBecomingBackupFromNotAnOwner() throws Exception { final Cache<Object, String> cache0 = cache(0, CACHE_NAME); Cache<Object, String> cache1 = cache(1, CACHE_NAME); Cache<Object, String> cache2 = cache(2, CACHE_NAME); cache1.put(KEY, VALUE); ClusterListener listener = new ClusterListener(); cache0.addListener(listener); factory.setOwnerIndexes(1, 0); log.trace("Triggering rebalance to cause segment ownership to change"); factory.triggerRebalance(cache0); eventually(new Condition() { @Override public boolean isSatisfied() throws Exception { return cache0.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).containsKey(KEY); } }); TestingUtil.waitForNoRebalance(cache0, cache1, cache2); assertEquals(listener.events.size(), 0); }
// Make node 0 the primary and node 2 the backup owner of the single segment.
// NOTE(review): fragment — the enclosing method is outside this view; the change
// presumably takes effect on the next triggered rebalance.
factory.setOwnerIndexes(0, 2);
// Make node 0 the primary and node 2 the backup owner of the single segment.
// NOTE(review): fragment — the enclosing method is outside this view; the change
// presumably takes effect on the next triggered rebalance.
factory.setOwnerIndexes(0, 2);
/**
 * Runs a functional read-write operation on node 2 in a transaction started BEFORE the
 * topology change, then commits it while node 2's inbound state transfer is blocked.
 * Flow: seed key=1 on node 0; wrap node 2's StateConsumer so the incoming state is held
 * back; begin a tx on node 2 and apply {@code op} (it must read the pre-increment value 1);
 * suspend the tx; make node 2 a (write) owner and fork joining a new member to trigger the
 * rebalance; once state transfer blocks, assert node 2 is a write backup but not yet a read
 * owner; resume and commit the tx; unblock state transfer; finally assert node 2's data
 * container holds 1 + expectedIncrement (the committed write must survive the state apply).
 *
 * @param op                the read-write operation to run inside the transaction
 * @param expectedIncrement how much {@code op} adds to the stored value
 */
private void testBeforeTopology(BiFunction<FunctionalMap.ReadWriteMap<String, Integer>, String, Integer> op, int expectedIncrement) throws Exception { cache(0).put("key", 1); // Blocking on receiver side. We cannot block the StateResponseCommand on the server side since // the InternalCacheEntries in its state are the same instances of data stored in DataContainer // - therefore when the command is blocked on sender the command itself would be mutated by applying // the transaction below. BlockingStateConsumer bsc2 = TestingUtil.wrapComponent(cache(2), StateConsumer.class, BlockingStateConsumer::new); tm(2).begin(); FunctionalMap.ReadWriteMap<String, Integer> rw = ReadWriteMapImpl.create( FunctionalMapImpl.create(this.<String, Integer>cache(2).getAdvancedCache())); assertEquals(new Integer(1), op.apply(rw, "key")); Transaction tx = tm(2).suspend(); chf.setOwnerIndexes(0, 2); Future<?> future = fork(() -> { TestResourceTracker.testThreadStarted(this); addClusterEnabledCacheManager(cb).getCache(); }); bsc2.await(); DistributionInfo distributionInfo = cache(2).getAdvancedCache().getDistributionManager().getCacheTopology().getDistribution("key"); assertFalse(distributionInfo.isReadOwner()); assertTrue(distributionInfo.isWriteBackup()); tm(2).resume(tx); tm(2).commit(); bsc2.unblock(); future.get(10, TimeUnit.SECONDS); InternalCacheEntry<Object, Object> ice = cache(2).getAdvancedCache().getDataContainer().get("key"); assertEquals("Current ICE: " + ice, 1 + expectedIncrement, ice.getValue()); }
/**
 * Kicks off a controlled rebalance with sequencer barriers in place:
 * 1) pauses the coordinator (manager 0) just before it processes node 2's
 *    handleRebalancePhaseConfirm (barrier "rebalance:before_confirm");
 * 2) brackets node 2's StateConsumer.onTopologyUpdate(…, true) with the
 *    "rebalance:before_get_tx"/"rebalance:after_get_tx" barriers;
 * then changes the segment owners to node 2 primary / node 1 backup and triggers the
 * rebalance from cache(0).
 * NOTE(review): withParam(1, …) presumably selects the 2nd parameter of the matched
 * methods — confirm the matcher's parameter indexing.
 */
private void startRebalance() throws Exception { InvocationMatcher rebalanceCompletedMatcher = matchMethodCall("handleRebalancePhaseConfirm") .withParam(1, address(2)).matchCount(0).build(); advanceOnGlobalComponentMethod(sequencer, manager(0), ClusterTopologyManager.class, rebalanceCompletedMatcher).before("rebalance:before_confirm"); InvocationMatcher localRebalanceMatcher = matchMethodCall("onTopologyUpdate").withParam(1, true).matchCount(0).build(); advanceOnComponentMethod(sequencer, cache(2), StateConsumer.class, localRebalanceMatcher).before("rebalance:before_get_tx").after("rebalance:after_get_tx"); consistentHashFactory.setOwnerIndexes(2, 1); consistentHashFactory.triggerRebalance(cache(0)); }
/**
 * Runs a functional read-write operation on node 2 in a transaction started AFTER the
 * topology change, while node 2's inbound state transfer is still blocked.
 * Flow: seed key=1 on node 0; wrap node 2's StateConsumer so incoming state is held back;
 * make node 2 a (write) owner and fork joining a new member to trigger the rebalance;
 * once state transfer blocks, assert node 2 is a write backup but not yet a read owner;
 * run {@code op} inside a fresh transaction (it must still read the pre-increment value 1,
 * fetched remotely since node 2 is not a read owner); unblock state transfer; finally
 * assert node 2's data container holds 1 + expectedIncrement.
 *
 * @param op                the read-write operation to run inside the transaction
 * @param expectedIncrement how much {@code op} adds to the stored value
 */
private void testAfterTopology(BiFunction<FunctionalMap.ReadWriteMap<String, Integer>, String, Integer> op, int expectedIncrement) throws Exception { cache(0).put("key", 1); // Blocking on receiver side. We cannot block the StateResponseCommand on the server side since // the InternalCacheEntries in its state are the same instances of data stored in DataContainer // - therefore when the command is blocked on sender the command itself would be mutated by applying // the transaction below. BlockingStateConsumer bsc2 = TestingUtil.wrapComponent(cache(2), StateConsumer.class, BlockingStateConsumer::new); chf.setOwnerIndexes(0, 2); Future<?> future = fork(() -> { TestResourceTracker.testThreadStarted(this); addClusterEnabledCacheManager(cb).getCache(); }); bsc2.await(); DistributionInfo distributionInfo = cache(2).getAdvancedCache().getDistributionManager().getCacheTopology().getDistribution("key"); assertFalse(distributionInfo.isReadOwner()); assertTrue(distributionInfo.isWriteBackup()); withTx(tm(2), () -> { FunctionalMap.ReadWriteMap<String, Integer> rw = ReadWriteMapImpl.create( FunctionalMapImpl.create(this.<String, Integer>cache(2).getAdvancedCache())); assertEquals(new Integer(1), op.apply(rw, "key")); return null; }); bsc2.unblock(); future.get(10, TimeUnit.SECONDS); InternalCacheEntry<Object, Object> ice = cache(2).getAdvancedCache().getDataContainer().get("key"); assertEquals("Current ICE: " + ice, 1 + expectedIncrement, ice.getValue()); }
// Re-point both segments at nodes 1 and 2 only, then stop node 3's manager and drop it
// from the test's manager list so the coming rebalance never assigns ownership to the
// departed node. NOTE(review): fragment — the enclosing method is outside this view.
controlledCHFactory.setOwnerIndexes(new int[][]{{1, 2}, {2, 1}}); manager(3).stop(); cacheManagers.remove(3);
// Re-point both segments at nodes 1 and 2 only, then stop node 3's manager and drop it
// from the test's manager list so the coming rebalance never assigns ownership to the
// departed node. NOTE(review): fragment — the enclosing method is outside this view.
controlledCHFactory.setOwnerIndexes(new int[][]{{1, 2}, {2, 1}}); manager(3).stop(); cacheManagers.remove(3);
/**
 * Builds a 3-node DIST_SYNC transactional cluster with 2 segments, both initially owned
 * primarily by the node that will later be killed ({KILLED, ORIGINATOR} and
 * {KILLED, OTHER}), using an EmbeddedTransactionManager, no L1, a short lock timeout and
 * in-memory state transfer. After the cluster forms and the cache fields are captured,
 * the controlled CH factory is re-pointed at the post-kill ownership
 * ({ORIGINATOR, OTHER} / {OTHER, ORIGINATOR}) so the rebalance that follows killing
 * node KILLED_INDEX moves both segments onto the surviving nodes.
 */
@Override protected void createCacheManagers() throws Throwable { configurationBuilder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true, true); configurationBuilder.transaction().transactionManagerLookup(new EmbeddedTransactionManagerLookup()); configurationBuilder.clustering().remoteTimeout(30000, TimeUnit.MILLISECONDS); configurationBuilder.clustering().hash().l1().disable(); configurationBuilder.locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis()); configurationBuilder.clustering().stateTransfer().fetchInMemoryState(true); ControlledConsistentHashFactory consistentHashFactory = new ControlledConsistentHashFactory.Default(new int[][]{{KILLED_INDEX, ORIGINATOR_INDEX}, {KILLED_INDEX, OTHER_INDEX}}); configurationBuilder.clustering().hash().numSegments(2).consistentHashFactory(consistentHashFactory); createCluster(configurationBuilder, 3); waitForClusterToForm(); originatorCache = cache(ORIGINATOR_INDEX); killedCache = cache(KILLED_INDEX); otherCache = cache(OTHER_INDEX); // Set up the consistent hash after node 1 is killed consistentHashFactory.setOwnerIndexes(new int[][]{{ORIGINATOR_INDEX, OTHER_INDEX}, {OTHER_INDEX, ORIGINATOR_INDEX}}); // TODO Add another test method with ownership changing from [KILLED_INDEX, OTHER_INDEX] to [ORIGINATOR_INDEX, OTHER_INDEX] // i.e. the originator is a non-owner at first, and becomes the primary owner when the prepare is retried }