public ConsistentHashLocality(Cache<?, ?> cache, ConsistentHash hash) {
   this.topology = new LocalizedCacheTopology(
         cache.getCacheConfiguration().clustering().cacheMode(),
         new CacheTopology(0, 0, hash, null, CacheTopology.Phase.NO_REBALANCE,
               Collections.emptyList(), Collections.emptyList()),
         cache.getCacheConfiguration().clustering().hash().keyPartitioner(),
         cache.getCacheManager().getAddress(), true);
}

@Override
public boolean isLocal(Object key) {
   return this.topology.getDistribution(key).isPrimary();
}
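// Hypothetical usage sketch (not from the original source): assuming ConsistentHashLocality is used as a
// key-locality helper, it can tell whether the local node is the primary owner of a key. The method name
// countPrimaryOwnedKeys is made up for illustration.
int countPrimaryOwnedKeys(Cache<String, String> cache, Iterable<String> keys) {
   ConsistentHash ch = cache.getAdvancedCache().getDistributionManager().getCacheTopology().getCurrentCH();
   ConsistentHashLocality locality = new ConsistentHashLocality(cache, ch);
   int local = 0;
   for (String key : keys) {
      if (locality.isLocal(key)) {
         local++; // the local node is the primary owner of this key
      }
   }
   return local;
}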
private String findKeyBasedOnOwnership(String keyPrefix, LocalizedCacheTopology cacheTopology, boolean shouldBePrimaryOwner) {
   for (int i = 0; i < 1000; i++) {
      String key = keyPrefix + i;
      boolean isPrimaryOwner = cacheTopology.getDistribution(key).isPrimary();
      if (isPrimaryOwner == shouldBePrimaryOwner) {
         if (shouldBePrimaryOwner) {
            log.debugf("Found key %s with primary owner %s, segment %d",
                       key, cacheTopology.getLocalAddress(), cacheTopology.getSegment(key));
         } else {
            log.debugf("Found key %s with primary owner != %s, segment %d",
                       key, cacheTopology.getLocalAddress(), cacheTopology.getSegment(key));
         }
         return key;
      }
   }
   throw new RuntimeException("No key could be found for owner, this may be a bug in test or really bad luck!");
}
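// Hypothetical caller (not from the original source): derives one key that the local node primary-owns and
// one that it does not, assuming the helper above is invoked from a test with access to the cache topology.
LocalizedCacheTopology topology = advancedCache(0).getDistributionManager().getCacheTopology();
String primaryOwnedKey = findKeyBasedOnOwnership("primary-", topology, true);
String nonPrimaryKey = findKeyBasedOnOwnership("non-primary-", topology, false);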
final int initialTopologyId = dm0.getCacheTopology().getTopologyId();
// Truncated in the source: the assertion wrapping this call lost its expected value.
// ... dm0.getCacheTopology().getDistribution("k1").readOwners());
assertNull(dm0.getCacheTopology().getPendingCH());
// (the original test presumably starts a rebalance between these two assertions; those steps are not in this excerpt)
assertNotNull(dm0.getCacheTopology().getPendingCH());
assertEquals(Arrays.asList(address(0), address(1), address(2)),
             dm0.getCacheTopology().getPendingCH().locateOwnersForSegment(0));
assertEquals(Arrays.asList(address(1), address(2), address(3), address(0)),
             dm0.getCacheTopology().getDistribution("k1").writeOwners());
assertTrue(dm0.getCacheTopology().isReadOwner("k1"));
assertTrue(dm0.getCacheTopology().isReadOwner("k2"));
assertTrue(dm0.getCacheTopology().isReadOwner("k3"));
assertEquals("v1", cache(0).get("k1"));
assertEquals("v2", cache(0).get("k2"));
int currentTopologyId = cacheTopology.getTopologyId();
if (commandTopologyId != -1 && currentTopologyId != commandTopologyId) {
   throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
}
List<Address> owners = cacheTopology.getDistribution(command.getKey()).writeOwners();
if (owners.contains(rpcManager.getAddress())) {
   return invokeNextAndHandle(ctx, command, (rCtx, rCommand, rv, throwable) ->
         // (handler body truncated in the source)
private Future<Void> simulateTopologyUpdate(Cache<Object, Object> cache) {
   StateTransferLock stl4 = TestingUtil.extractComponent(cache, StateTransferLock.class);
   DistributionManager dm4 = cache.getAdvancedCache().getDistributionManager();
   LocalizedCacheTopology cacheTopology = dm4.getCacheTopology();
   int newTopologyId = cacheTopology.getTopologyId() + 1;
   CacheTopology newTopology = new CacheTopology(newTopologyId, cacheTopology.getRebalanceId(),
         cacheTopology.getCurrentCH(), cacheTopology.getPendingCH(), cacheTopology.getUnionCH(),
         cacheTopology.getPhase(), cacheTopology.getActualMembers(), cacheTopology.getMembersPersistentUUIDs());
   dm4.setCacheTopology(newTopology);
   return fork(() -> stl4.notifyTransactionDataReceived(newTopologyId));
}
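// Hypothetical usage (not from the original source): installs the bumped topology and waits for the forked
// notification to finish; the 10-second timeout is an arbitrary choice for this sketch.
Future<Void> topologyUpdate = simulateTopologyUpdate(cache(0));
topologyUpdate.get(10, TimeUnit.SECONDS);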
} else {
   LocalizedCacheTopology cacheTopology = advancedCache(0).getDistributionManager().getCacheTopology();
   List<Address> members = new ArrayList<>(cacheTopology.getMembers());
   List<Address> owners = cacheTopology.getDistribution(key).readOwners();
   members.removeAll(owners);
   nonOwner = cacheTopology.getMembers().indexOf(members.get(0));
   c = cache(nonOwner);
private int currentTopologyId(Cache cache) {
   return TestingUtil.extractComponent(cache, DistributionManager.class).getCacheTopology().getTopologyId();
}
private int getSegment(Object key) {
   DistributionManager distributionManager = this.getDistributionManager();
   if (distributionManager == null) {
      return 0;
   }
   return distributionManager.getCacheTopology().getSegment(key);
}
public void testNewTopologySentAfterCleanMerge() {
   TestingUtil.waitForNoRebalanceAcrossManagers(managers());
   int initialTopology = advancedCache(0).getRpcManager().getTopologyId();

   expectCompleteTopology(client, initialTopology);

   PartitionDescriptor p0 = new PartitionDescriptor(0);
   PartitionDescriptor p1 = new PartitionDescriptor(1);
   splitCluster(p0.getNodes(), p1.getNodes());
   eventuallyEquals(1, () -> advancedCache(0).getDistributionManager().getCacheTopology().getActualMembers().size());
   eventuallyEquals(1, () -> advancedCache(1).getDistributionManager().getCacheTopology().getActualMembers().size());

   expectPartialTopology(client, initialTopology + 1);

   partition(0).merge(partition(1));
   int finalTopologyId = initialTopology + (partitionHandling == PartitionHandling.DENY_READ_WRITES ? 4 : 8);
   eventuallyExpectCompleteTopology(client, finalTopologyId);

   // Check that we got the number of topology updates to NO_REBALANCE right
   // With DENY_READ_WRITES:
   // T+1: DEGRADED_MODE in both partitions
   // T+3: merged, still DEGRADED_MODE
   // T+4: back to AVAILABLE
   // With ALLOW_READ_WRITES:
   // T+2: NO_REBALANCE in partition [B] before merge
   // T+3: CONFLICT_RESOLUTION, preferred CH: owners = (1) [test-NodeA-22368: 256+0]
   // T+4: NO_REBALANCE update topology after CR and before rebalance begins
   // T+5: READ_OLD (rebalance starts), T+6: READ_ALL, T+7: READ_NEW, T+8: NO_REBALANCE
   LocalizedCacheTopology newTopology = advancedCache(0).getDistributionManager().getCacheTopology();
   assertEquals(CacheTopology.Phase.NO_REBALANCE, newTopology.getPhase());
   assertEquals(finalTopologyId, newTopology.getTopologyId());
}
private void checkRehashed(DistributionManager dm, List<Cache<Object, Object>> caches, List<Address> addresses) {
   TestingUtil.waitForNoRebalance(caches);
   assertNull(dm.getCacheTopology().getPendingCH());
   ConsistentHash ch = dm.getCacheTopology().getCurrentCH();
   assertEquals(addresses, ch.getMembers());
   for (int i = 0; i < ch.getNumSegments(); i++) {
      assertEquals(ch.getNumOwners(), ch.locateOwnersForSegment(i).size());
   }
}
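// Hypothetical call site (not from the original source): after a topology change settles, verifies that the
// surviving caches agree on a consistent hash whose members are exactly the expected addresses.
checkRehashed(advancedCache(0).getDistributionManager(), caches(),
              Arrays.asList(address(0), address(1), address(2)));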
private static synchronized int getHashCodeForSegment(LocalizedCacheTopology cacheTopology, int segment) {
   int numSegments = cacheTopology.getReadConsistentHash().getNumSegments();
   // Caching the hash codes prevents random failures in tests where we create many magic keys
   int[] hcs = hashCodes.computeIfAbsent(numSegments, k -> new int[numSegments]);
   int hc = hcs[segment];
   if (hc != 0) {
      return hc;
   }
   Random r = new Random();
   int attemptsLeft = 100 * numSegments;
   int dummy;
   do {
      dummy = r.nextInt();
      attemptsLeft--;
      if (attemptsLeft < 0) {
         throw new IllegalStateException("Could not find any key in segment " + segment);
      }
   } while (cacheTopology.getSegment(dummy) != segment);
   return hcs[segment] = dummy;
}
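// Hypothetical usage (not from the original source): an Integer key built from the cached hash code maps to
// the requested segment, because Integer.hashCode() returns the value itself and the helper above searched
// for exactly that property.
int segment = 3;
int keyHashCode = getHashCodeForSegment(cacheTopology, segment);
assertEquals(segment, cacheTopology.getSegment(keyHashCode));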
int topologyBeforeSplit = dmP0N1.getCacheTopology().getTopologyId();

splitCluster(p0.getNodes(), p1.getNodes());

int currentTopologyP0N0 = dmP0N0.getCacheTopology().getTopologyId();
int currentTopologyP0N1 = dmP0N1.getCacheTopology().getTopologyId();
log.debugf("Topology before split: %d, now on P0N0: %d, P0N1: %d",
           topologyBeforeSplit, currentTopologyP0N0, currentTopologyP0N1);
if (currentTopologyP0N0 == topologyBeforeSplit + 1 && currentTopologyP0N1 == topologyBeforeSplit + 1) {
   eventuallyEquals(2, () -> dmP0N1.getCacheTopology().getActualMembers().size());
   assertEquals(AvailabilityMode.AVAILABLE, partitionHandlingManager(p0.node(0)).getAvailabilityMode());
public static void assertHashTopology20Received(AbstractTestTopologyAwareResponse topoResp,
                                                List<HotRodServer> servers, String cacheName, int expectedTopologyId) {
   TestHashDistAware20Response hashTopologyResp = (TestHashDistAware20Response) topoResp;
   assertEquals(expectedTopologyId, hashTopologyResp.topologyId);
   assertEquals(hashTopologyResp.members.size(), servers.size());
   Set<ServerAddress> serverAddresses = servers.stream().map(HotRodServer::getAddress).collect(Collectors.toSet());
   hashTopologyResp.members.forEach(member -> assertTrue(serverAddresses.contains(member)));
   assertEquals(hashTopologyResp.hashFunction, 3);

   // Assert segments
   Cache cache = servers.get(0).getCacheManager().getCache(cacheName);
   LocalizedCacheTopology cacheTopology = cache.getAdvancedCache().getDistributionManager().getCacheTopology();
   assertEquals(cacheTopology.getActualMembers().size(), servers.size());
   ConsistentHash ch = cacheTopology.getCurrentCH();
   int numSegments = ch.getNumSegments();
   int numOwners = ch.getNumOwners();
   assertEquals(hashTopologyResp.segments.size(), numSegments);
   for (int i = 0; i < numSegments; ++i) {
      List<Address> segment = ch.locateOwnersForSegment(i);
      Iterable<ServerAddress> members = hashTopologyResp.segments.get(i);
      assertEquals(Math.min(numOwners, ch.getMembers().size()), segment.size());
      int count = 0;
      for (ServerAddress member : members) {
         count++;
         assertTrue(serverAddresses.contains(member));
      }
      // The number of servers could be smaller than the number of CH members (same as the number of actual members)
      assertEquals(Math.min(numOwners, servers.size()), count);
   }
}
log.infof("Skipping test on failing key %s", key); } else { Collection<Address> owners = cacheTopology.getWriteOwners(key); for (Map.Entry<Address, Cache<Object, Object>> e : cacheMap.entrySet()) { try { key, cacheTopology.getSegment(key), owners, cacheTopology); throw th;
public void testNewTopologySentAfterOverlappingMerge() {
   TestingUtil.waitForNoRebalanceAcrossManagers(managers());
   int initialTopology = advancedCache(0).getRpcManager().getTopologyId();

   expectCompleteTopology(client, initialTopology);

   PartitionDescriptor p1 = new PartitionDescriptor(0);
   // isolatePartitions will always result in a CR fail, as Node 0 tries to contact Node 1 in order to receive segments,
   // which is not possible because all messages received by Node 1 from Node 0 are discarded by the DISCARD protocol.
   // Therefore, the state transfer timeout must be smaller than the timeout utilised by TestingUtil::waitForNoRebalance
   isolatePartition(p1.getNodes());
   eventuallyEquals(1, () -> advancedCache(0).getDistributionManager().getCacheTopology().getActualMembers().size());
   eventuallyExpectPartialTopology(client, initialTopology + 1);

   partition(0).merge(partition(1));
   int finalTopologyId = initialTopology + (partitionHandling == PartitionHandling.DENY_READ_WRITES ? 2 : 7);
   eventuallyExpectCompleteTopology(client, finalTopologyId);

   // Check that we got the number of topology updates to NO_REBALANCE right
   // With DENY_READ_WRITES:
   // T+1: DEGRADED_MODE in partition [A]
   // T+2: back to AVAILABLE
   // With ALLOW_READ_WRITES:
   // T+2: CONFLICT_RESOLUTION, preferred CH: owners = (1) [test-NodeA-22368: 256+0]
   // T+3: NO_REBALANCE update topology after CR and before rebalance begins
   // T+4: READ_OLD (rebalance starts), T+5: READ_ALL, T+6: READ_NEW, T+7: NO_REBALANCE
   LocalizedCacheTopology newTopology = advancedCache(0).getDistributionManager().getCacheTopology();
   assertEquals(CacheTopology.Phase.NO_REBALANCE, newTopology.getPhase());
}
public void assertActualMembers() {
   Set<org.infinispan.remoting.transport.Address> expected = cachesInThisPartition().stream()
         .map(c -> c.getAdvancedCache().getRpcManager().getAddress())
         .collect(Collectors.toSet());
   for (Cache c : cachesInThisPartition()) {
      eventuallyEquals(expected,
            () -> new HashSet<>(c.getAdvancedCache().getDistributionManager().getCacheTopology().getActualMembers()));
   }
}
public static void assertHashIds(Map<ServerAddress, List<Integer>> hashIds, List<HotRodServer> servers, String cacheName) {
   Cache cache = servers.get(0).getCacheManager().getCache(cacheName);
   DistributionManager distributionManager = cache.getAdvancedCache().getDistributionManager();
   ConsistentHash consistentHash = distributionManager.getCacheTopology().getCurrentCH();
   int numSegments = consistentHash.getNumSegments();
   int numOwners = consistentHash.getNumOwners();
   assertEquals(hashIds.size(), servers.size());

   int segmentSize = (int) Math.ceil((double) Integer.MAX_VALUE / numSegments);
   Map<Integer, ServerAddress>[] owners = new Map[numSegments];

   for (Map.Entry<ServerAddress, List<Integer>> entry : hashIds.entrySet()) {
      ServerAddress serverAddress = entry.getKey();
      List<Integer> serverHashIds = entry.getValue();
      for (Integer hashId : serverHashIds) {
         int segmentIdx = (hashId / segmentSize + numSegments - 1) % numSegments;
         int ownerIdx = hashId % segmentSize;
         if (owners[segmentIdx] == null) {
            owners[segmentIdx] = new HashMap<>();
         }
         owners[segmentIdx].put(ownerIdx, serverAddress);
      }
   }

   for (int i = 0; i < numSegments; ++i) {
      List<ServerAddress> segmentOwners = owners[i].entrySet().stream()
            .sorted(Comparator.comparing(Map.Entry::getKey))
            .map(Map.Entry::getValue).collect(Collectors.toList());
      assertEquals(segmentOwners.size(), numOwners);
      List<ServerAddress> chOwners = consistentHash.locateOwnersForSegment(i).stream()
            .map(a -> clusterAddressToServerAddress(servers, a)).collect(Collectors.toList());
      assertEquals(segmentOwners, chOwners);
   }
}
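// Worked example (not from the original source): with 256 segments, segmentSize is
// ceil(Integer.MAX_VALUE / 256) = 8388608, so a reported hashId of 16777216 yields
// segmentIdx = (16777216 / 8388608 + 256 - 1) % 256 = (2 + 255) % 256 = 1.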
private int currentTopologyId(Cache cache) {
   return cache.getAdvancedCache().getDistributionManager().getCacheTopology().getTopologyId();
}
private int getSegment(Object key) {
   DistributionManager distributionManager = this.getDistributionManager();
   if (distributionManager == null) {
      return 0;
   }
   return distributionManager.getCacheTopology().getSegment(key);
}