/**
 * Resolves the cluster {@link Address} of the node hosting the given cache.
 * Delegates to the shared test helper so every test resolves addresses identically.
 */
protected Address addressOf(Cache<?, ?> cache) {
   Address resolved = DistributionTestHelper.addressOf(cache);
   return resolved;
}
/**
 * Checks whether {@code primaryOwner} is the first owner of {@code key} and every cache in
 * {@code backupOwners} also appears in the key's owner list.
 *
 * @param key          key whose ownership is verified
 * @param primaryOwner cache expected to be the primary (first) owner
 * @param backupOwners caches expected to be among the owners
 * @return {@code true} iff the primary matches and all backups are owners
 */
public static boolean hasOwners(Object key, Cache<?, ?> primaryOwner, Cache<?, ?>... backupOwners) {
   DistributionManager dm = primaryOwner.getAdvancedCache().getDistributionManager();
   List<Address> owners = dm.locate(key);
   // Guard clause: primary owner must be first in the locate() result.
   if (!addressOf(primaryOwner).equals(owners.get(0))) {
      return false;
   }
   for (Cache<?, ?> backup : backupOwners) {
      if (!owners.contains(addressOf(backup))) {
         return false;
      }
   }
   return true;
}
/**
 * Asserts that the entry for {@code key} is present in the cache's data container as an L1
 * entry: it must exist and must not be immortal (L1 entries carry a lifespan).
 */
public static void assertIsInL1(Cache<?, ?> cache, Object key) {
   DataContainer dc = cache.getAdvancedCache().getDataContainer();
   InternalCacheEntry entry = dc.get(key);
   String suffix = "on cache at [" + addressOf(cache) + "]!";
   assert entry != null : "Entry for key [" + key + "] should be in L1 " + suffix;
   assert !(entry instanceof ImmortalCacheEntry) : "Entry for key [" + key + "] should have a lifespan " + suffix;
}
/**
 * Returns {@code true} when the given cache's node is among the owners of {@code key}.
 */
public static boolean isOwner(Cache<?, ?> c, Object key) {
   List<Address> owners = c.getAdvancedCache().getDistributionManager().locate(key);
   return owners.contains(addressOf(c));
}
/**
 * Returns {@code true} when the given cache's node is the primary owner of {@code key}.
 */
public static boolean isFirstOwner(Cache<?, ?> c, Object key) {
   Address primary = c.getAdvancedCache().getDistributionManager().getPrimaryLocation(key);
   return addressOf(c).equals(primary);
}
/**
 * Asserts that no entry for {@code key} exists in the cache's data container at all
 * (neither owned nor cached in L1).
 */
public static void assertIsNotInL1(Cache<?, ?> cache, Object key) {
   InternalCacheEntry entry = cache.getAdvancedCache().getDataContainer().get(key);
   assert entry == null : "Entry for key [" + key + "] should not be in data container at all on cache at [" + addressOf(cache) + "]!";
}
/**
 * Creates a key mapped to a segment whose primary owner is {@code primaryOwner} and whose
 * owner list contains every cache in {@code backupOwners}.
 *
 * @throws IllegalStateException when no segment satisfies the requested ownership
 */
public MagicKey(String name, Cache<?, ?> primaryOwner, Cache<?, ?>... backupOwners) {
   this.name = name;
   Address primaryAddress = addressOf(primaryOwner);
   this.address = primaryAddress.toString();
   LocalizedCacheTopology cacheTopology = primaryOwner.getAdvancedCache().getDistributionManager().getCacheTopology();
   ConsistentHash ch = cacheTopology.getWriteConsistentHash();
   // Scan segments for one where the primary is first and every backup is an owner.
   segment = findSegment(ch.getNumSegments(), s -> {
      List<Address> segmentOwners = ch.locateOwnersForSegment(s);
      boolean matches = primaryAddress.equals(segmentOwners.get(0));
      if (matches) {
         for (Cache<?, ?> backup : backupOwners) {
            matches &= segmentOwners.contains(addressOf(backup));
         }
      }
      return matches;
   });
   if (segment < 0) {
      throw new IllegalStateException("Could not find any segment owned by " + primaryOwner + ", " +
            Arrays.toString(backupOwners) + ", primary segments: " + segments(primaryOwner) +
            ", backup segments: " + Stream.of(backupOwners).collect(Collectors.toMap(Function.identity(), this::segments)));
   }
   hashcode = getHashCodeForSegment(cacheTopology, segment);
   unique = counter.getAndIncrement();
}
/**
 * Asserts that the entry for {@code key} is present in the data container and is immortal
 * (i.e. fully owned, with no lifespan). Failures are logged at FATAL before asserting.
 */
public static void assertIsInContainerImmortal(Cache<?, ?> cache, Object key) {
   Log log = LogFactory.getLog(BaseDistFunctionalTest.class);
   InternalCacheEntry entry = cache.getAdvancedCache().getDataContainer().get(key);
   if (entry == null) {
      String msg = "Entry for key [" + key + "] should be in data container on cache at [" + addressOf(cache) + "]!";
      log.fatal(msg);
      assert false : msg;
   }
   if (!(entry instanceof ImmortalCacheEntry)) {
      String msg = "Entry for key [" + key + "] on cache at [" + addressOf(cache) + "] should be immortal but was [" + entry + "]!";
      log.fatal(msg);
      assert false : msg;
   }
}
public void testDisjointSetTransaction() throws Exception { MagicKey k1 = new MagicKey(cache(0), cache(1)); MagicKey k2 = new MagicKey(cache(1), cache(2)); // make sure the owners of k1 and k2 are NOT the same! Set<Address> k1Owners = new HashSet<Address>(); Set<Address> k2Owners = new HashSet<Address>(); for (Cache<?, ?> cache: caches()) { if (isOwner(cache, k1)) k1Owners.add(addressOf(cache)); if (isOwner(cache, k2)) k2Owners.add(addressOf(cache)); } assert k1Owners.size() == 2: "Expected 2 owners for k1; was " + k1Owners; assert k2Owners.size() == 2: "Expected 2 owners for k1; was " + k2Owners; assert !k1Owners.equals(k2Owners) : format("k1 and k2 should have different ownership set. Was %s and %s", k1Owners, k2Owners); tm(0).begin(); cache(0).put(k1, "v1"); cache(0).put(k2, "v2"); tm(0).commit(); } }
/**
 * Creates a key mapped to a segment whose primary owner is {@code primaryOwner}.
 *
 * @throws IllegalStateException when the cache owns no segment as primary
 */
public MagicKey(String name, Cache<?, ?> primaryOwner) {
   this.name = name;
   Address primaryAddress = addressOf(primaryOwner);
   this.address = primaryAddress.toString();
   LocalizedCacheTopology topology = primaryOwner.getAdvancedCache().getDistributionManager().getCacheTopology();
   ConsistentHash hash = topology.getWriteConsistentHash();
   // Find any segment for which the given cache is the primary owner.
   int found = findSegment(hash.getNumSegments(), s -> primaryAddress.equals(hash.locatePrimaryOwnerForSegment(s)));
   if (found < 0) {
      throw new IllegalStateException("Could not find any segment owned by " + primaryOwner +
            ", primary segments: " + segments(primaryOwner));
   }
   this.segment = found;
   hashcode = getHashCodeForSegment(topology, found);
   unique = counter.getAndIncrement();
}
/**
 * Asserts that the entry for {@code key} is either absent or non-immortal (an L1 copy) on the
 * given cache; an immortal entry would indicate unexpected full ownership. Failures are logged
 * at FATAL before asserting.
 */
public static void assertIsInL1OrNull(Cache<?, ?> cache, Object key) {
   Log log = LogFactory.getLog(BaseDistFunctionalTest.class);
   InternalCacheEntry entry = cache.getAdvancedCache().getDataContainer().get(key);
   if (entry instanceof ImmortalCacheEntry) {
      String msg = "Entry for key [" + key + "] on cache at [" + addressOf(cache) + "] should be mortal or null but was [" + entry + "]!";
      log.fatal(msg);
      assert false : msg;
   }
}
/**
 * Asserts that every cache in the given site holds the expected value for each of the
 * {@code NR_KEYS} test keys derived from the test method name.
 */
private void assertDataForSite(Method method, String site) {
   for (Cache<String, String> cache : this.<String, String>caches(site)) {
      for (int i = 0; i < NR_KEYS; ++i) {
         String expected = TestingUtil.v(method, i);
         String actual = cache.get(TestingUtil.k(method, i));
         assertEquals("Cache=" + addressOf(cache), expected, actual);
      }
   }
}
}
/** * x-site state transfer is triggered during a cache topology change. */ private void doXSiteStateTransferDuringTopologyChange(TopologyEvent event) throws Exception { log.debugf("Start topology change during x-site state transfer with %s", event); initBeforeTest(); final TestCaches<Object, Object> testCaches = createTestCache(event, LON); log.debugf("Controlled cache=%s, Coordinator cache=%s, Cache to remove=%s", addressOf(testCaches.controllerCache), addressOf(testCaches.coordinator), testCaches.removeIndex < 0 ? "NONE" : addressOf(cache(LON, testCaches.removeIndex))); BlockingLocalTopologyManager topologyManager = replaceTopologyManagerDefaultCache(testCaches.controllerCache.getCacheManager()); final Future<Void> topologyEventFuture = triggerTopologyChange(LON, testCaches.removeIndex); // We could get either the NO_REBALANCE update or the READ_OLD rebalance start first BlockingLocalTopologyManager.BlockedTopology blockedTopology = topologyManager.expectTopologyUpdate(); log.debug("Start x-site state transfer"); startStateTransfer(testCaches.coordinator, NYC); assertOnline(LON, NYC); blockedTopology.unblock(); topologyEventFuture.get(); awaitLocalStateTransfer(LON); awaitXSiteStateSent(LON); assertData(); }
/**
 * Asserts that every owner of {@code key} holds it as an immortal entry, and that exactly one
 * non-owner (the original requestor's L1 copy) holds an entry whose version equals the owners'
 * version. Skips the non-owner check when only one cache is present.
 *
 * @param caches all caches in the cluster
 * @param key    the key whose placement is verified
 */
public static void assertOwnershipAndNonOwnership(List<? extends Cache> caches, Object key) {
   EntryVersion ownerVersion = null;
   for (Cache c : caches) {
      DataContainer dc = c.getAdvancedCache().getDataContainer();
      InternalCacheEntry ice = dc.peek(key);
      if (isOwner(c, key)) {
         assert ice != null : "Fail on owner cache " + addressOf(c) + ": dc.get(" + key + ") returned null!";
         assert ice instanceof ImmortalCacheEntry : "Fail on owner cache " + addressOf(c) + ": dc.get(" + key + ") returned " + safeType(ice);
         // Last owner's version wins; all owners are expected to agree.
         ownerVersion = ice.getMetadata().version();
      }
   }
   assertNotNull(ownerVersion);

   // With a single cache there are no non-owners to check.
   if (caches.size() == 1) {
      return;
   }

   int equalVersions = 0;
   for (Cache c : caches) {
      InternalCacheEntry ice = c.getAdvancedCache().getDataContainer().peek(key);
      // Fixed: removed the redundant `ice != null` re-check that followed a null `continue`.
      if (!isOwner(c, key) && ice != null && ice.getMetadata() != null
            && ownerVersion.equals(ice.getMetadata().version())) {
         ++equalVersions;
      }
   }
   assertEquals(equalVersions, 1);
}
final AtomicReference<StateTransferRequest> pendingRequest = new AtomicReference<>(null); log.debugf("Controlled cache=%s, Coordinator cache=%s, Cache to remove=%s", addressOf(testCaches.controllerCache), addressOf(testCaches.coordinator), testCaches.removeIndex < 0 ? "NONE" : addressOf(cache(LON, testCaches.removeIndex))); log.debugf("Discard x-site state transfer start command in cache %s to remove", addressOf(cache(LON, testCaches.removeIndex))); wrapComponent(cache(LON, testCaches.removeIndex), XSiteStateProvider.class, (WrapFactory<XSiteStateProvider, XSiteStateProvider, Cache<?, ?>>) (wrapOn, current) -> new XSiteProviderDelegator(current) { log.debugf("Block x-site state transfer start command in cache %s", addressOf(cache(LON, 1))); wrapComponent(cache(LON, 1), XSiteStateProvider.class, (WrapFactory<XSiteStateProvider, XSiteStateProvider, Cache<?, ?>>) (wrapOn, current) -> new XSiteProviderDelegator(current) {
/**
 * Asynchronously changes the site topology: kills the cache at {@code removeIndex} when it is
 * non-negative, otherwise adds a brand-new cache; in both cases the returned future completes
 * once the cluster has re-formed (60 second timeout).
 *
 * @param siteName    site whose topology changes
 * @param removeIndex index of the cache to kill, or negative to add a node instead
 * @return a future completing when the cluster has formed again
 */
Future<Void> triggerTopologyChange(final String siteName, final int removeIndex) {
   if (removeIndex < 0) {
      // Adding a node: the cache is created synchronously; only cluster formation is forked.
      log.debug("Adding new cache");
      site(siteName).addCache(globalConfigurationBuilderForSite(siteName), lonConfigurationBuilder());
      return fork(() -> {
         log.debugf("Wait for cluster to form on caches %s", site(siteName).getCaches(null));
         site(siteName).waitForClusterToForm(null, 60, TimeUnit.SECONDS);
         return null;
      });
   }
   // Removing a node: both the kill and the wait run on the forked thread.
   return fork(() -> {
      log.debugf("Shutting down cache %s", addressOf(cache(siteName, removeIndex)));
      site(siteName).kill(removeIndex);
      log.debugf("Wait for cluster to form on caches %s", site(siteName).getCaches(null));
      site(siteName).waitForClusterToForm(null, 60, TimeUnit.SECONDS);
      return null;
   });
}