/**
 * Wraps the supplied {@link ConsistentHash} in a synthetic topology (topology id 0,
 * rebalance id 0, no pending CH, no rebalance in progress) so locality questions can
 * be answered against {@code hash} directly.
 */
public ConsistentHashLocality(Cache<?, ?> cache, ConsistentHash hash) {
   CacheTopology cacheTopology = new CacheTopology(0, 0, hash, null,
         CacheTopology.Phase.NO_REBALANCE, Collections.emptyList(), Collections.emptyList());
   this.topology = new LocalizedCacheTopology(
         cache.getCacheConfiguration().clustering().cacheMode(),
         cacheTopology,
         cache.getCacheConfiguration().clustering().hash().keyPartitioner(),
         cache.getCacheManager().getAddress(),
         true);
}
// NOTE(review): fragment — the trailing "} });" closes an anonymous class and a
// method-call expression that begin outside this snippet, so the code is kept
// byte-identical. The callback verifies that SpringEmbeddedCacheManagerFactoryBean
// honoured the supplied configuration file: the factory must return a non-null
// EmbeddedCacheManager, and the named cache must use REPL_ASYNC cache mode.
@Override public void call() { assertNotNull( "getObject() should have returned a valid EmbeddedCacheManager, configured using the configuration file " + "set on SpringEmbeddedCacheManagerFactoryBean. However, it returned null.", cm); final Cache<Object, Object> cacheDefinedInCustomConfiguration = cm.getCache(CACHE_NAME_FROM_CONFIGURATION_FILE); final Configuration configuration = cacheDefinedInCustomConfiguration.getCacheConfiguration(); assertEquals( "The cache named [" + CACHE_NAME_FROM_CONFIGURATION_FILE + "] is configured to have asynchonous replication cache mode. Yet, the cache returned from getCache(" + CACHE_NAME_FROM_CONFIGURATION_FILE + ") has a different cache mode. Obviously, SpringEmbeddedCacheManagerFactoryBean did not use " + "the configuration file when instantiating EmbeddedCacheManager.", CacheMode.REPL_ASYNC, configuration.clustering().cacheMode()); } });
public void testSimpleDistributedClusterModeDefault() throws Exception { ConfigurationBuilder builder = new ConfigurationBuilder(); builder.clustering().cacheMode(DIST_SYNC) .hash().numOwners(3).numSegments(51); cm = TestCacheManagerFactory.createClusteredCacheManager(builder); cm.defineConfiguration("my-cache", builder.build()); Cache<?, ?> cache = cm.getCache("my-cache"); // These are all overridden values ClusteringConfiguration clusteringCfg = cache.getCacheConfiguration().clustering(); assertEquals(DIST_SYNC, clusteringCfg.cacheMode()); assertEquals(3, clusteringCfg.hash().numOwners()); assertEquals(51, clusteringCfg.hash().numSegments()); }
/**
 * Populates the cluster with ten Person entries aged 30..39 and runs an Ickle
 * filter/converter over the entry stream; only ages 30 and 31 with a null blurb
 * satisfy the query, so exactly two results must come back.
 */
@Test
public void testFilter() {
   boolean clustered = cache(0).getCacheConfiguration().clustering().cacheMode().isClustered();
   for (int i = 0; i < 10; ++i) {
      Person person = new Person();
      person.setName("John");
      person.setAge(30 + i);
      Cache<Object, Person> target = cache(i % numNodes);
      // MagicKey pins ownership to the target node in clustered mode; plain ints suffice locally.
      Object key = clustered ? new MagicKey(target) : i;
      target.put(key, person);
   }
   IckleFilterAndConverter filterAndConverter = new IckleFilterAndConverter<Object, Person>(
         "from org.infinispan.query.test.Person where blurb is null and age <= 31", null, ReflectionMatcher.class);
   Stream<CacheEntry<Object, Object>> stream = cache(0).getAdvancedCache().cacheEntrySet().stream();
   CloseableIterator<Map.Entry<Object, ObjectFilter.FilterResult>> iterator =
         Closeables.iterator(CacheFilters.filterAndConvert(stream, filterAndConverter).iterator());
   Map<Object, ObjectFilter.FilterResult> results = mapFromIterator(iterator);
   assertEquals(2, results.size());
   for (ObjectFilter.FilterResult result : results.values()) {
      Person match = (Person) result.getInstance();
      assertNull(match.getBlurb());
      assertTrue(match.getAge() <= 31);
   }
}
// NOTE(review): garbled fragment — braces are unbalanced and the assignment after
// "throw log.nonExistentCache(baseCacheName);" is unreachable as written; the original
// presumably had an else-branch (fall back to the default configuration when no base
// cache name was given). Do not "fix" without the full file — kept byte-identical.
// Intent as far as visible: resolve the template configuration for a new cache (named
// base cache or manager default), reject duplicate cache names, and for clustered
// configurations obtain the RPC manager and commands factory from the base cache.
Configuration configuration; if (baseCacheName != null) { configuration = cacheManager.getCacheConfiguration(baseCacheName); if (configuration == null) { throw log.nonExistentCache(baseCacheName); configuration = cacheManager.getDefaultCacheConfiguration(); baseCacheName = BasicCacheContainer.DEFAULT_CACHE_NAME; if (cacheManager.cacheExists(cacheName)) { throw log.cacheAlreadyExists(cacheName); if (configuration.clustering().cacheMode().isClustered()) { AdvancedCache<?, ?> clusteredCache = cacheManager.getCache(baseCacheName).getAdvancedCache(); RpcManager rpc = clusteredCache.getRpcManager(); CommandsFactory factory = clusteredCache.getComponentRegistry().getComponent(CommandsFactory.class);
// NOTE(review): fragment of a configuration-parsing test — no enclosing method is
// visible, so the code is kept byte-identical. The run of assertions checks several
// parsed cache configurations in declaration order (the repeated cacheMode() checks
// each apply to a different re-assigned 'c'): a LOCAL cache, an INVALIDATION_SYNC
// cache, REPL_SYNC caches with specific state-transfer timeout/chunk settings, and a
// DIST_SYNC cache with L1, owner/segment counts, SyncConsistentHashFactory and
// partition handling enabled.
assertFalse(c.invocationBatching().enabled()); assertTrue(c.jmxStatistics().enabled()); assertEquals(CacheMode.LOCAL, c.clustering().cacheMode()); assertEquals(CacheMode.INVALIDATION_SYNC, c.clustering().cacheMode()); assertEquals(CacheMode.REPL_SYNC, c.clustering().cacheMode()); assertFalse(c.clustering().stateTransfer().fetchInMemoryState()); assertEquals(60000, c.clustering().stateTransfer().timeout()); assertEquals(10000, c.clustering().stateTransfer().chunkSize()); assertEquals(CacheMode.DIST_SYNC, c.clustering().cacheMode()); assertFalse(c.invocationBatching().enabled()); assertEquals(1200000, c.clustering().l1().lifespan()); assertEquals(4, c.clustering().hash().numOwners()); assertEquals(35000, c.clustering().remoteTimeout()); assertEquals(2, c.clustering().hash().numSegments()); assertTrue(c.clustering().hash().consistentHashFactory() instanceof SyncConsistentHashFactory); assertTrue(c.clustering().partitionHandling().enabled()); assertTrue(c.clustering().stateTransfer().fetchInMemoryState()); assertEquals(60500, c.clustering().stateTransfer().timeout()); assertEquals(10500, c.clustering().stateTransfer().chunkSize()); assertEquals(CacheMode.REPL_SYNC, c.clustering().cacheMode()); assertEquals(CacheMode.REPL_SYNC, c.clustering().cacheMode());
/**
 * Verifies basic replication on the clustered "c1" cache: a value put on node 0
 * must be readable from the replica on node 1.
 */
public void testRollbackSpanningCaches2() throws Exception {
   startAllCaches();
   Cache<String, String> primary = cache(0, "c1");
   assertTrue(primary.getCacheConfiguration().clustering().cacheMode().isClustered());
   Cache<String, String> replica = cache(1, "c1");
   assertTrue(primary.isEmpty());
   assertTrue(replica.isEmpty());
   primary.put("c1key", "c1value");
   assertEquals(primary.get("c1key"), "c1value");
   assertEquals(replica.get("c1key"), "c1value");
}
// NOTE(review): garbled fragment — the bodies of the concurrent tasks have been
// collapsed away ("cache(1).put(...); tm(1).commit(); return Boolean.TRUE;" clearly
// belongs to a Callable/fork defined between the visible statements, and tx1/tx2 are
// the futures for those tasks). Kept byte-identical; do not fix without the full file.
// Intent as far as visible: with numOwners == 2, run two transactions against the same
// MagicKey owned by caches 0 and 1 and wait up to 20 s for both to finish.
private void doSimpleConcurrentTest(final Operation operation) throws Exception { assertEquals("Wrong number of owner. Please change the configuration", 2, cache(0).getCacheConfiguration().clustering().hash().numOwners()); final Object key = new MagicKey(cache(0), cache(1)); cache(0).put(key, "v1"); cache(1).put(key, "tx1"); tm(1).commit(); return Boolean.TRUE; assertTrue("Tx2 has not finished", tx2.get(20, TimeUnit.SECONDS)); assertTrue("Tx1 has not finished", tx1.get(20, TimeUnit.SECONDS));
void countWithSegments(ToIntBiFunction<SegmentedAdvancedLoadWriteStore<?, ?>, IntSet> countFunction) { Cache<byte[], byte[]> cache = localCacheManager.getCache(REMOTE_CACHE); RemoteStore rs = (RemoteStore) cl; rs.write(marshalledEntry(internalCacheEntry("k1", "v1", 100))); Iterator<byte[]> iter = cache.keySet().iterator(); assertTrue(iter.hasNext()); byte[] key = iter.next(); assertFalse(iter.hasNext()); KeyPartitioner keyPartitioner = TestingUtil.extractComponent(cache, KeyPartitioner.class); int segment = keyPartitioner.getSegment(key); // Publish keys should return our key if we use a set that contains that segment assertEquals(1, countFunction.applyAsInt(rs, IntSets.immutableSet(segment))); // Create int set that includes all segments but the one that maps to the key int maxSegments = cache.getCacheConfiguration().clustering().hash().numSegments(); IntSet intSet = IntSets.mutableEmptySet(maxSegments); for (int i = 0; i < maxSegments; ++i) { if (i != segment) { intSet.set(i); } } // Publish keys shouldn't return our key since the IntSet doesn't contain our segment assertEquals(0, countFunction.applyAsInt(rs, intSet)); }
protected void checkConsistencyAcrossCluster(String cacheName, Configuration configuration) { // Initialize the partitioner to ensure we can compare config equality configuration.clustering().hash().keyPartitioner().init(configuration.clustering().hash()); for (EmbeddedCacheManager m : cacheManagers) { Configuration actualConfiguration = m.getCacheConfiguration(cacheName); assertNotNull("Cache " + cacheName + " missing from " + m, actualConfiguration); assertEquals(configuration, actualConfiguration); Cache<Object, Object> cache = m.getCache(cacheName); assertEquals(cacheManagers.size(), cache.getAdvancedCache().getRpcManager().getMembers().size()); } }
// NOTE(review): fragment of a configuration-parsing test — no enclosing method is
// visible, so the code is kept byte-identical. The assertions walk several parsed
// configurations in declaration order (each cacheMode() check applies to a different
// re-assigned 'c'): a non-clustered cache with GenericTransactionManagerLookup,
// INVALIDATION_SYNC/ASYNC caches with a 15 s remote timeout, REPL_SYNC/ASYNC caches
// that must NOT fetch in-memory state but DO await initial transfer, then LOCAL and
// DIST_SYNC caches.
assertTrue(!c.clustering().cacheMode().isClustered()); assertTrue(c.transaction().transactionManagerLookup() instanceof GenericTransactionManagerLookup); if (!deprecated) { assertReaperAndTimeoutInfo(defaultCfg); assertEquals(CacheMode.INVALIDATION_SYNC, c.clustering().cacheMode()); assertTrue(c.clustering().stateTransfer().awaitInitialTransfer()); assertEquals(15000, c.clustering().remoteTimeout()); assertEquals(CacheMode.INVALIDATION_ASYNC, c.clustering().cacheMode()); assertEquals(15000, c.clustering().remoteTimeout()); assertEquals(CacheMode.REPL_SYNC, c.clustering().cacheMode()); assertTrue(!c.clustering().stateTransfer().fetchInMemoryState()); assertTrue(c.clustering().stateTransfer().awaitInitialTransfer()); assertEquals(15000, c.clustering().remoteTimeout()); assertEquals(CacheMode.REPL_ASYNC, c.clustering().cacheMode()); assertTrue(!c.clustering().stateTransfer().fetchInMemoryState()); assertTrue(c.clustering().stateTransfer().awaitInitialTransfer()); assertEquals(CacheMode.REPL_SYNC, c.clustering().cacheMode()); assertTrue(!c.clustering().stateTransfer().fetchInMemoryState()); assertTrue(c.clustering().stateTransfer().awaitInitialTransfer()); assertEquals(15000, c.clustering().remoteTimeout()); assertEquals(CacheMode.LOCAL, c.clustering().cacheMode()); assertEquals(CacheMode.DIST_SYNC, c.clustering().cacheMode());
public void testKeySegmentFilter() { Cache<Integer, String> cache = getCache(0); int range = 12; // First populate the cache with a bunch of values IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value")); assertEquals(range, cache.size()); CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet(); // Take the first half of the segments int segments = cache.getCacheConfiguration().clustering().hash().numSegments() / 2; AtomicInteger realCount = new AtomicInteger(); KeyPartitioner keyPartitioner = cache.getAdvancedCache().getComponentRegistry().getComponent(KeyPartitioner.class); cache.forEach((k, v) -> { if (segments >= keyPartitioner.getSegment(k)) { realCount.incrementAndGet(); } }); assertEquals(realCount.get(), createStream(entrySet).filterKeySegments( IntStream.range(0, segments).boxed().collect(Collectors.toSet())).count()); }
@Override
public void call() {
   // No default cache is declared, so neither the global default-cache name nor the
   // default cache configuration should resolve.
   GlobalConfiguration globalCfg = cm.getCacheManagerConfiguration();
   assertFalse(globalCfg.defaultCacheName().isPresent());
   assertNull(cm.getDefaultCacheConfiguration());
   // A cache literally named "default" is still just an ordinary named cache.
   CacheMode mode = cm.getCacheConfiguration("default").clustering().cacheMode();
   assertEquals(CacheMode.REPL_SYNC, mode);
}
/**
 * Stores SIZE entries with no lifespan and a 1 ms max-idle, advances the test clock
 * past the idle window, and checks executeTask() behaviour: in clustered mode every
 * entry is still visited (SIZE invocations expected), while in local mode visiting
 * any expired entry is a failure.
 */
public void testExpirationMaxIdleInExec() throws Exception {
   for (int i = 0; i < SIZE; i++) {
      cache.put("key-" + i, "value-" + i, -1, null, 1, TimeUnit.MILLISECONDS);
   }
   // Push the mock clock 2 ms forward so every entry's max-idle has elapsed.
   timeService.advance(2);
   boolean clustered = cache.getCacheConfiguration().clustering().cacheMode().isClustered();
   if (clustered) {
      AtomicInteger invocationCount = new AtomicInteger();
      cache.getAdvancedCache().getDataContainer()
            .executeTask(KeyFilter.ACCEPT_ALL_FILTER, (k, ice) -> invocationCount.incrementAndGet());
      assertEquals(SIZE, invocationCount.get());
   } else {
      cache.getAdvancedCache().getDataContainer().executeTask(KeyFilter.ACCEPT_ALL_FILTER, (k, ice) -> {
         throw new RuntimeException("No task should be executed on expired entry");
      });
   }
}
// NOTE(review): fragment — the "} finally {" near the end closes a try block opened
// outside this snippet, so the code is kept byte-identical. The statements assert the
// cache modes of several named caches parsed from configuration (default REPL_SYNC,
// "local" LOCAL, "dist" DIST_SYNC, "distasync" DIST_ASYNC, "replicationasync"
// REPL_ASYNC) and finally tear down the cache manager. Note these use 'assert'
// keywords, so they only run with -ea enabled.
GlobalConfiguration gc = cm.getCacheManagerConfiguration(); Configuration defaultCfg = cm.getCache().getCacheConfiguration(); assert defaultCfg.clustering().cacheMode() == CacheMode.REPL_SYNC; Configuration cfg = cm.getCache("local").getCacheConfiguration(); assert cfg.clustering().cacheMode() == CacheMode.LOCAL; cfg = cm.getCache("dist").getCacheConfiguration(); assert cfg.clustering().cacheMode() == CacheMode.DIST_SYNC; cfg = cm.getCache("distasync").getCacheConfiguration(); assert cfg.clustering().cacheMode() == CacheMode.DIST_ASYNC; cfg = cm.getCache("replicationasync").getCacheConfiguration(); assert cfg.clustering().cacheMode() == CacheMode.REPL_ASYNC; } finally { TestingUtil.killCacheManagers(cm);
// NOTE(review): the trailing second "}" closes the enclosing class (declared outside
// this snippet), so the code is kept byte-identical.
// Locates the node responsible for a session: for distributed, non-scattered caches
// the primary owner of the session key per the current topology; otherwise the local
// cache manager's address. The resulting Address is mapped to a registry entry, falling
// back to the local group member's entry when no entry exists for that node; returns
// the entry's key, or null when no registry entry can be resolved at all.
@Override public String locate(String sessionId) { DistributionManager dist = this.cache.getAdvancedCache().getDistributionManager(); Address address = (dist != null) && !this.cache.getCacheConfiguration().clustering().cacheMode().isScattered() ? dist.getCacheTopology().getDistribution(new Key<>(sessionId)).primary() : this.cache.getCacheManager().getAddress(); Node node = (address != null) ? this.factory.createNode(address) : null; Map.Entry<String, Void> entry = (node != null) ? this.registry.getEntry(node) : null; if (entry == null) { entry = this.registry.getEntry(this.registry.getGroup().getLocalMember()); } return (entry != null) ? entry.getKey() : null; } }
/**
 * Tests that the configuration contains the values expected, as well as immutability of certain elements
 */
public void testConfiguration() {
   Configuration config = cache.getCacheConfiguration();
   // The cache under test is local-only and must expose a transaction manager lookup.
   assertEquals(CacheMode.LOCAL, config.clustering().cacheMode());
   assertNotNull(config.transaction().transactionManagerLookup());
}
// NOTE(review): garbled fragment — braces are unbalanced and the second LOG.debugv
// call is truncated mid-argument-list, so the code is kept byte-identical.
// Intent as far as visible: look up the EmbeddedCacheManager via JNDI and resolve a
// configuration for the SSO cache — preferring the cache's own configuration, then the
// deployment's HTTP session cache configuration, then the manager default. A warning
// is logged when the resolved mode is not REPL_SYNC/REPL_ASYNC, after which the SSO
// cache is created and a SsoSessionCacheListener is attached.
EmbeddedCacheManager cacheManager = (EmbeddedCacheManager) new InitialContext().lookup(cacheContainerLookup); Configuration ssoCacheConfiguration = cacheManager.getCacheConfiguration(cacheName); if (ssoCacheConfiguration == null) { Configuration cacheConfiguration = cacheManager.getCacheConfiguration(deploymentSessionCacheName); if (cacheConfiguration == null) { LOG.debugv("Using default configuration for SSO cache {0}.{1}.", containerName, cacheName); ssoCacheConfiguration = cacheManager.getDefaultCacheConfiguration(); } else { LOG.debugv("Using distributed HTTP session cache configuration for SSO cache {0}.{1}, configuration taken from cache {2}", CacheMode ssoCacheMode = ssoCacheConfiguration.clustering().cacheMode(); if (ssoCacheMode != CacheMode.REPL_ASYNC && ssoCacheMode != CacheMode.REPL_SYNC) { LOG.warnv("SSO cache mode is {0}, it is recommended to use replicated mode instead.", ssoCacheConfiguration.clustering().cacheModeString()); Cache<String, String[]> ssoCache = cacheManager.getCache(cacheName, true); final SsoSessionCacheListener listener = new SsoSessionCacheListener(ssoCache, mapper); ssoCache.addListener(listener);
/**
 * Resolves the primary-owner node for {@code id}. Scattered caches are excluded from
 * the topology lookup (NOTE: presumably because primary ownership is not stable there
 * — confirm), and a null DistributionManager indicates a non-clustered cache; in both
 * cases — or when the address cannot be mapped to a Node — the local group member is
 * returned.
 */
Node locatePrimaryOwner(I id) {
   DistributionManager dist = this.cache.getAdvancedCache().getDistributionManager();
   Address primary = null;
   if (dist != null && !this.cache.getCacheConfiguration().clustering().cacheMode().isScattered()) {
      primary = dist.getCacheTopology().getDistribution(new Key<>(id)).primary();
   }
   if (primary != null) {
      Node member = this.nodeFactory.createNode(primary);
      if (member != null) {
         return member;
      }
   }
   return this.registry.getGroup().getLocalMember();
}
@Override
public void call() {
   // The transport must be JGroups-based with the cluster name from the config file.
   GlobalConfiguration globalCfg = cm.getCacheManagerConfiguration();
   assertTrue(globalCfg.transport().transport() instanceof JGroupsTransport);
   assertEquals("demoCluster", globalCfg.transport().clusterName());
   // The default cache must be synchronous replication.
   Configuration defaultCfg = cm.getDefaultCacheConfiguration();
   assertEquals(CacheMode.REPL_SYNC, defaultCfg.clustering().cacheMode());
}