/**
 * Builds a size-bounded {@link DataContainer} in which only keys accepted by
 * {@code evictable} count toward the configured maximum.
 *
 * @param builder   configuration used to decide between a segmented and a flat container
 * @param size      maximum number of evictable entries to retain
 * @param evictable predicate selecting the keys that participate in eviction accounting
 * @return a bounded container; segmented when the cache mode requires state transfer
 */
public static <K, V> DataContainer<K, V> createDataContainer(ConfigurationBuilder builder, long size, Predicate<K> evictable) {
   // Evictable keys weigh 1, everything else weighs 0 and can never be evicted by size.
   EntrySizeCalculator<? super K, ? super InternalCacheEntry<K, V>> weigher = (key, entry) -> {
      if (evictable.test(key)) {
         return 1;
      }
      return 0;
   };
   if (builder.clustering().cacheMode().needsStateTransfer()) {
      // Clustered modes that transfer state need per-segment bookkeeping.
      int segmentCount = builder.clustering().hash().create().numSegments();
      return new BoundedSegmentedDataContainer<>(segmentCount, size, weigher);
   }
   return new EvictableDataContainer<>(size, weigher);
}
}
/**
 * Creates a locality view for {@code cache} backed by the supplied (fixed) consistent hash.
 * The hash is wrapped in a synthetic, stable topology so locality queries can be answered
 * without a live cluster topology.
 */
public ConsistentHashLocality(Cache<?, ?> cache, ConsistentHash hash) {
   this.topology = new LocalizedCacheTopology(
         cache.getCacheConfiguration().clustering().cacheMode(),
         // Synthetic topology: ids 0/0, the supplied hash as the only CH, no pending hash,
         // no rebalance in progress, empty member/persistent-UUID lists.
         new CacheTopology(0, 0, hash, null, CacheTopology.Phase.NO_REBALANCE, Collections.emptyList(), Collections.emptyList()),
         cache.getCacheConfiguration().clustering().hash().keyPartitioner(),
         cache.getCacheManager().getAddress(),
         // NOTE(review): the final boolean flag's meaning is not visible here — confirm
         // against the LocalizedCacheTopology constructor (presumably "connected"/member flag).
         true);
}
// Verifies distributed-cache settings parsed into Configuration 'c'.
// NOTE(review): the first assertion group is repeated almost verbatim below (the second
// pass omits the l1 cleanupTaskFrequency check) — possibly a copy/paste artifact;
// confirm whether both passes are intentional.
assertEquals(CacheMode.DIST_SYNC, c.clustering().cacheMode());
assertEquals(600000, c.clustering().l1().lifespan());
// rehashRpcTimeout only exists in the deprecated configuration style.
if (deprecated) assertEquals(120000, c.clustering().hash().rehashRpcTimeout());
assertEquals(120000, c.clustering().stateTransfer().timeout());
assertEquals(1200, c.clustering().l1().cleanupTaskFrequency());
assertEquals(null, c.clustering().hash().consistentHash()); // this is just an override.
assertEquals(3, c.clustering().hash().numOwners());
assertTrue(c.clustering().l1().enabled());
assertEquals(CacheMode.DIST_SYNC, c.clustering().cacheMode());
assertEquals(600000, c.clustering().l1().lifespan());
if (deprecated) assertEquals(120000, c.clustering().hash().rehashRpcTimeout());
assertEquals(120000, c.clustering().stateTransfer().timeout());
assertEquals(null, c.clustering().hash().consistentHash()); // this is just an override.
assertEquals(3, c.clustering().hash().numOwners());
assertTrue(c.clustering().l1().enabled());
assertEquals(0.0f, c.clustering().hash().capacityFactor());
// numSegments is only meaningful in the non-deprecated configuration style.
if (!deprecated) assertEquals(1000, c.clustering().hash().numSegments());
// Grouping API: one grouper registered, keyed on String.
assertTrue(c.clustering().hash().groups().enabled());
assertEquals(1, c.clustering().hash().groups().groupers().size());
assertEquals(String.class, c.clustering().hash().groups().groupers().get(0).getKeyType());
public void testSimpleDistributedClusterModeDefault() throws Exception { ConfigurationBuilder builder = new ConfigurationBuilder(); builder.clustering().cacheMode(DIST_SYNC) .hash().numOwners(3).numSegments(51); cm = TestCacheManagerFactory.createClusteredCacheManager(builder); cm.defineConfiguration("my-cache", builder.build()); Cache<?, ?> cache = cm.getCache("my-cache"); // These are all overridden values ClusteringConfiguration clusteringCfg = cache.getCacheConfiguration().clustering(); assertEquals(DIST_SYNC, clusteringCfg.cacheMode()); assertEquals(3, clusteringCfg.hash().numOwners()); assertEquals(51, clusteringCfg.hash().numSegments()); }
// Verifies clustering, hash-factory, partition-handling and JMX settings parsed into 'c'.
assertFalse(c.invocationBatching().enabled());
assertEquals(1200000, c.clustering().l1().lifespan());
assertEquals(4, c.clustering().hash().numOwners());
assertEquals(35000, c.clustering().remoteTimeout());
assertEquals(2, c.clustering().hash().numSegments());
// The configured factory must be the sync variant.
assertTrue(c.clustering().hash().consistentHashFactory() instanceof SyncConsistentHashFactory);
assertTrue(c.clustering().partitionHandling().enabled());
assertTrue(c.jmxStatistics().enabled());
/**
 * Builds the hash-distribution-aware header response for this protocol version.
 * numOwners is taken from the static cache configuration; the hash version is pinned to
 * the 1.x default and the hash space to Integer.MAX_VALUE.
 * NOTE(review): the literal 0 argument's meaning is not visible here — confirm against
 * the HashDistAwareResponse constructor.
 */
protected AbstractHashDistAwareResponse createHashDistAwareResp(int topologyId,
      Map<Address, ServerAddress> serverEndpointsMap, Configuration cfg) {
   int numOwners = cfg.clustering().hash().numOwners();
   return new HashDistAwareResponse(topologyId, serverEndpointsMap, 0, numOwners,
         Constants.DEFAULT_CONSISTENT_HASH_VERSION_1x, Integer.MAX_VALUE);
}
/**
 * Enable the clustering.hash.groups configuration if it's not already enabled.
 * <p>
 * Infinispan requires this option enabled because we are using fine grained maps.
 * The function will log a warning if the property is disabled.
 *
 * @return the updated configuration
 */
private static Configuration enableClusteringHashGroups(String cacheName, Configuration configuration) {
   boolean groupsAlreadyEnabled = configuration.clustering().hash().groups().enabled();
   if ( !groupsAlreadyEnabled ) {
      LOG.clusteringHashGroupsMustBeEnabled( cacheName );
      ConfigurationBuilder builder = new ConfigurationBuilder().read( configuration );
      builder.clustering().hash().groups().enabled();
      return builder.build();
   }
   return configuration;
}
/**
 * Checks that a named cache defined from a pre-built DIST_SYNC Configuration keeps its
 * overridden numOwners and numSegments once started.
 */
public void testSimpleDistributedClusterModeNamedCache() throws Exception {
   final String cacheName = "my-cache";
   ConfigurationBuilder cb = new ConfigurationBuilder();
   cb.clustering().cacheMode(DIST_SYNC)
      .hash().numOwners(3).numSegments(51);
   final Configuration config = cb.build();
   cm = TestCacheManagerFactory.createClusteredCacheManager();
   cm.defineConfiguration(cacheName, config);
   Cache<?, ?> startedCache = cm.getCache(cacheName);
   ClusteringConfiguration clustering = startedCache.getCacheConfiguration().clustering();
   assertEquals(DIST_SYNC, clustering.cacheMode());
   assertEquals(3, clustering.hash().numOwners());
   assertEquals(51, clustering.hash().numSegments());
}
/**
 * Builds the 1.1-protocol hash-distribution-aware header response.
 * numOwners is read from the static cache configuration; the hash version is the 1.x
 * default, the hash space Integer.MAX_VALUE.
 * NOTE(review): the trailing literal 1's meaning is not visible here — confirm against
 * the HashDistAware11Response constructor.
 */
@Override
protected AbstractHashDistAwareResponse createHashDistAwareResp(int topologyId,
      Map<Address, ServerAddress> serverEndpointsMap, Configuration cfg) {
   int numOwners = cfg.clustering().hash().numOwners();
   return new HashDistAware11Response(topologyId, serverEndpointsMap, numOwners,
         Constants.DEFAULT_CONSISTENT_HASH_VERSION_1x, Integer.MAX_VALUE, 1);
}
/**
 * Enable the clustering.hash.groups configuration if it's not already enabled.
 * <p>
 * Infinispan requires this option enabled because we are using fine grained maps.
 * The function will log a warning if the property is disabled.
 *
 * @param cacheName     name of the cache, used only in the warning message
 * @param configuration the configuration to inspect
 * @return the updated configuration
 */
private static Configuration enableClusteringHashGroups(String cacheName, Configuration configuration) {
   if ( configuration.clustering().hash().groups().enabled() ) {
      return configuration;
   }
   LOG.clusteringHashGroupsMustBeEnabled( cacheName );
   // Copy the original configuration, then force groups on.
   ConfigurationBuilder builder = new ConfigurationBuilder().read( configuration );
   // NOTE(review): relies on the no-arg enabled() overload acting as a setter that turns
   // the flag on — confirm against the Infinispan GroupsConfigurationBuilder API.
   builder.clustering().hash().groups().enabled();
   return builder.build();
}
// Choose the RocksDB handler implementation based on whether the store is segmented.
KeyPartitioner keyPartitioner = registry.getComponent(KeyPartitioner.class);
if (configuration.segmented()) {
   // Segmented store: the handler tracks one region per hash segment, routing keys
   // through the cache's key partitioner.
   handler = new SegmentedRocksDBHandler(cache.getCacheConfiguration().clustering().hash().numSegments(), keyPartitioner);
} else {
// JMX metric: number of copies of each key held across the cluster.
@ManagedAttribute(description = "Number of replicas for each key", displayName = "Replication Degree")
public double getReplicationDegree() {
   switch (cacheConfiguration.clustering().cacheMode()) {
      case DIST_SYNC:
      case DIST_ASYNC:
         // Distributed: each key is stored on exactly numOwners nodes.
         return cacheConfiguration.clustering().hash().numOwners();
      case REPL_ASYNC:
      case REPL_SYNC:
         // Replicated: every current member holds every key.
         return rpcManager.getMembers().size();
      default:
         // Local / invalidation modes: a single copy.
         return 1;
   }
}
/**
 * Creates a locality view for {@code cache} based on the supplied fixed consistent hash,
 * wrapped in a synthetic topology (no rebalance, no members) so locality queries work
 * without a live cluster topology.
 */
public ConsistentHashLocality(Cache<?, ?> cache, ConsistentHash hash) {
   // Synthetic, stable topology around the supplied hash: ids 0/0, no pending hash,
   // no rebalance, empty member lists.
   CacheTopology syntheticTopology = new CacheTopology(0, 0, hash, null,
         CacheTopology.Phase.NO_REBALANCE, Collections.emptyList(), Collections.emptyList());
   this.topology = new LocalizedCacheTopology(
         cache.getCacheConfiguration().clustering().cacheMode(),
         syntheticTopology,
         cache.getCacheConfiguration().clustering().hash().keyPartitioner(),
         cache.getCacheManager().getAddress(),
         true);
}
/**
 * Creates a size-bounded data container in which only keys accepted by {@code evictable}
 * count toward the configured maximum (weight 1); all other keys weigh 0 and are never
 * evicted by the size bound.
 *
 * @param builder   configuration used to pick the container flavour
 * @param size      maximum number of evictable entries to retain
 * @param evictable predicate selecting keys that participate in eviction accounting
 * @return a segmented bounded container for cache modes that transfer state, otherwise a flat one
 */
public static <K, V> DataContainer<K, V> createDataContainer(ConfigurationBuilder builder, long size, Predicate<K> evictable) {
   // 1 for evictable keys, 0 otherwise — only evictable entries consume 'size'.
   EntrySizeCalculator<? super K, ? super InternalCacheEntry<K, V>> calculator = (key, entry) -> evictable.test(key) ? 1 : 0;
   return builder.clustering().cacheMode().needsStateTransfer()
         ? new BoundedSegmentedDataContainer<>(builder.clustering().hash().create().numSegments(), size, calculator)
         : new EvictableDataContainer<>(size, calculator);
}
} // closes the enclosing class
/**
 * Produces a key owned by {@code cache1}; {@code cache2} is added as a further owner
 * only when the configuration allows more than one owner per key.
 */
protected MagicKey magicKey(Cache<Object, String> cache1, Cache<Object, String> cache2) {
   boolean multipleOwners = cache1.getCacheConfiguration().clustering().hash().numOwners() >= 2;
   return multipleOwners ? new MagicKey(cache1, cache2) : new MagicKey(cache1);
}
protected void checkConsistencyAcrossCluster(String cacheName, Configuration configuration) { // Initialize the partitioner to ensure we can compare config equality configuration.clustering().hash().keyPartitioner().init(configuration.clustering().hash()); for (EmbeddedCacheManager m : cacheManagers) { Configuration actualConfiguration = m.getCacheConfiguration(cacheName); assertNotNull("Cache " + cacheName + " missing from " + m, actualConfiguration); assertEquals(configuration, actualConfiguration); Cache<Object, Object> cache = m.getCache(cacheName); assertEquals(cacheManagers.size(), cache.getAdvancedCache().getRpcManager().getMembers().size()); } }
/**
 * @return how many segments there are across all nodes or 1 if the config is not segmented (all shared)
 */
protected int calculateTotalSegmentsForAllNodes() {
   if (!segmented) {
      // A non-segmented (shared) store behaves as a single segment.
      return 1;
   }
   return c1.getCacheConfiguration().clustering().hash().numSegments();
}
/**
 * Verifies that numOwners accepts a positive value and rejects zero with
 * IllegalArgumentException.
 */
public void testNumOwners() {
   ConfigurationBuilder builder = new ConfigurationBuilder();
   builder.clustering().cacheMode(CacheMode.DIST_SYNC);
   builder.clustering().hash().numOwners(5);
   Configuration built = builder.build();
   Assert.assertEquals(5, built.clustering().hash().numOwners());
   try {
      builder.clustering().hash().numOwners(0);
      Assert.fail("IllegalArgumentException expected");
   } catch (IllegalArgumentException ignored) {
      // expected: owners must be strictly positive
   }
}
public void testKeySegmentFilter() { Cache<Integer, String> cache = getCache(0); int range = 12; // First populate the cache with a bunch of values IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value")); assertEquals(range, cache.size()); CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet(); // Take the first half of the segments int segments = cache.getCacheConfiguration().clustering().hash().numSegments() / 2; AtomicInteger realCount = new AtomicInteger(); KeyPartitioner keyPartitioner = cache.getAdvancedCache().getComponentRegistry().getComponent(KeyPartitioner.class); cache.forEach((k, v) -> { if (segments >= keyPartitioner.getSegment(k)) { realCount.incrementAndGet(); } }); assertEquals(realCount.get(), createStream(entrySet).filterKeySegments( IntStream.range(0, segments).boxed().collect(Collectors.toSet())).count()); }
@Test(expectedExceptions = IllegalArgumentException.class) public void testNumOwners() { ConfigurationBuilder cb = new ConfigurationBuilder(); cb.clustering().cacheMode(CacheMode.DIST_SYNC); cb.clustering().hash().numOwners(5); Configuration c = cb.build(); Assert.assertEquals(5, c.clustering().hash().numOwners()); // negative test cb.clustering().hash().numOwners(0); }