/**
 * Gets DHT affinity nodes for the cache group of the given context.
 *
 * @param ctx Cache context.
 * @param topVer Topology version.
 * @return Cache affinity nodes for given topology version.
 */
public static Collection<ClusterNode> affinityNodes(GridCacheContext ctx, AffinityTopologyVersion topVer) {
    int grpId = ctx.groupId();

    return ctx.discovery().cacheGroupAffinityNodes(grpId, topVer);
}
/**
 * Constructor of full cache context.
 *
 * @param cctx Cache context.
 * @param clientCache Client cache or not.
 */
public GridCacheContextInfo(GridCacheContext<K, V> cctx, boolean clientCache) {
    this.cctx = cctx;
    this.clientCache = clientCache;

    // Snapshot identifying data from the fully prepared context.
    config = cctx.config();
    groupId = cctx.groupId();
    cacheId = cctx.cacheId();

    // Not set for a fully prepared context.
    dynamicDeploymentId = null;
}
/**
 * Filter outs data entries from given data record that not satisfy {@link #cacheGroupPredicate}.
 * Entries whose cache context is no longer available (e.g. cache destroyed) are dropped as well.
 *
 * @param record Original data record.
 * @return Data record with filtered data entries.
 */
private DataRecord filterEntriesByGroupId(DataRecord record) {
    List<DataEntry> filteredEntries = record.writeEntries().stream()
        .filter(entry -> {
            // Resolve the cache context once per entry instead of performing the lookup twice.
            GridCacheContext entryCctx = cctx.cacheContext(entry.cacheId());

            return entryCctx != null && cacheGroupPredicate.apply(entryCctx.groupId());
        })
        .collect(Collectors.toList());

    return record.setWriteEntries(filteredEntries);
}
/** {@inheritDoc} */ @Override public Map<ClusterNode, Collection<K>> mapKeysToNodes(@Nullable Collection<? extends K> keys) { A.notNull(keys, "keys"); AffinityTopologyVersion topVer = topologyVersion(); int nodesCnt; if (!cctx.isLocal()) nodesCnt = cctx.discovery().cacheGroupAffinityNodes(cctx.groupId(), topVer).size(); else nodesCnt = 1; // Must return empty map if no alive nodes present or keys is empty. Map<ClusterNode, Collection<K>> res = new HashMap<>(nodesCnt, 1.0f); for (K key : keys) { ClusterNode primary = cctx.affinity().primaryByKey(key, topVer); if (primary == null) throw new IgniteException("Failed to get primary node [topVer=" + topVer + ", key=" + key + ']'); Collection<K> mapped = res.get(primary); if (mapped == null) { mapped = new ArrayList<>(Math.max(keys.size() / nodesCnt, 16)); res.put(primary, mapped); } mapped.add(key); } return res; }
/**
 * Registers a started cache context in this cache group.
 *
 * @param cctx Cache context.
 */
private void addCacheContext(GridCacheContext cctx) {
    // The cache must match the group: same user/system cache kind and same group id.
    assert cacheType.userCache() == cctx.userCache() : cctx.name();
    assert grpId == cctx.groupId() : cctx.name();

    final boolean add;

    // Copy-on-write publication: a new unmodifiable snapshot replaces 'caches',
    // presumably so readers can iterate it without taking this lock — verify against readers.
    synchronized (this) {
        List<GridCacheContext> copy = new ArrayList<>(caches);

        // Only a shared cache group may hold more than one cache.
        assert sharedGroup() || copy.isEmpty();

        add = copy.add(cctx);

        caches = Collections.unmodifiableList(copy);
    }

    assert add : cctx.name();

    // Sticky flags: once any cache in the group enables queries or DR, the flag stays set.
    if (!qryEnabled && QueryUtils.isEnabled(cctx.config()))
        qryEnabled = true;

    if (!drEnabled && cctx.isDrEnabled())
        drEnabled = true;
}
/**
 * Checks whether at least one write entry of the record belongs to a cache group
 * that requires encryption.
 *
 * @param rec Data record.
 * @return {@code True} if this data record should be encrypted.
 */
boolean isDataRecordEncrypted(DataRecord rec) {
    if (encryptionDisabled)
        return false;

    for (DataEntry e : rec.writeEntries()) {
        // Resolve the cache context once per entry instead of performing the lookup twice.
        GridCacheContext entryCctx = cctx.cacheContext(e.cacheId());

        if (entryCctx != null && needEncryption(entryCctx.groupId()))
            return true;
    }

    return false;
}
/**
 * @param dataRec Data record to serialize.
 * @return Full data record size.
 * @throws IgniteCheckedException If failed to obtain the length of one of the entries.
 */
protected int dataSize(DataRecord dataRec) throws IgniteCheckedException {
    boolean encrypted = isDataRecordEncrypted(dataRec);

    int sz = 0;

    for (DataEntry entry : dataRec.writeEntries()) {
        int clSz = entrySize(entry);

        // Cache context may be absent (e.g. cache already destroyed) — isDataRecordEncrypted()
        // tolerates that, so treat such entries as not encrypted here as well instead of NPE-ing.
        GridCacheContext entryCctx = cctx.cacheContext(entry.cacheId());

        if (!encryptionDisabled && entryCctx != null && needEncryption(entryCctx.groupId()))
            sz += encSpi.encryptedSize(clSz) + 1 /* encrypted flag */ + 4 /* groupId */ + 4 /* data size */;
        else {
            sz += clSz;

            // Plain entries inside a partially encrypted record still carry the flag byte.
            if (encrypted)
                sz += 1 /* encrypted flag */;
        }
    }

    return sz;
}
/**
 * @param expVer Expected topology version.
 * @param curVer Current topology version.
 * @param keys Keys the operation touches.
 * @return {@code True} if cache affinity changed and operation should be remapped.
 */
protected final boolean needRemap(AffinityTopologyVersion expVer, AffinityTopologyVersion curVer, Collection<KeyCacheObject> keys) {
    // Same version — affinity cannot have changed.
    if (curVer.equals(expVer))
        return false;

    AffinityTopologyVersion lastAffChangedTopVer = ctx.shared().exchange().lastAffinityChangedTopologyVersion(expVer);

    // Current version lies within [lastAffChangedTopVer, expVer]: no affinity-changing
    // event happened in between, so no remap is needed.
    if (curVer.compareTo(lastAffChangedTopVer) >= 0 && curVer.compareTo(expVer) <= 0)
        return false;

    // TODO IGNITE-7164 check mvcc crd for mvcc enabled txs.
    Collection<ClusterNode> cacheNodes0 = ctx.discovery().cacheGroupAffinityNodes(ctx.groupId(), expVer);
    Collection<ClusterNode> cacheNodes1 = ctx.discovery().cacheGroupAffinityNodes(ctx.groupId(), curVer);

    // Remap if the affinity node set changed, or local affinity has not caught up
    // with the current topology version yet.
    if (!cacheNodes0.equals(cacheNodes1) || ctx.affinity().affinityTopologyVersion().compareTo(curVer) < 0)
        return true;

    try {
        List<List<ClusterNode>> aff1 = ctx.affinity().assignments(expVer);
        List<List<ClusterNode>> aff2 = ctx.affinity().assignments(curVer);

        // Same node set — remap only if partition assignments actually differ.
        return !aff1.equals(aff2);
    }
    catch (IllegalStateException ignored) {
        // Assignment for one of the versions is unavailable (presumably evicted from
        // affinity history) — remap to be safe.
        return true;
    }
}
// Mutable snapshot of the data nodes for this cache group; NONE is presumably an
// "unfiltered" marker — TODO confirm against dataNodes() semantics.
Set<ClusterNode> dataNodes = new HashSet<>(dataNodes(cctx.groupId(), NONE));
// Cache is considered stopped if either its whole group or the cache itself
// was stopped during reconnect.
boolean stopped = reconnectRes.stoppedCacheGroups().contains(cache.context().groupId()) || reconnectRes.stoppedCaches().contains(cache.name());
// Cache group id of the current context.
int grpId = cctx.groupId();
/** Checks that the row cache appears only once an SQL-enabled cache joins the group. */
@Test
public void testDestroyCacheCreation() {
    final String cacheWithoutSql = "cache0";
    final String cacheWithSql = "cache1";

    // First cache has no SQL — the group must have no row cache yet.
    grid().getOrCreateCache(cacheConfiguration(cacheWithoutSql, false));

    int grpId = grid().cachex(cacheWithoutSql).context().groupId();

    assertNull(rowCache(grid(), grpId));

    // Second (SQL-enabled) cache joins the same group and triggers row cache creation.
    grid().getOrCreateCache(cacheConfiguration(cacheWithSql, true));

    assertEquals(grpId, grid().cachex(cacheWithSql).context().groupId());
    assertNotNull(rowCache(grid(), grpId));
}
/**
 * Handle of cache context which was fully prepared.
 *
 * @param cacheCtx Fully prepared context.
 * @throws IgniteCheckedException if failed.
 */
private void onCacheStarted(GridCacheContext cacheCtx) throws IgniteCheckedException {
    GridCacheAdapter startedCache = cacheCtx.cache();
    CacheConfiguration ccfg = cacheCtx.config();
    CacheGroupContext grpCtx = cacheGrps.get(cacheCtx.groupId());

    cacheCtx.onStarted();

    // Fall back to the default data region name when none is configured explicitly.
    String regionName = ccfg.getDataRegionName();

    if (regionName == null && ctx.config().getDataStorageConfiguration() != null)
        regionName = ctx.config().getDataStorageConfiguration().getDefaultDataRegionConfiguration().getName();

    if (log.isInfoEnabled()) {
        log.info("Started cache [name=" + ccfg.getName() +
            ", id=" + cacheCtx.cacheId() +
            (ccfg.getGroupName() != null ? ", group=" + ccfg.getGroupName() : "") +
            ", dataRegionName=" + regionName +
            ", mode=" + ccfg.getCacheMode() +
            ", atomicity=" + ccfg.getAtomicityMode() +
            ", backups=" + ccfg.getBackups() +
            ", mvcc=" + cacheCtx.mvccEnabled() + ']');
    }

    grpCtx.onCacheStarted(cacheCtx);

    onKernalStart(startedCache);
}
/**
 * Test that queues within the same group and compatible configurations are stored in the same cache.
 *
 * @throws Exception If failed.
 */
@Test
public void testCacheReuse() throws Exception {
    CollectionConfiguration colCfg = collectionConfiguration();

    colCfg.setAtomicityMode(ATOMIC);
    colCfg.setGroupName("grp1");

    IgniteQueue queue1 = initQueue(0, "queue1", 100, colCfg);
    IgniteQueue queue2 = initQueue(0, "queue2", 100, colCfg);

    // Use test assertions instead of the 'assert' keyword so the checks run even without -ea.
    assertEquals(cctx(queue1).cacheId(), cctx(queue2).cacheId());

    // Different atomicity mode forces a different underlying cache within the same group.
    colCfg.setAtomicityMode(TRANSACTIONAL);

    IgniteQueue queue3 = initQueue(0, "queue3", 100, colCfg);
    IgniteQueue queue4 = initQueue(0, "queue4", 100, colCfg);

    assertEquals(cctx(queue3).cacheId(), cctx(queue4).cacheId());
    assertTrue(cctx(queue1).cacheId() != cctx(queue3).cacheId());
    assertEquals(cctx(queue1).groupId(), cctx(queue3).groupId());

    // A different group name yields a different cache group.
    colCfg.setGroupName("gtp2");

    IgniteQueue queue5 = initQueue(0, "queue5", 100, colCfg);
    IgniteQueue queue6 = initQueue(0, "queue6", 100, colCfg);

    assertEquals(cctx(queue5).cacheId(), cctx(queue6).cacheId());
    assertTrue(cctx(queue1).groupId() != cctx(queue5).groupId());
}
/**
 * Verifies cache sharing rules for sets in the same/different groups.
 *
 * @param collocated Collocation flag.
 */
private void testCacheReuse(boolean collocated) {
    Ignite ignite = grid(0);

    CollectionConfiguration cfg = collectionConfiguration().setCollocated(collocated);

    cfg.setAtomicityMode(ATOMIC);
    cfg.setGroupName("grp1");

    IgniteSet set1 = ignite.set("set1", cfg);
    IgniteSet set2 = ignite.set("set2", cfg);

    // Separated sets live in distinct caches; otherwise the cache is shared.
    assertEquals(separated(set1), cctx(set1).cacheId() != cctx(set2).cacheId());

    cfg.setAtomicityMode(TRANSACTIONAL);

    IgniteSet set3 = ignite.set("set3", cfg);
    IgniteSet set4 = ignite.set("set4", cfg);

    assertEquals(separated(set3), cctx(set3).cacheId() != cctx(set4).cacheId());

    // Different atomicity mode — different cache, but still the same group.
    assertTrue(cctx(set1).cacheId() != cctx(set3).cacheId());
    assertTrue(cctx(set1).groupId() == cctx(set3).groupId());

    cfg.setGroupName("gtp2");

    IgniteSet set5 = ignite.set("set5", cfg);
    IgniteSet set6 = ignite.set("set6", cfg);

    assertEquals(separated(set5), cctx(set5).cacheId() != cctx(set6).cacheId());

    // Different group name — different cache group.
    assertTrue(cctx(set1).groupId() != cctx(set5).groupId());

    Stream.of(set1, set2, set3, set4, set5, set6).forEach(IgniteSet::close);
}
/** * @throws Exception If failed. */ @SuppressWarnings("unchecked") private void checkUpdateEntry() throws Exception { final String cacheName = "cache"; grid().getOrCreateCache(cacheConfiguration(cacheName, true)); int grpId = grid().cachex(cacheName).context().groupId(); assertEquals(grpId, grid().cachex(cacheName).context().groupId()); fillCache(cacheName); H2RowCache rowCache = rowCache(grid(), grpId); fillRowCache(cacheName); assertNotNull(rowCache); int key = RND.nextInt(ENTRIES); long rowLink = getLinkForKey(cacheName, rowCache, key); int rowCacheSize = rowCache.size(); assertNotNull(rowCache.get(rowLink)); // Update row grid().cache(cacheName).put(key, new Value(key + 1)); assertNull(rowCache.get(rowLink)); int rowCacheSizeAfterUpdate = rowCache.size(); assertEquals(rowCacheSize - 1, rowCacheSizeAfterUpdate); // Check updated value. List<Cache.Entry<Integer, Value>> res = grid().<Integer, Value>cache(cacheName) .query(new SqlQuery(Value.class, "_key = " + key)).getAll(); assertEquals(1, res.size()); assertEquals(key + 1, (int)res.get(0).getValue().lVal); }
/** * @throws Exception If failed. */ @SuppressWarnings("unchecked") private void checkDeleteEntry() throws Exception { final String cacheName = "cache"; grid().getOrCreateCache(cacheConfiguration(cacheName, true)); int grpId = grid().cachex(cacheName).context().groupId(); assertEquals(grpId, grid().cachex(cacheName).context().groupId()); fillCache(cacheName); H2RowCache rowCache = rowCache(grid(), grpId); fillRowCache(cacheName); assertNotNull(rowCache); int key = RND.nextInt(ENTRIES); grid().cache(cacheName) .query(new SqlQuery(Value.class, "_key = " + key)).getAll(); int rowCacheSize = rowCache.size(); long rowLink = getLinkForKey(cacheName, rowCache, key); assertNotNull(rowCache.get(rowLink)); // Remove grid().cache(cacheName).remove(key); assertNull(rowCache.get(rowLink)); int rowCacheSizeAfterUpdate = rowCache.size(); assertEquals(rowCacheSize - 1, rowCacheSizeAfterUpdate); }
/** Checks that page eviction shrinks the H2 row cache. */
private void checkRowCacheOnPageEviction() {
    grid().getOrCreateCache(cacheConfiguration(CACHE_NAME, true));

    int grpId = grid().cachex(CACHE_NAME).context().groupId();

    assertEquals(grpId, grid().cachex(CACHE_NAME).context().groupId());

    // Load the initial data set.
    try (IgniteDataStreamer<Integer, Value> stream = grid().dataStreamer(CACHE_NAME)) {
        for (int i = 0; i < ENTRIES; ++i)
            stream.addData(i, new Value(i));
    }

    H2RowCache rowCache = rowCache(grid()).forGroup(grpId);

    fillRowCache(CACHE_NAME);

    assertNotNull(rowCache);

    int sizeBeforeEvict = rowCache.size();

    // Load a second batch to force page evictions.
    try (IgniteDataStreamer<Integer, Value> stream = grid().dataStreamer(CACHE_NAME)) {
        for (int i = ENTRIES; i < 2 * ENTRIES; ++i)
            stream.addData(i, new Value(i));
    }

    assertTrue("rowCache size before evictions: " + sizeBeforeEvict +
        ", after evictions: " + rowCache.size(),
        sizeBeforeEvict > rowCache.size());
}