/**
 * Resolves the ID of the node that owns the given partition, lazily building
 * and caching a partition -> node lookup table from {@code partsMap}.
 *
 * @param p Partition.
 * @param cctx Cache context.
 * @return Owning node ID.
 */
public UUID nodeForPartition(int p, GridCacheContext<?, ?> cctx) {
    UUID[] nodeIds = partsNodes;

    if (nodeIds == null) {
        // Lookup table not built yet: invert partsMap (node -> owned partitions)
        // into a flat array indexed by partition number.
        assert partsMap != null;

        nodeIds = new UUID[cctx.affinity().partitions()];

        for (Map.Entry<UUID, int[]> e : partsMap.entrySet()) {
            UUID nodeId = e.getKey();
            int[] nodeParts = e.getValue();

            assert nodeId != null;
            assert !F.isEmpty(nodeParts);

            for (int part : nodeParts) {
                // Each partition must appear under exactly one node in the map.
                assert nodeIds[part] == null;

                nodeIds[part] = nodeId;
            }
        }

        // Cache the table for subsequent lookups. NOTE(review): if this can run
        // concurrently, two threads may each build an identical table (benign
        // race) — confirm the intended threading model.
        partsNodes = nodeIds;
    }

    return nodeIds[p];
}
/**
 * Collects the data nodes a query should touch for the given cache, optionally
 * restricted to a cluster projection and/or a single partition.
 *
 * @param cctx Cache context.
 * @param prj Projection (optional).
 * @param part Partition (optional).
 * @return Collection of data nodes in provided projection (if any).
 * @throws IgniteCheckedException If partition number is invalid.
 */
private static Collection<ClusterNode> nodes(final GridCacheContext<?, ?> cctx,
    @Nullable final ClusterGroup prj, @Nullable final Integer part) throws IgniteCheckedException {
    assert cctx != null;

    final AffinityTopologyVersion topVer = cctx.affinity().affinityTopologyVersion();

    // All affinity nodes for the cache at the current topology version.
    Collection<ClusterNode> affNodes = CU.affinityNodes(cctx, topVer);

    // No restriction requested — return the full set.
    if (prj == null && part == null)
        return affNodes;

    // Reject out-of-range partitions up front. Negative values are rejected
    // too, consistent with the (part < 0 || part >= partitions) validation
    // applied to partition arguments elsewhere in this file.
    if (part != null && (part < 0 || part >= cctx.affinity().partitions()))
        throw new IgniteCheckedException("Invalid partition number: " + part);

    // Current owners of the requested partition (empty if no partition filter).
    final Set<ClusterNode> owners =
        part == null ? Collections.<ClusterNode>emptySet() : new HashSet<>(cctx.topology().owners(part, topVer));

    // Keep only nodes that host the cache, belong to the projection (if any)
    // and own the requested partition (if any).
    return F.view(affNodes, new P1<ClusterNode>() {
        @Override public boolean apply(ClusterNode n) {
            return cctx.discovery().cacheAffinityNode(n, cctx.name()) &&
                (prj == null || prj.node(n.id()) != null) &&
                (part == null || owners.contains(n));
        }
    });
}
// NOTE(review): presumably processes all partition IDs in [0, partitions) —
// confirm U.forRange's range semantics (inclusive/exclusive upper bound).
U.forRange(0, cctx.affinity().partitions());
/**
 * Computes the set of nodes this query must be executed on, depending on the
 * cache distribution mode.
 *
 * @return Nodes to execute on.
 * @throws IgniteCheckedException If the requested partition number is invalid.
 */
private Collection<ClusterNode> nodes() throws IgniteCheckedException {
    CacheMode cacheMode = cctx.config().getCacheMode();

    Integer part = partition();

    switch (cacheMode) {
        case LOCAL:
            if (prj != null)
                U.warn(log, "Ignoring query projection because it's executed over LOCAL cache " +
                    "(only local node will be queried): " + this);

            // The cache mode is already known to be LOCAL in this branch, so
            // the redundant getCacheMode() == LOCAL re-check was dropped.
            // Negative partitions are rejected as well, consistent with the
            // (part < 0 || part >= partitions) validation used elsewhere.
            if (type == SCAN && part != null && (part < 0 || part >= cctx.affinity().partitions()))
                throw new IgniteCheckedException("Invalid partition number: " + part);

            // LOCAL cache is always queried on the local node only.
            return Collections.singletonList(cctx.localNode());

        case REPLICATED:
            if (prj != null || part != null)
                return nodes(cctx, prj, part);

            // Prefer the local node when it hosts the data.
            if (cctx.affinityNode())
                return Collections.singletonList(cctx.localNode());

            Collection<ClusterNode> affNodes = nodes(cctx, null, null);

            // Any single replica suffices for a REPLICATED cache.
            return affNodes.isEmpty() ? affNodes : Collections.singletonList(F.rand(affNodes));

        case PARTITIONED:
            return nodes(cctx, prj, part);

        default:
            throw new IllegalStateException("Unknown cache distribution mode: " + cacheMode);
    }
}
for (int i = 0; i < cctx.affinity().partitions(); i++) { try { Map<Object, Object> exp = entries.get(i);
for (int p = 0, parts = cctx.affinity().partitions(); p < parts; p++) { List<ClusterNode> owners = cctx.topology().owners(p);
// Total number of partitions configured for the cache's affinity function.
int partsCnt = cctx.affinity().partitions();
/**
 * Verifies that a SCAN query restricted to a single partition returns exactly
 * the entries mapped to that partition by the affinity function.
 *
 * @throws Exception In case of error.
 */
@Test
public void testScanPartitionQuery() throws Exception {
    IgniteCache<Integer, Integer> cache = jcache(Integer.class, Integer.class);

    GridCacheContext cctx = ((IgniteCacheProxy)cache).context();

    // Expected per-partition contents: partition -> (key -> value).
    Map<Integer, Map<Integer, Integer>> entries = new HashMap<>();

    for (int i = 0; i < KEY_CNT; i++) {
        cache.put(i, i);

        // computeIfAbsent replaces the manual get / put-if-null idiom.
        entries.computeIfAbsent(cctx.affinity().partition(i), p -> new HashMap<>()).put(i, i);
    }

    for (int i = 0; i < cctx.affinity().partitions(); i++) {
        ScanQuery<Integer, Integer> scan = new ScanQuery<>(i);

        Collection<Cache.Entry<Integer, Integer>> actual = cache.query(scan).getAll();

        Map<Integer, Integer> exp = entries.get(i);

        int size = exp == null ? 0 : exp.size();

        assertEquals("Failed for partition: " + i, size, actual.size());

        if (exp == null)
            assertTrue(actual.isEmpty());
        else
            for (Cache.Entry<Integer, Integer> entry : actual)
                // assertEquals yields a diagnostic message on mismatch,
                // unlike the original assertTrue(value.equals(...)).
                assertEquals("Failed for partition: " + i, exp.get(entry.getKey()), entry.getValue());
    }
}
/**
 * Waits until affinity for the initial topology version is ready, then
 * pre-creates partition recovery state for every partition of the cache.
 *
 * @param ctx Context.
 * @throws IgniteCheckedException In case of error.
 */
void waitTopologyFuture(GridKernalContext ctx) throws IgniteCheckedException {
    GridCacheContext<K, V> cctx = cacheContext(ctx);

    // LOCAL caches have no distributed topology to wait for.
    if (!cctx.isLocal()) {
        AffinityTopologyVersion topVer = initTopVer;

        // Reuse the already-resolved cache context instead of calling
        // cacheContext(ctx) again for each use.
        cctx.shared().exchange().affinityReadyFuture(topVer).get();

        // Hoist the partition count out of the loop condition.
        int parts = cctx.affinity().partitions();

        for (int partId = 0; partId < parts; partId++)
            getOrCreatePartitionRecovery(ctx, partId, topVer);
    }
}
final int partsCnt = cctx.affinity().partitions(); continue; int parts = extraCctx.affinity().partitions(); continue; for (int p = 0, parts = extraCctx.affinity().partitions(); p < parts; p++) { List<ClusterNode> owners = extraCctx.topology().owners(p);
// The assignment must contain an entry for every configured partition.
assertEquals(ctx.affinity().partitions(), parts.size());
int parts = cache.context().affinity().partitions(); int parts = cache.context().affinity().partitions();
/**
 * Checks that after exchange all nodes have consistent state about partition owners.
 *
 * @throws Exception If failed.
 */
private void checkTopologiesConsistency() throws Exception {
    List<Ignite> grids = G.allGrids();

    // Pick the oldest node (smallest topology order) as the reference coordinator.
    IgniteEx crd = null;

    for (Ignite grid : grids) {
        ClusterNode n = grid.cluster().localNode();

        if (crd == null || n.order() < crd.localNode().order())
            crd = (IgniteEx)grid;
    }

    // Every other node's per-partition owner lists must match the coordinator's view.
    for (Ignite grid : grids) {
        IgniteEx ex = (IgniteEx)grid;

        if (ex.localNode().id().equals(crd.localNode().id()))
            continue;

        for (IgniteInternalCache cache : ex.context().cache().caches()) {
            int partitions = cache.context().affinity().partitions();

            for (int p = 0; p < partitions; p++) {
                List<ClusterNode> crdOwners = crd.cachex(cache.name()).cache().context().topology().owners(p);
                List<ClusterNode> owners = cache.context().topology().owners(p);

                assertEquals(crdOwners, owners);
            }
        }
    }
}
if (part != null && (part < 0 || part >= cctx.affinity().partitions())) return new GridEmptyCloseableIterator() { @Override public void close() throws IgniteCheckedException {
/**
 * Returns the ID of the node owning the given partition. The reverse lookup
 * table (partition -> node ID) is built lazily from {@code partsMap} on first
 * access and then cached in {@code partsNodes}.
 *
 * @param p Partition.
 * @param cctx Cache context.
 * @return Owning node ID.
 */
public UUID nodeForPartition(int p, GridCacheContext<?, ?> cctx) {
    UUID[] cached = partsNodes;

    if (cached == null) {
        assert partsMap != null;

        UUID[] built = new UUID[cctx.affinity().partitions()];

        for (Map.Entry<UUID, int[]> entry : partsMap.entrySet()) {
            UUID owner = entry.getKey();
            int[] owned = entry.getValue();

            assert owner != null;
            assert !F.isEmpty(owned);

            for (int partId : owned) {
                // A partition may appear under at most one node.
                assert built[partId] == null;

                built[partId] = owner;
            }
        }

        partsNodes = built;

        cached = built;
    }

    return cached[p];
}
/**
 * @param cctx Cache context.
 * @param prj Projection (optional).
 * @param part Partition (optional).
 * @return Collection of data nodes in provided projection (if any).
 * @throws IgniteCheckedException If partition number is invalid.
 */
private static Collection<ClusterNode> nodes(final GridCacheContext<?, ?> cctx,
    @Nullable final ClusterGroup prj, @Nullable final Integer part) throws IgniteCheckedException {
    assert cctx != null;

    final AffinityTopologyVersion topVer = cctx.affinity().affinityTopologyVersion();

    // All affinity nodes for the cache at the current topology version.
    Collection<ClusterNode> affNodes = CU.affinityNodes(cctx, topVer);

    // No restriction requested — return the full set.
    if (prj == null && part == null)
        return affNodes;

    // NOTE(review): negative partitions are not rejected here — confirm that
    // callers validate part >= 0 (other code in this file checks part < 0).
    if (part != null && part >= cctx.affinity().partitions())
        throw new IgniteCheckedException("Invalid partition number: " + part);

    // Current owners of the requested partition (empty if no partition filter).
    final Set<ClusterNode> owners =
        part == null ? Collections.<ClusterNode>emptySet() : new HashSet<>(cctx.topology().owners(part, topVer));

    // Keep only nodes that host the cache, belong to the projection (if any)
    // and own the requested partition (if any).
    return F.view(affNodes, new P1<ClusterNode>() {
        @Override public boolean apply(ClusterNode n) {
            return cctx.discovery().cacheAffinityNode(n, cctx.name()) &&
                (prj == null || prj.node(n.id()) != null) &&
                (part == null || owners.contains(n));
        }
    });
}
// NOTE(review): presumably processes all partition IDs in [0, partitions) —
// confirm U.forRange's range semantics (inclusive/exclusive upper bound).
U.forRange(0, cctx.affinity().partitions());
/**
 * Computes the set of nodes this query must be executed on, depending on the
 * cache distribution mode.
 *
 * @return Nodes to execute on.
 * @throws IgniteCheckedException If the requested partition number is invalid.
 */
private Collection<ClusterNode> nodes() throws IgniteCheckedException {
    CacheMode cacheMode = cctx.config().getCacheMode();

    Integer part = partition();

    switch (cacheMode) {
        case LOCAL:
            if (prj != null)
                U.warn(log, "Ignoring query projection because it's executed over LOCAL cache " +
                    "(only local node will be queried): " + this);

            // NOTE(review): the getCacheMode() == LOCAL re-check is redundant
            // inside this case — cacheMode is already LOCAL here.
            if (type == SCAN && cctx.config().getCacheMode() == LOCAL && part != null &&
                part >= cctx.affinity().partitions())
                throw new IgniteCheckedException("Invalid partition number: " + part);

            // LOCAL cache is always queried on the local node only.
            return Collections.singletonList(cctx.localNode());

        case REPLICATED:
            if (prj != null || part != null)
                return nodes(cctx, prj, part);

            // Prefer the local node when it hosts the data.
            if (cctx.affinityNode())
                return Collections.singletonList(cctx.localNode());

            Collection<ClusterNode> affNodes = nodes(cctx, null, null);

            // Any single replica suffices for a REPLICATED cache.
            return affNodes.isEmpty() ? affNodes : Collections.singletonList(F.rand(affNodes));

        case PARTITIONED:
            return nodes(cctx, prj, part);

        default:
            throw new IllegalStateException("Unknown cache distribution mode: " + cacheMode);
    }
}
for (int p = 0, parts = cctx.affinity().partitions(); p < parts; p++) { List<ClusterNode> owners = cctx.topology().owners(p);
/**
 * Waits until affinity for the initial topology version is ready, then
 * pre-creates partition recovery state for every partition of the cache.
 *
 * @param ctx Context.
 * @throws IgniteCheckedException In case of error.
 */
void waitTopologyFuture(GridKernalContext ctx) throws IgniteCheckedException {
    GridCacheContext<K, V> cctx = cacheContext(ctx);

    // LOCAL caches have no distributed topology to wait for.
    if (!cctx.isLocal()) {
        AffinityTopologyVersion topVer = initTopVer;

        // Block until affinity for the initial topology version is calculated.
        cacheContext(ctx).shared().exchange().affinityReadyFuture(topVer).get();

        // Pre-create recovery state for every partition of the cache.
        for (int partId = 0; partId < cacheContext(ctx).affinity().partitions(); partId++)
            getOrCreatePartitionRecovery(ctx, partId, topVer);
    }
}