@Override public void onEvent(GridEvent evt) {
    assert evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_JOINED;

    GridDiscoveryEvent discoEvt = (GridDiscoveryEvent)evt;

    // Notify backup worker on each topology change.
    if (CU.affinityNode(cctx, discoEvt.eventNode()))
        backupWorker.addEvent(discoEvt);
}
},
/**
 * @return {@code True} if local node can calculate affinity on its own for this partition map exchange.
 */
private boolean canCalculateAffinity() {
    GridCacheAffinityFunction affFunc = cctx.config().getAffinity();

    // Do not request affinity from remote nodes if affinity function is not centralized.
    if (!U.hasAnnotation(affFunc, GridCacheCentralizedAffinityFunction.class))
        return true;

    // If local node did not initiate exchange or local node is the only cache node in grid.
    Collection<GridNode> affNodes = CU.affinityNodes(cctx, exchId.topologyVersion());

    return !exchId.nodeId().equals(cctx.localNodeId()) ||
        (affNodes.size() == 1 && affNodes.contains(cctx.localNode()));
}
/**
 * @param cctx Cache context.
 * @param prj Projection (optional).
 * @return Collection of data nodes in provided projection (if any).
 */
private static Collection<GridNode> nodes(final GridCacheContext<?, ?> cctx, @Nullable final GridProjection prj) {
    assert cctx != null;

    return F.view(CU.allNodes(cctx), new P1<GridNode>() {
        @Override public boolean apply(GridNode n) {
            GridCacheDistributionMode mode = U.distributionMode(n, cctx.name());

            return (mode == PARTITIONED_ONLY || mode == NEAR_PARTITIONED) &&
                (prj == null || prj.node(n.id()) != null);
        }
    });
}
/**
 * @param spaceName Space name.
 * @param keyBytes Key bytes.
 */
@SuppressWarnings({"unchecked"})
public void onEvictFromSwap(String spaceName, byte[] keyBytes) {
    assert spaceName != null;
    assert keyBytes != null;

    /*
     * NOTE: this method should not have any synchronization because
     * it is called from synchronization block within Swap SPI.
     */

    GridCacheAdapter cache = caches.get(CU.cacheNameForSwapSpaceName(spaceName));

    assert cache != null : "Failed to resolve cache name for swap space name: " + spaceName;

    GridCacheContext cctx = cache.configuration().getCacheMode() == PARTITIONED ?
        ((GridNearCacheAdapter<?, ?>)cache).dht().context() : cache.context();

    if (spaceName.equals(CU.swapSpaceName(cctx))) {
        GridCacheQueryManager qryMgr = cctx.queries();

        if (qryMgr != null) {
            try {
                Object key = cctx.marshaller().unmarshal(keyBytes, cctx.deploy().globalLoader());

                qryMgr.remove(key, keyBytes);
            }
            catch (GridException e) {
                U.error(log, "Failed to unmarshal key evicted from swap [swapSpaceName=" + spaceName + ']', e);
            }
        }
    }
}
drSysCaches.add(CU.cacheNameForDrSystemCache(cacheName));

if (CU.isDrSystemCache(ccfg.getName()))
    throw new GridException("Cache name cannot start with \"" + CU.SYS_CACHE_DR_PREFIX +
        "\" because this prefix is reserved for internal purposes.");

if (CU.isHadoopSystemCache(ccfg.getName()))
    throw new GridException("Cache name cannot be \"" + CU.SYS_CACHE_HADOOP_MR +
        "\" because it is reserved for internal purposes.");

drSysCaches.add(CU.cacheNameForDrSystemCache(ccfg.getName()));

if (CU.isUtilityCache(ccfg.getName()))
    throw new GridException("Cache name cannot start with \"" + CU.UTILITY_CACHE_NAME +
        "\" because this prefix is reserved for internal purposes.");

copies[cloneIdx++] = CU.hadoopSystemCache();
copies[cloneIdx++] = CU.drSystemCache(drSysCache);

copies[idx++] = CU.hadoopSystemCache();
copies[idx++] = CU.drSystemCache(drSysCache);
@Override public Boolean apply(GridNearLockResponse<K, V> res, Exception e) {
    if (CU.isLockTimeoutOrCancelled(e) || (res != null && CU.isLockTimeoutOrCancelled(res.error())))
        return false;

    boolean pass = res.filterResult(i);

    tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse());

cctx.events().addEvent(entry.partition(), entry.key(), tx, null, EVT_CACHE_OBJECT_READ,
    newVal, newVal != null, oldVal, hasBytes, CU.subjectId(tx, cctx), null,
    inTx() ? tx.resolveTaskName() : null);
rmtNodes = new ConcurrentLinkedQueue<>(CU.aliveRemoteNodes(cctx, exchId.topologyVersion()));

new GridDhtAssignmentFetchFuture<>(cctx, exchId.topologyVersion(), CU.affinityNodes(cctx));
public GridCacheOptimisticCheckPreparedTxFuture(GridCacheContext<K, V> cctx, GridCacheTxEx<K, V> tx,
    UUID failedNodeId, Map<UUID, Collection<UUID>> txNodes) {
    super(cctx.kernalContext(), CU.boolReducer());
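// This future (like the lock futures further below) aggregates per-node Boolean replies through
// CU.boolReducer(). The sketch below shows an AND-style reducer of that shape; the class name, the
// collect()/reduce() split, and the AND semantics are assumptions for illustration, not the GridGain API.
final class BoolAndReducerSketch {
    /** Running result; stays {@code true} only while every collected value is {@code true}. */
    private boolean res = true;

    /** Folds in one partial result and returns the current reduced value. */
    boolean collect(Boolean b) {
        if (b == null || !b)
            res = false;

        return res;
    }

    /** @return Final reduced value. */
    boolean reduce() {
        return res;
    }

    public static void main(String[] args) {
        BoolAndReducerSketch r = new BoolAndReducerSketch();

        for (Boolean b : new Boolean[] {Boolean.TRUE, Boolean.TRUE, Boolean.FALSE})
            r.collect(b);

        System.out.println(r.reduce()); // false: at least one node answered negatively.
    }
}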
F.eq(ctx.name(), CU.cacheNameForDrSystemCache(ccfg.getName()))) {
    drSysCache = true;

if (F.eq(ctx.name(), CU.cacheNameForDrSystemCache(cacheName))) {
    drSysCache = true;
/**
 * @param keys Keys.
 * @param keyFilter Key filter.
 * @param filter Entry filter.
 * @return Entry set.
 */
public Set<GridCacheEntry<K, V>> entrySet(@Nullable Collection<? extends K> keys,
    @Nullable GridPredicate<K> keyFilter, @Nullable GridPredicate<GridCacheEntry<K, V>>... filter) {
    if (F.isEmpty(keys))
        return emptySet();

    if (keyCheck)
        validateCacheKeys(keys);

    return new GridCacheEntrySet<>(ctx, F.viewReadOnly(keys, CU.cacheKey2Entry(ctx), keyFilter), filter);
}
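// A plain-Java sketch of the same idea: build a read-only entry set over the supplied keys, filtering
// first by key and then by entry predicates. F.viewReadOnly and CU.cacheKey2Entry are GridGain internals;
// entryFor below is a hypothetical key-to-entry lookup used only for illustration, and unlike the
// original lazy view this sketch materializes the set eagerly.
import java.util.*;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;

final class EntryViewSketch {
    static <K, E> Set<E> entrySet(Collection<? extends K> keys, Predicate<K> keyFilter,
        Function<K, E> entryFor, Predicate<E> entryFilter) {
        if (keys == null || keys.isEmpty())
            return Collections.emptySet();

        Set<E> res = keys.stream()
            .filter(keyFilter)   // Key-level filter, applied before entries are materialized.
            .map(entryFor)       // Map each key to its cache entry.
            .filter(entryFilter) // Entry-level filter.
            .collect(Collectors.toSet());

        return Collections.unmodifiableSet(res); // Callers get a read-only view, as in the original.
    }
}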
for (GridCacheAttributes locAttr : locAttrs) {
    if (F.eq(rmtAttr.cacheName(), locAttr.cacheName())) {
        CU.checkAttributeMismatch(log, rmtAttr.cacheName(), rmt, "cacheMode", "Cache mode",
            locAttr.cacheMode(), rmtAttr.cacheMode(), true);

        CU.checkAttributeMismatch(log, rmtAttr.cacheName(), rmt, "interceptor", "Cache Interceptor",
            interceptor(ctx.discovery().localNode(), rmtAttr.cacheName()), interceptor(rmt, rmtAttr.cacheName()),
            true);

        CU.checkAttributeMismatch(log, rmtAttr.cacheName(), rmt, "atomicityMode", "Cache atomicity mode",
            locAttr.atomicityMode(), rmtAttr.atomicityMode(), true);

        CU.checkAttributeMismatch(log, rmtAttr.cacheName(), rmt, "cachePreloadMode", "Cache preload mode",
            locAttr.cachePreloadMode(), rmtAttr.cachePreloadMode(), true);

        CU.checkAttributeMismatch(log, rmtAttr.cacheName(), rmt, "cacheAffinity", "Cache affinity",
            locAttr.cacheAffinityClassName(), rmtAttr.cacheAffinityClassName(), true);

        CU.checkAttributeMismatch(log, rmtAttr.cacheName(), rmt, "cacheAffinityMapper", "Cache affinity mapper",
            locAttr.cacheAffinityMapperClassName(), rmtAttr.cacheAffinityMapperClassName(), true);

        CU.checkAttributeMismatch(log, rmtAttr.cacheName(), rmt, "affinityPartitionsCount",
            "Affinity partitions count", locAttr.affinityPartitionsCount(), rmtAttr.affinityPartitionsCount(),
            true);

        CU.checkAttributeMismatch(log, rmtAttr.cacheName(), rmt, "evictionFilter", "Eviction filter",
            locAttr.evictionFilterClassName(), rmtAttr.evictionFilterClassName(), true);
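// CU.checkAttributeMismatch compares a local and a remote configuration attribute and complains when
// they differ. The helper below is only a sketch of that comparison pattern: the real method's
// signature, logging, and exception type differ (it takes a logger and the remote node, and uses
// GridException rather than IllegalStateException).
import java.util.Objects;

final class AttributeCheckSketch {
    static <T> void checkAttributeMismatch(String cacheName, String attrName, String attrMsg,
        T locVal, T rmtVal, boolean fail) {
        if (!Objects.equals(locVal, rmtVal)) {
            String msg = attrMsg + " mismatch [cacheName=" + cacheName + ", attr=" + attrName +
                ", localValue=" + locVal + ", remoteValue=" + rmtVal + ']';

            if (fail)
                throw new IllegalStateException(msg); // Fail on incompatible configuration.
            else
                System.err.println("WARNING: " + msg); // Otherwise only warn.
        }
    }
}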
long timeout, GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());

    assert keys != null;
sysCaches.add(CU.cacheNameForDrSystemCache(ccfg.getName()));

sysCaches.add(CU.cacheNameForDrSystemCache(cacheName));
/**
 * @param ctx Context.
 * @param keys Keys.
 * @return Mapped keys.
 */
@SuppressWarnings({"unchecked", "MismatchedQueryAndUpdateOfCollection"})
public static <K> Map<GridNode, Collection<K>> mapKeysToNodes(GridCacheContext<K, ?> ctx,
    Collection<? extends K> keys) {
    if (keys == null || keys.isEmpty())
        return Collections.emptyMap();

    // Map all keys to local node for local caches.
    if (ctx.config().getCacheMode() == LOCAL)
        return F.asMap(ctx.localNode(), (Collection<K>)keys);

    long topVer = ctx.discovery().topologyVersion();

    if (CU.affinityNodes(ctx, topVer).isEmpty())
        return Collections.emptyMap();

    if (keys.size() == 1)
        return Collections.singletonMap(ctx.affinity().primary(F.first(keys), topVer), (Collection<K>)keys);

    Map<GridNode, Collection<K>> map = new GridLeanMap<>(5);

    for (K k : keys) {
        GridNode primary = ctx.affinity().primary(k, topVer);

        Collection<K> mapped = map.get(primary);

        if (mapped == null)
            map.put(primary, mapped = new LinkedList<>());

        mapped.add(k);
    }

    return map;
}
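// Generic sketch of the key-to-primary-node grouping performed above. The primaryFor function stands in
// for ctx.affinity().primary(key, topVer) and N stands in for GridNode; both are assumptions made so the
// sketch can run without GridGain classes.
import java.util.*;
import java.util.function.Function;

final class KeyMappingSketch {
    static <N, K> Map<N, Collection<K>> mapKeysToNodes(Collection<? extends K> keys, Function<K, N> primaryFor) {
        if (keys == null || keys.isEmpty())
            return Collections.emptyMap();

        Map<N, Collection<K>> map = new HashMap<>();

        // Group every key under the node that is primary for it.
        for (K k : keys)
            map.computeIfAbsent(primaryFor.apply(k), n -> new LinkedList<>()).add(k);

        return map;
    }

    public static void main(String[] args) {
        // Toy affinity: keys map to one of two "nodes" by parity.
        Map<String, Collection<Integer>> m =
            mapKeysToNodes(Arrays.asList(1, 2, 3, 4), k -> k % 2 == 0 ? "node-even" : "node-odd");

        System.out.println(m); // e.g. {node-even=[2, 4], node-odd=[1, 3]} (HashMap order not guaranteed).
    }
}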
Collection<UUID> allIds = topVer > 0 ? F.nodeIds(CU.allNodes(cctx, topVer)) : null;
long timeout, GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());

    assert cctx != null;
    assert keys != null;
/**
 * @param topVer Topology version.
 * @return Nodes where set data request should be sent.
 * @throws GridException If all cache nodes left grid.
 */
@SuppressWarnings("unchecked")
private Collection<GridNode> dataNodes(long topVer) throws GridException {
    if (ctx.isLocal() || ctx.isReplicated())
        return Collections.singleton(ctx.localNode());

    Collection<GridNode> nodes;

    if (collocated) {
        List<GridNode> nodes0 = ctx.affinity().nodes(hdrPart, topVer);

        nodes = !nodes0.isEmpty() ?
            Collections.singleton(nodes0.contains(ctx.localNode()) ? ctx.localNode() : F.first(nodes0)) : nodes0;
    }
    else
        nodes = CU.affinityNodes(ctx, topVer);

    if (nodes.isEmpty())
        throw new GridException("Failed to get set data, all cache nodes left grid.");

    return nodes;
}
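// In the collocated case above the set's data is kept together with its header partition, so the request
// goes to exactly one owner of that partition, preferring the local node when it is among the owners.
// A stand-alone sketch of that selection rule follows; owners and localNode are assumed inputs standing
// in for ctx.affinity().nodes(hdrPart, topVer) and ctx.localNode().
import java.util.*;

final class CollocatedNodeSketch {
    static <N> Collection<N> dataNodes(List<N> owners, N localNode) {
        if (owners.isEmpty())
            throw new IllegalStateException("Failed to get set data, all cache nodes left grid.");

        // Prefer the local node if it owns the header partition; otherwise take the first owner.
        return Collections.singleton(owners.contains(localNode) ? localNode : owners.get(0));
    }
}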
/**
 * @param cctx Context.
 * @param tx Transaction.
 * @param failedNodeId ID of the failed node that started the transaction.
 */
@SuppressWarnings("ConstantConditions")
public GridCachePessimisticCheckCommittedTxFuture(GridCacheContext<K, V> cctx, GridCacheTxEx<K, V> tx,
    UUID failedNodeId) {
    super(cctx.kernalContext(), new SingleReducer<K, V>());

    this.cctx = cctx;
    this.tx = tx;
    this.failedNodeId = failedNodeId;

    nodes = new GridLeanMap<>();

    for (GridNode node : CU.allNodes(cctx, tx.topologyVersion()))
        nodes.put(node.id(), node);
}