/**
 * Enqueues destruction of a partition file and, when a group context is present,
 * wakes up the checkpointer so the destroy is actually processed.
 *
 * @param grpCtx Group context. Can be {@code null} in case of crash recovery.
 * @param grpId Group ID.
 * @param partId Partition ID.
 */
private void schedulePartitionDestroy(@Nullable CacheGroupContext grpCtx, int grpId, int partId) {
    // Destroy queue belongs to the scheduled checkpoint; guard mutation with the monitor.
    synchronized (this) {
        scheduledCp.destroyQueue.addDestroyRequest(grpCtx, grpId, partId);
    }

    if (log.isDebugEnabled()) {
        log.debug("Partition file has been scheduled to destroy [grpId=" + grpId +
            ", partId=" + partId + "]");
    }

    // During crash recovery there is no group context and no checkpoint to trigger.
    if (grpCtx != null)
        wakeupForCheckpoint(PARTITION_DESTROY_CHECKPOINT_TIMEOUT, "partition destroy");
}
/** Verifies that no static cache group, cache groups, or caches remain registered after stop. */
private void assertCachesAfterStop(IgniteEx igniteEx) {
    assertNull(igniteEx
        .context()
        .cache()
        .cacheGroup(CU.cacheId(STATIC_CACHE_CACHE_GROUP_NAME)));

    assertTrue(igniteEx.context().cache().cacheGroups().isEmpty());

    for (int idx = 0; idx < CACHES_COUNT; idx++) {
        String cacheName = STATIC_CACHE_PREFIX + idx;

        assertNull(igniteEx.context().cache().cache(cacheName));
        assertNull(igniteEx.context().cache().internalCache(cacheName));
    }
}
/**
 * Checks WAL disabled for cache group.
 *
 * @param grpId Group id.
 * @return {@code True} if WAL disable for group. {@code False} If not.
 */
public boolean isDisabled(int grpId) {
    CacheGroupContext grp = cctx.cache().cacheGroup(grpId);

    // Unknown group: treat WAL as enabled (not disabled).
    if (grp == null)
        return false;

    return !grp.walEnabled();
}
/** {@inheritDoc} */
@SuppressWarnings("ForLoopReplaceableByForEach")
@Override public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
    super.finishUnmarshal(ctx, ldr);

    CacheGroupContext grp = ctx.cache().cacheGroup(grpId);

    // Group may have been stopped concurrently; nothing left to unmarshal in that case.
    if (grp == null)
        return;

    // Indexed loop is deliberate (see @SuppressWarnings above) — presumably to avoid
    // iterator allocation on this path; TODO confirm before converting to for-each.
    for (CacheEntryInfoCollection col : infos().values()) {
        List<GridCacheEntryInfo> entries = col.infos();

        for (int i = 0; i < entries.size(); i++)
            entries.get(i).unmarshal(grp.cacheObjectContext(), ldr);
    }
}
/**
 * Submits asynchronous partition-hash calculations for every local partition
 * of every known cache group from the given set.
 *
 * @param grpIds Cache group IDs to process; unknown groups are skipped.
 * @param cpFlag Checkpoint flag passed through to each calculation task.
 * @return Futures of per-partition hash maps, one per scheduled partition.
 */
private List<Future<Map<PartitionKeyV2, PartitionHashRecordV2>>> calcPartitionHashAsync(
    Set<Integer> grpIds,
    AtomicBoolean cpFlag
) {
    List<Future<Map<PartitionKeyV2, PartitionHashRecordV2>>> futs = new ArrayList<>();

    for (Integer id : grpIds) {
        CacheGroupContext gctx = ignite.context().cache().cacheGroup(id);

        // Group not started on this node — nothing to hash.
        if (gctx == null)
            continue;

        for (GridDhtLocalPartition locPart : gctx.topology().localPartitions())
            futs.add(calculatePartitionHashAsync(gctx, locPart, cpFlag));
    }

    return futs;
}
/**
 * Updates partition map in all caches.
 *
 * @param nodeId Node message received from.
 * @param msg Partitions single message.
 */
private void updatePartitionSingleMap(UUID nodeId, GridDhtPartitionsSingleMessage msg) {
    msgs.put(nodeId, msg);

    for (Map.Entry<Integer, GridDhtPartitionMap> e : msg.partitions().entrySet()) {
        Integer grpId = e.getKey();

        CacheGroupContext grp = cctx.cache().cacheGroup(grpId);

        GridDhtPartitionTopology top;

        // Fall back to the client topology when the group is not started locally.
        if (grp != null)
            top = grp.topology();
        else
            top = cctx.exchange().clientTopology(grpId, events().discoveryCache());

        top.update(exchId, e.getValue(), false);
    }
}
/** {@inheritDoc} */
@Override public List<List<ClusterNode>> cachePartitionOwners(String cacheName) {
    if (cacheName == null)
        throw new NullPointerException("Null cache name.");

    DynamicCacheDescriptor desc = ctx.affinity().caches().get(CU.cacheId(cacheName));

    if (desc == null)
        throw new IllegalArgumentException("Invalid cache name: " + cacheName);

    // LOCAL caches have no distributed partition owners.
    if (desc.cacheConfiguration().getCacheMode() == CacheMode.LOCAL)
        return Collections.emptyList();

    int grpId = desc.groupId();

    CacheGroupContext grp = ctx.cache().cacheGroup(grpId);

    if (grp != null)
        return grp.topology().allOwners();

    // Group not started locally — use the client-side topology instead.
    GridDhtPartitionTopology clientTop = ctx.exchange().clientTopologyIfExists(grpId);

    assert clientTop != null : cacheName;

    return clientTop.allOwners();
}
/**
 * @param gridNumber Grid number.
 * @param cacheName Cache name.
 * @return Partitions states for given cache name.
 */
private List<GridDhtPartitionState> getPartitionsStates(int gridNumber, String cacheName) {
    CacheGroupContext grpCtx = grid(gridNumber).context().cache().cacheGroup(CU.cacheId(cacheName));

    GridDhtPartitionTopologyImpl topology = (GridDhtPartitionTopologyImpl)grpCtx.topology();

    // Collect the current state of every local partition.
    return topology.localPartitions()
        .stream()
        .map(part -> part.state())
        .collect(Collectors.toList());
}
final int grpId = CU.cacheId(groupName); CacheGroupContext grp = ignite.context().cache().cacheGroup(grpId);
/**
 * Gets an existing affinity holder for the given cache group or creates one.
 *
 * @param topVer Topology version.
 * @param desc Cache descriptor.
 * @return Cache holder.
 * @throws IgniteCheckedException If failed.
 */
private CacheGroupHolder groupHolder(AffinityTopologyVersion topVer, final CacheGroupDescriptor desc)
    throws IgniteCheckedException {
    CacheGroupHolder cacheGrp = grpHolders.get(desc.groupId());

    // Fast path: a holder was already created for this group.
    if (cacheGrp != null)
        return cacheGrp;

    final CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());

    if (grp == null) {
        // Group is not started locally: register the response handler BEFORE creating
        // the client-style holder so no affinity assignment response is missed.
        cctx.io().addCacheGroupHandler(desc.groupId(), GridDhtAffinityAssignmentResponse.class,
            new IgniteBiInClosure<UUID, GridDhtAffinityAssignmentResponse>() {
                @Override public void apply(UUID nodeId, GridDhtAffinityAssignmentResponse res) {
                    processAffinityAssignmentResponse(nodeId, res);
                }
            }
        );

        cacheGrp = CacheGroupHolder2.create(cctx, desc, topVer, null);
    }
    else
        cacheGrp = new CacheGroupHolder1(grp, null);

    CacheGroupHolder old = grpHolders.put(desc.groupId(), cacheGrp);

    // No concurrent holder creation for the same group is expected here.
    assert old == null : old;

    return cacheGrp;
}
@Override public void applyx(IgniteInternalFuture future) { for (Integer grpId0 : session0.disabledGrps) { CacheGroupContext grp = cctx.cache().cacheGroup(grpId0); if (grp != null) grp.topology().ownMoving(topVer); else if (log.isDebugEnabled()) log.debug("Cache group was destroyed before checkpoint finished, [grpId=" + grpId0 + ']'); } if (log.isDebugEnabled()) log.debug("Refresh partitions due to rebalance finished"); cctx.exchange().refreshPartitions(); } });
/** Verifies that the static cache group holds exactly the expected caches and each cache points back to it. */
private void assertCaches(IgniteEx igniteEx) {
    @Nullable CacheGroupContext cacheGroup = igniteEx
        .context()
        .cache()
        .cacheGroup(CU.cacheId(STATIC_CACHE_CACHE_GROUP_NAME));

    Collection<GridCacheContext> caches = cacheGroup.caches();

    assertEquals(CACHES_COUNT, caches.size());

    for (GridCacheContext cacheContext : caches)
        assertEquals(cacheContext.group(), cacheGroup);
}
}
if (!hld.client() && cctx.cache().cacheGroup(hld.groupId()) == null) { int grpId = hld.groupId();
/**
 * @param fut Exchange future.
 * @param grpDesc Cache group descriptor.
 * @throws IgniteCheckedException If failed.
 */
private void initStartedGroupOnCoordinator(GridDhtPartitionsExchangeFuture fut, final CacheGroupDescriptor grpDesc)
    throws IgniteCheckedException {
    assert grpDesc != null && grpDesc.groupId() != 0 : grpDesc;

    // LOCAL caches are not processed via the distributed exchange.
    if (grpDesc.config().getCacheMode() == LOCAL)
        return;

    int grpId = grpDesc.groupId();

    CacheGroupHolder grpHolder = grpHolders.get(grpId);

    CacheGroupContext grp = cctx.kernalContext().cache().cacheGroup(grpId);

    if (grpHolder == null) {
        // No holder yet: a real holder when the group is started locally,
        // otherwise a client-style holder for tracking remote affinity.
        grpHolder = grp != null ?
            new CacheGroupHolder1(grp, null) :
            CacheGroupHolder2.create(cctx, grpDesc, fut.initialVersion(), null);

        CacheGroupHolder old = grpHolders.put(grpId, grpHolder);

        assert old == null : old;

        calculateAndInit(fut.events(), grpHolder.affinity(), fut.initialVersion());
    }
    else if (grpHolder.client() && grp != null) {
        // Group was started locally after the client holder was created:
        // upgrade to a real holder, reusing the already computed affinity.
        assert grpHolder.affinity().idealAssignment() != null;

        grpHolder = new CacheGroupHolder1(grp, grpHolder.affinity());

        grpHolders.put(grpId, grpHolder);
    }
}
initStartedGroupOnCoordinator(fut, grpDesc); else { CacheGroupContext grp = cctx.cache().cacheGroup(grpDesc.groupId());
/**
 * Initializes metastorage and, unless the legacy metastore partition id is preserved,
 * performs the partition-id migration via a temporary storage.
 *
 * @param db Database manager used for initialization and old-partition destroy scheduling.
 * @throws IgniteCheckedException If initialization or migration fails.
 */
public void init(GridCacheDatabaseSharedManager db) throws IgniteCheckedException {
    regionMetrics.clear();

    initInternal(db);

    if (!PRESERVE_LEGACY_METASTORAGE_PARTITION_ID) {
        GridCacheProcessor gcProcessor = cctx.kernalContext().cache();

        if (partId == OLD_METASTORE_PARTITION)
            // Still on the legacy partition: copy its data aside — presumably restored
            // on a later init pass; TODO confirm the migration phase ordering.
            gcProcessor.setTmpStorage(copyDataToTmpStorage());
        else if (gcProcessor.getTmpStorage() != null) {
            // Previously stashed data exists: restore it into the current partition
            // and drop the temporary storage reference.
            restoreDataFromTmpStorage(gcProcessor.getTmpStorage());

            gcProcessor.setTmpStorage(null);

            // remove old partitions
            CacheGroupContext cgc = cctx.cache().cacheGroup(METASTORAGE_CACHE_ID);

            if (cgc != null) {
                db.schedulePartitionDestroy(METASTORAGE_CACHE_ID, OLD_METASTORE_PARTITION);

                db.schedulePartitionDestroy(METASTORAGE_CACHE_ID, PageIdAllocator.INDEX_PARTITION);
            }
        }
    }
}
/**
 * Checks partition update counters of the given node against previously recorded values,
 * collecting any mismatch (or zero counter) into {@code issues}.
 *
 * @param ignite Node whose partitions are checked.
 * @param issues Output list of detected inconsistencies.
 * @param partMap Expected update counter per partition id; a partition seen for the
 *      first time has its current counter recorded as the expectation.
 */
private void checkUpdCounter(IgniteEx ignite, List<String> issues, HashMap<Integer, Long> partMap) {
    final CacheGroupContext grpCtx = ignite.context().cache().cacheGroup(CU.cacheId(CACHE_NAME));

    assertNotNull(grpCtx);

    GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl)grpCtx.topology();

    List<GridDhtLocalPartition> locParts = top.localPartitions();

    for (GridDhtLocalPartition part : locParts) {
        Long cnt = partMap.get(part.id());

        // First visit: remember the current counter as the expected value.
        if (cnt == null)
            partMap.put(part.id(), part.updateCounter());

        // Report divergence from the recorded counter, or a suspicious zero counter.
        // Fixed diagnostic message: the original concatenated the node name and "Part"
        // with no separator ("Node name ignite-0Part = ...").
        if ((cnt != null && part.updateCounter() != cnt) || part.updateCounter() == 0)
            issues.add("Node name = " + ignite.name() + ", part = " + part.id() +
                ", updCounter = " + part.updateCounter());
    }
}
}
/**
 * Creates failover predicate which generates error during transaction commit.
 *
 * @param failOnPrimary If {@code true} index should be failed on transaction primary node, otherwise on backup.
 * @param errorSupplier Supplier to create various errors.
 * @param errorConsumer Consumer to track unexpected errors while committing.
 */
private BiFunction<IgniteEx, SearchRow, Throwable> failoverPredicate(
    boolean failOnPrimary,
    Supplier<Throwable> errorSupplier,
    Consumer<Throwable> errorConsumer
) {
    return (ignite, row) -> {
        try {
            GridDhtPartitionTopology top = ignite.context().cache().cacheGroup(row.cacheId()).topology();

            GridDhtLocalPartition part = top.localPartition(row.key().partition());

            assertTrue("Illegal partition state for mapped tx: " + part, part != null && part.state() == OWNING);

            boolean isPrimary = part.primary(top.readyTopologyVersion());

            // Fail only on the node kind (primary/backup) this predicate targets.
            return isPrimary == failOnPrimary ? errorSupplier.get() : null;
        }
        catch (Throwable e) {
            errorConsumer.accept(e);

            throw e;
        }
    };
}
/**
 * @param client Client flag.
 * @param canWrite Can write flag.
 * @param safe Safe flag.
 * @throws Exception If failed to start a new node.
 */
private void checkNewNode(
    boolean client,
    boolean canWrite,
    boolean safe
) throws Exception {
    this.client = client;

    try {
        IgniteEx newNode = (IgniteEx)startGrid("newNode");

        CacheGroupContext grp = newNode.context().cache().cacheGroup(CU.cacheId(DEFAULT_CACHE_NAME));

        assertTrue(grp.needsRecovery());

        verifyCacheOps(canWrite, safe, newNode);

        validateQuery(safe, newNode);
    }
    finally {
        // Always tear the node down and reset the client flag for subsequent checks.
        stopGrid("newNode", false);

        this.client = false;
    }
}
/** Assigns partition states (persistent groups) or sizes (in-memory groups) for all non-local cache groups in parallel. */
private void assignPartitionsStates() {
    try {
        U.doInParallel(
            cctx.kernalContext().getSystemExecutorService(),
            nonLocalCacheGroupDescriptors(),
            desc -> {
                CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());

                GridDhtPartitionTopology top;

                // Not started locally — use the client-side topology.
                if (grp == null)
                    top = cctx.exchange().clientTopology(desc.groupId(), events().discoveryCache());
                else
                    top = grp.topology();

                boolean persistent = CU.isPersistentCache(desc.config(),
                    cctx.gridConfig().getDataStorageConfiguration());

                if (persistent)
                    assignPartitionStates(top);
                else
                    assignPartitionSizes(top);

                return null;
            }
        );
    }
    catch (IgniteCheckedException e) {
        throw new IgniteException("Failed to assign partition states", e);
    }

    timeBag.finishGlobalStage("Assign partitions states");
}