/** {@inheritDoc} */
@Override public GridCursor<CacheDataRow> find(
    CacheSearchRow lower,
    CacheSearchRow upper,
    TreeRowClosure<CacheSearchRow, CacheDataRow> c,
    Object x
) throws IgniteCheckedException {
    // If there is a group of caches, lower and upper bounds will not be null here.
    // Unbounded scan on a persistent group with data-page scan enabled and no filter
    // (or an MVCC-aware filter): read data pages directly instead of walking the tree.
    // The cast is null-safe — when c == null, (MvccDataPageClosure)c is just null.
    if (lower == null && upper == null && grp.persistenceEnabled() && dataPageScanEnabled.get()
        && (c == null || c instanceof MvccDataPageClosure))
        return scanDataPages(asRowData(x), (MvccDataPageClosure)c);

    // Record that the tree-traversal path was taken; presumably scanDataPages()
    // sets this flag to TRUE on the fast path — confirm in the enclosing class.
    lastFindWithDataPageScan = FALSE;

    return super.find(lower, upper, c, x);
}
/**
 * See {@link IdleVerifyUtility#checkPartitionsPageCrcSum(FilePageStore, CacheGroupContext, int, byte,
 * AtomicBoolean)}.
 */
public static void checkPartitionsPageCrcSum(
    @Nullable FilePageStoreManager pageStoreMgr,
    CacheGroupContext grpCtx,
    int partId,
    byte pageType,
    AtomicBoolean cpFlag
) throws IgniteCheckedException, GridNotIdleException {
    // Nothing to verify for in-memory groups or when no page store manager is available.
    if (!grpCtx.persistenceEnabled() || pageStoreMgr == null)
        return;

    FilePageStore store = (FilePageStore)pageStoreMgr.getStore(grpCtx.groupId(), partId);

    // Delegate to the overload that works directly on the resolved page store.
    checkPartitionsPageCrcSum(store, grpCtx, partId, pageType, cpFlag);
}
/**
 * Dumps partitions info (see {@link #dumpPartitionsInfo(CacheGroupContext, IgniteLogger)})
 * for every non-local persistent cache group.
 *
 * @param cctx Shared context.
 * @param log Logger.
 * @throws IgniteCheckedException If failed.
 */
private static void dumpPartitionsInfo(GridCacheSharedContext cctx, IgniteLogger log) throws IgniteCheckedException {
    for (CacheGroupContext grpCtx : cctx.cache().cacheGroups()) {
        // Only non-local persistent groups carry on-disk partition state worth dumping.
        if (!grpCtx.isLocal() && grpCtx.persistenceEnabled())
            dumpPartitionsInfo(grpCtx, log);
    }
}
final CacheGroupContext grpCtx = ignite.context().cache().cacheGroup(grpId);

// Unknown or purely in-memory group: nothing on disk to validate, so the index
// is counted as checked immediately.
// NOTE(review): fragment — the branch body continues beyond this chunk.
if (grpCtx == null || !grpCtx.persistenceEnabled()) {
    integrityCheckedIndexes.incrementAndGet();
/**
 * For testing purposes only.
 * @param toState State to set.
 */
public void setState(GridDhtPartitionState toState) {
    if (grp.persistenceEnabled() && grp.walEnabled()) {
        synchronized (this) {
            long state0 = state.get();

            // Best-effort CAS; the result is deliberately ignored (test-only code).
            // setPartState presumably packs the new state into the composite long — confirm.
            this.state.compareAndSet(state0, setPartState(state0, toState));

            try {
                // Record the transition in the WAL so it survives crash recovery.
                ctx.wal().log(new PartitionMetaStateRecord(grp.groupId(), id, toState, updateCounter()));
            }
            catch (IgniteCheckedException e) {
                U.error(log, "Error while writing to log", e);
            }
        }
    }
    else
        // In-memory (or WAL-disabled) path: apply the state directly.
        restoreState(toState);
}
/** * Checks if local reads are allowed for the given partition and reserves the partition when needed. If this * method returns {@code true}, then {@link #releaseForFastLocalGet(int, AffinityTopologyVersion)} method * must be called after the read is completed. * * @param part Partition. * @param topVer Topology version. * @return {@code True} if cache 'get' operation is allowed to get entry locally. */ public boolean reserveForFastLocalGet(int part, AffinityTopologyVersion topVer) { boolean result = affinityNode() && rebalanceEnabled() && checkAndReservePartition(part, topVer); // When persistence is enabled, only reading from partitions with OWNING state is allowed. assert !result || !group().persistenceEnabled() || topology().partitionState(localNodeId(), part) == OWNING : "result=" + result + ", persistenceEnabled=" + group().persistenceEnabled() + ", partitionState=" + topology().partitionState(localNodeId(), part) + ", replicated=" + isReplicated() + ", part=" + part; return result; }
CacheGroupContext gctx = tup.get1();

// Skip in-memory cache groups — only persistent ones are processed by this loop.
// NOTE(review): fragment — the enclosing loop continues beyond this chunk.
if (!gctx.persistenceEnabled())
    continue;
/**
 * Releases the partition that was reserved by a call to
 * {@link #reserveForFastLocalGet(int, AffinityTopologyVersion)}.
 *
 * @param part Partition to release.
 * @param topVer Topology version.
 */
public void releaseForFastLocalGet(int part, AffinityTopologyVersion topVer) {
    assert affinityNode();

    // Replicated in-memory caches take no per-partition reservation — nothing to release.
    if (isReplicated() && !group().persistenceEnabled())
        return;

    GridDhtLocalPartition locPart = topology().localPartition(part, topVer, false);

    assert locPart != null && locPart.state() == OWNING : "partition evicted after reserveForFastLocalGet " +
        "[part=" + part + ", locPart=" + locPart + ", topVer=" + topVer + ']';

    locPart.release();
}
// Decides whether the incoming value (version 'ver') should overwrite the stored row.
@Override public boolean apply(@Nullable CacheDataRow row) {
    boolean update0;

    // Version of the stored row, or of this entry when no row is present.
    GridCacheVersion currentVer = row != null ? row.version() : GridCacheMapEntry.this.ver;

    boolean isStartVer = cctx.shared().versions().isStartVersion(currentVer);

    if (cctx.group().persistenceEnabled()) {
        if (!isStartVer) {
            // ATOMIC caches compare with the dedicated comparator; TX caches use natural ordering.
            if (cctx.atomic())
                update0 = ATOMIC_VER_COMPARATOR.compare(currentVer, ver) < 0;
            else
                update0 = currentVer.compareTo(ver) < 0;
        }
        else
            // Start version means the stored value was never written — always overwrite.
            update0 = true;
    }
    else
        // In-memory: only overwrite entries still at their start version.
        update0 = isStartVer;

    // A deleted-but-not-removed entry must also be overwritten (non-preload path only).
    update0 |= (!preload && deletedUnlocked());

    return update0;
} };
/**
 * Lazily creates the pending-entries (TTL) tree for an in-memory cache.
 *
 * @param cctx Cache context.
 * @throws IgniteCheckedException If failed.
 */
protected void initPendingTree(GridCacheContext cctx) throws IgniteCheckedException {
    // This implementation is for in-memory groups only.
    assert !cctx.group().persistenceEnabled();

    // Tree is needed only on affinity nodes with eager TTL, and only once.
    if (!cctx.affinityNode() || !cctx.ttl().eagerTtlEnabled() || pendingEntries != null)
        return;

    long rootPage = allocateForTree();

    pendingEntries = new PendingEntriesTree(
        grp,
        "PendingEntries",
        grp.dataRegion().pageMemory(),
        rootPage,
        grp.reuseList(),
        true);
}
// Pending-tree upgrade applies only to persistent groups on affinity nodes.
// NOTE(review): fragment — the branch body continues beyond this chunk.
if (!grp.persistenceEnabled() || !grp.affinityNode()) {
    if (!grp.persistenceEnabled())
        log.info("Skip pending tree upgrade for non-persistent cache group: [grpId=" + grp.groupId() +
            ", grpName=" + grp.name() + ']');
// Remove the index only for in-memory groups; for persistent groups it must survive restarts.
// NOTE(review): semantics inferred from the flag name — confirm at the usage site.
boolean rmvIdx = !cache.context().group().persistenceEnabled();
/**
 * Generates the next partition update counter and, on the very first update,
 * logs the partition state to the WAL for crash recovery.
 *
 * @param cacheId ID of cache initiated counter update.
 * @param topVer Topology version for current operation.
 * @param primary Primary-node flag, passed through to the shared-group counter callback.
 * @param primaryCntr Counter value assigned on the primary, or {@code null} to use the locally generated one.
 * @return Next update index.
 */
public long nextUpdateCounter(int cacheId, AffinityTopologyVersion topVer, boolean primary,
    @Nullable Long primaryCntr) {
    long nextCntr = store.nextUpdateCounter();

    // Shared groups track per-cache counters separately; prefer the primary's value when given.
    if (grp.sharedGroup())
        grp.onPartitionCounterUpdate(cacheId, id, primaryCntr != null ? primaryCntr : nextCntr, topVer, primary);

    // This is first update in partition, we should log partition state information for further crash recovery.
    if (nextCntr == 1) {
        if (grp.persistenceEnabled() && grp.walEnabled())
            try {
                ctx.wal().log(new PartitionMetaStateRecord(grp.groupId(), id, state(), 0));
            }
            catch (IgniteCheckedException e) {
                // Losing this record would compromise recovery — escalate as a critical failure.
                U.error(log, "Failed to log partition state snapshot to WAL.", e);

                ctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e));
            }
    }

    return nextCntr;
}
// For replicated in-memory caches the decision depends on whether rebalance finished
// for this topology version. NOTE(review): fragment — branch continues beyond this chunk.
if (isReplicated() && !group().persistenceEnabled()) {
    boolean rebFinished = top.rebalanceFinished(topVer);
// A page store exists only for persistent groups.
// NOTE(review): fragment — branch body continues beyond this chunk.
if (grpCtx.persistenceEnabled()) {
    FilePageStore pageStore = null;
// State transitions of persistent WAL-enabled groups are performed under the monitor,
// presumably so the state change and its WAL record stay consistent — confirm.
// NOTE(review): fragment — synchronized body continues beyond this chunk.
if (grp.persistenceEnabled() && grp.walEnabled()) {
    synchronized (this) {
        GridDhtPartitionState prevState = state();
/** * @param op Update operation. * @param val Write value. * @param writeVer Write version. * @param expireTime Expire time. * @param updCntr Update counter. */ protected void logUpdate(GridCacheOperation op, CacheObject val, GridCacheVersion writeVer, long expireTime, long updCntr) throws IgniteCheckedException { // We log individual updates only in ATOMIC cache. assert cctx.atomic(); try { if (cctx.group().persistenceEnabled() && cctx.group().walEnabled()) cctx.shared().wal().log(new DataRecord(new DataEntry( cctx.cacheId(), key, val, op, null, writeVer, expireTime, partition(), updCntr))); } catch (StorageException e) { throw new IgniteCheckedException("Failed to log ATOMIC cache update [key=" + key + ", op=" + op + ", val=" + val + ']', e); } }
/**
 * Starts the cache group: builds the affinity cache, topology, and offheap manager,
 * and (outside recovery mode) initializes IO and fires the group-created callback.
 *
 * @throws IgniteCheckedException If failed.
 */
public void start() throws IgniteCheckedException {
    aff = new GridAffinityAssignmentCache(ctx.kernalContext(),
        cacheOrGroupName(),
        grpId,
        ccfg.getAffinity(),
        ccfg.getNodeFilter(),
        ccfg.getBackups(),
        ccfg.getCacheMode() == LOCAL,
        persistenceEnabled());

    // LOCAL caches have no distributed partition topology.
    if (ccfg.getCacheMode() != LOCAL)
        top = new GridDhtPartitionTopologyImpl(ctx, this);

    try {
        // Persistent groups get the page-store-backed manager; others the plain in-memory one.
        // NOTE(review): the field 'persistenceEnabled' is used here while the method
        // persistenceEnabled() is used above — confirm they cannot disagree.
        offheapMgr = persistenceEnabled ? new GridCacheOffheapManager() : new IgniteCacheOffheapManagerImpl();
    }
    catch (Exception e) {
        throw new IgniteCheckedException("Failed to initialize offheap manager", e);
    }

    offheapMgr.start(ctx, this);

    if (!isRecoveryMode()) {
        initializeIO();

        ctx.affinity().onCacheGroupCreated(this);
    }
}
// Nothing to produce for in-memory groups.
// NOTE(review): fragment — enclosing method not visible in this chunk.
if (!cctx.group().persistenceEnabled())
    return null;
// Warn when native persistence is combined with a read/write-through CacheStore.
// NOTE(review): fragment — the warning message continues beyond this chunk.
if (cctx.group().persistenceEnabled() && (cfg.isWriteThrough() || cfg.isReadThrough()))
    U.quietAndWarn(log, "Both Ignite native persistence and CacheStore are configured for cache '" +
        cfg.getName() + "'. " +