/**
 * Returns whether this member provides storage for the region: a member with a
 * zero local max memory is an accessor and stores no data.
 */
@Override
protected boolean hasStorage() {
  return getLocalMaxMemory() != 0;
}
/** set fields that are only in PartitionProfile... */ public void fillInProfile(PartitionProfile profile) { // both isDataStore and numBuckets are not required for sending purposes, // but nice to have for toString debugging profile.isDataStore = getLocalMaxMemory() > 0; if (this.dataStore != null) { profile.numBuckets = this.dataStore.getBucketsManaged(); } profile.requiresNotification = this.requiresNotification; profile.localMaxMemory = getLocalMaxMemory(); profile.fixedPAttrs = this.fixedPAttrs; // shutdownAll profile.shutDownAllStatus = shutDownAllStatus; }
/** set fields that are only in PartitionProfile... */ public void fillInProfile(PartitionProfile profile) { // both isDataStore and numBuckets are not required for sending purposes, // but nice to have for toString debugging profile.isDataStore = getLocalMaxMemory() > 0; if (this.dataStore != null) { profile.numBuckets = this.dataStore.getBucketsManaged(); } profile.requiresNotification = this.requiresNotification; profile.localMaxMemory = getLocalMaxMemory(); profile.fixedPAttrs = this.fixedPAttrs; // shutdownAll profile.shutDownAllStatus = shutDownAllStatus; }
/**
 * Formats a local-max-memory log message for this partitioned region,
 * reporting the configured limit and the current size in megabytes.
 */
private String getLocalMaxMemoryLogMessage(String logStr, long localBytesInUse) {
  final long currentSizeMb = localBytesInUse / PartitionedRegionHelper.BYTES_PER_MB;
  final StringBuilder message = new StringBuilder("Partitioned Region ");
  message.append(this.partitionedRegion.getFullPath())
      .append(logStr)
      .append(this.partitionedRegion.getLocalMaxMemory())
      .append(" Mb, current size is ")
      .append(currentSizeMb)
      .append(" Mb");
  return message.toString();
}
/**
 * This starts creating the index.
 *
 * If this VM hosts data (dataStore != null) the index is built on every local
 * bucket; otherwise this must be an accessor VM (localMaxMemory == 0) and only
 * an empty PartitionedIndex wrapper is created.
 *
 * @return the created PartitionedIndex
 * @throws IndexCreationException if this VM has no data store yet is
 *         configured with a nonzero local max memory (inconsistent state)
 */
public PartitionedIndex call() throws IndexCreationException, IndexNameConflictException, IndexExistsException, ForceReattemptException {
  // List list = p_list;
  PartitionedIndex prIndex = null;
  if (dataStore != null) {
    // data-store VM: build the index over all locally-managed buckets
    prIndex = createIndexOnPRBuckets();
  } else {
    if (getLocalMaxMemory() != 0) {
      // configured to store data but the data store is missing — fail loudly
      throw new IndexCreationException(LocalizedStrings.PartitionedRegion_DATA_STORE_ON_THIS_VM_IS_NULL_AND_THE_LOCAL_MAX_MEMORY_IS_NOT_ZERO_0.toLocalizedString(Long.valueOf(getLocalMaxMemory())));
    }
    logger.info(LocalizedMessage.create(LocalizedStrings.PartitionedRegion_THIS_IS_AN_ACCESSOR_VM_AND_DOESNT_CONTAIN_DATA));
    // accessor VM: create an index shell with no bucket indexes
    prIndex = new PartitionedIndex(indexType, indexName, PartitionedRegion.this, indexedExpression, fromClause, imports);
  }
  hasPartitionedIndex = true;
  return prIndex;
}
/** * This starts creating the index. */ public PartitionedIndex call() throws IndexCreationException, IndexNameConflictException, IndexExistsException, ForceReattemptException { // List list = p_list; PartitionedIndex prIndex = null; if (dataStore != null){ prIndex = createIndexOnPRBuckets(); } else { if (getLocalMaxMemory() != 0 ) { throw new IndexCreationException(LocalizedStrings. PartitionedRegion_DATA_STORE_ON_THIS_VM_IS_NULL_AND_THE_LOCAL_MAX_MEMORY_IS_NOT_ZERO_0.toLocalizedString( Long.valueOf(getLocalMaxMemory()))); } logger.info(LocalizedStrings.PartitionedRegion_THIS_IS_AN_ACCESSOR_VM_AND_DOESNT_CONTAIN_DATA); prIndex = new PartitionedIndex(indexType, indexName, PartitionedRegion.this, indexedExpression, fromClause, imports); } hasPartitionedIndex = true; return prIndex; }
/** * The 3rd step of EvictionAttributes validation, where mutation is acceptible * This should be done before buckets are created. Validate EvictionAttributes * with respect to localMaxMemory potentially changing the eviction * attributes. * * @see AttributesFactory#validateAttributes(RegionAttributes) * @see #validateDistributedEvictionAttributes(EvictionAttributes) */ void validateEvictionAttributesAgainstLocalMaxMemory() { final EvictionAttributes ea = pr.getEvictionAttributes(); if (pr.getLocalMaxMemory()==0 && !ea.getAction().isNone()) { // This is an accessor which won't ever do eviction, say so logger.info(LocalizedMessage.create( LocalizedStrings.PartitionedRegion_EVICTIONATTRIBUTES_0_WILL_HAVE_NO_EFFECT_1_2, new Object[] { ea, pr.getFullPath(), Integer.valueOf(pr.localMaxMemory)})); } }
/**
 * Applies a byte delta to the PR-wide and local memory statistics, and — when
 * entry eviction is not possible — logs a warning the first time the local
 * bytes in use cross above localMaxMemory and an info message when they fall
 * back at or below it. exceededLocalMaxMemoryLimit acts as a latch so each
 * transition is logged exactly once.
 *
 * @param memoryDelta signed number of bytes to add to the usage counters
 */
protected void updateMemoryStats(final long memoryDelta) {
  // Update stats
  this.partitionedRegion.getPrStats().incBytesInUse(memoryDelta);
  final long locBytes = this.bytesInUse.addAndGet(memoryDelta);
  if (!this.partitionedRegion.isEntryEvictionPossible()) {
    StringId logStr = null;
    // only check for exceeding local max memory if we're not evicting entries.
    // TODO, investigate precision issues with cast to long
    if (!this.exceededLocalMaxMemoryLimit) { // previously OK
      if (locBytes > this.maximumLocalBytes) { // not OK now — log the crossing
        this.exceededLocalMaxMemoryLimit = true;
        logStr = LocalizedStrings.PartitionedRegionDataStore_PARTITIONED_REGION_0_HAS_EXCEEDED_LOCAL_MAXIMUM_MEMORY_CONFIGURATION_2_MB_CURRENT_SIZE_IS_3_MB;
      }
    } else {
      if (locBytes <= this.maximumLocalBytes) { // recovered — log the return below the limit
        this.exceededLocalMaxMemoryLimit = false;
        logStr = LocalizedStrings.PartitionedRegionDataStore_PARTITIONED_REGION_0_IS_AT_OR_BELOW_LOCAL_MAXIMUM_MEMORY_CONFIGURATION_2_MB_CURRENT_SIZE_IS_3_MB;
      }
    }
    if (logStr != null) {
      // NOTE(review): the message's placeholders appear to be numbered 0, 2, 3,
      // so logStr occupies the (unused) index-1 slot — confirm against the
      // StringId template before reordering these arguments.
      Object[] logArgs = new Object[] { this.partitionedRegion.getFullPath(), logStr, Long.valueOf(this.partitionedRegion.getLocalMaxMemory()), Long.valueOf(locBytes / PartitionedRegionHelper.BYTES_PER_MB) };
      if (this.exceededLocalMaxMemoryLimit) {
        logger.warn(LocalizedMessage.create(logStr, logArgs));
      } else {
        logger.info(LocalizedMessage.create(logStr, logArgs));
      }
    }
  }
}
/**
 * Validates that persistence for data stores matches between members: every
 * ACCESSOR_DATASTORE node recorded in the shared configuration must agree
 * with this region's data policy on whether it is persistent.
 *
 * @param prconf the shared partition region configuration; nothing is
 *        validated when it is null or when this member is an accessor
 * @throws IllegalStateException if any data-store node disagrees on persistence
 */
void validatePersistentMatchBetweenDataStores(PartitionRegionConfig prconf) {
  // Accessors (localMaxMemory == 0) store no data, so there is nothing to match.
  if (pr.getLocalMaxMemory() == 0 || prconf == null) {
    return;
  }
  // FIX: the original computed this flag and then never used it, re-evaluating
  // the identical DataPolicy comparison for every node in the loop.
  final boolean isPersistent =
      pr.getAttributes().getDataPolicy() == DataPolicy.PERSISTENT_PARTITION;
  for (Node n : prconf.getNodes()) {
    if (n.getPRType() != Node.ACCESSOR_DATASTORE) {
      continue;
    }
    if (n.isPersistent() != isPersistent) {
      throw new IllegalStateException(
          "DataPolicy for Datastore members should all be persistent or not.");
    }
  }
}
/**
 * The 3rd step of EvictionAttributes validation, where mutation is acceptible
 * This should be done before buckets are created. Validate EvictionAttributes
 * with respect to localMaxMemory potentially changing the eviction
 * attributes.
 *
 * @see AttributesFactory#validateAttributes(RegionAttributes)
 * @see #validateDistributedEvictionAttributes(EvictionAttributes)
 */
void validateEvictionAttributesAgainstLocalMaxMemory() {
  final EvictionAttributes ea = pr.getEvictionAttributes();
  // An accessor (localMaxMemory == 0) never hosts buckets, so any configured
  // eviction action can never fire.
  if (pr.getLocalMaxMemory() == 0 && !ea.getAction().isNone()) {
    // This is an accessor which won't ever do eviction, say so.
    // NOTE(review): pr.logger here appears to be the i18n log writer, which
    // accepts a StringId plus args directly — confirm before converting this
    // call to the LocalizedMessage.create(...) style used elsewhere.
    pr.logger.info(
        LocalizedStrings.
        PartitionedRegion_EVICTIONATTRIBUTES_0_WILL_HAVE_NO_EFFECT_1_2,
        new Object[] { ea, pr.getFullPath(), Integer.valueOf(pr.localMaxMemory) });
  }
}
@Override
public void run2() throws CacheException {
  final long ONE_MEG = 1024L * 1024L;
  final PartitionedRegion pr1 = (PartitionedRegion)cache.getRegion("PR1");
  final PartitionedRegion pr2 = (PartitionedRegion)cache.getRegion("PR2");
  // The LRU stats limit is tracked in bytes while localMaxMemory is configured
  // in megabytes, so the limit divided by 1 MB must equal the configured value.
  assertEquals(pr1.getLocalMaxMemory(), ((AbstractLRURegionMap)pr1.entries)
      ._getLruList().stats().getLimit() / ONE_MEG);
  assertEquals(pr2.getLocalMaxMemory(), ((AbstractLRURegionMap)pr2.entries)
      ._getLruList().stats().getLimit() / ONE_MEG);
  // Both regions were created with a destroys limit of 1000.
  assertEquals(1000, ((AbstractLRURegionMap)pr1.entries)._getLruList()
      .stats().getDestroysLimit());
  assertEquals(1000, ((AbstractLRURegionMap)pr2.entries)._getLruList()
      .stats().getDestroysLimit());
} });
@Override
public void run2() throws CacheException {
  final long ONE_MEG = 1024L * 1024L;
  final PartitionedRegion pr1 = (PartitionedRegion)cache.getRegion("PR1");
  final PartitionedRegion pr2 = (PartitionedRegion)cache.getRegion("PR2");
  // Verify the byte-based LRU limit matches the MB-based localMaxMemory setting.
  assertEquals(pr1.getLocalMaxMemory(), ((AbstractLRURegionMap)pr1.entries)
      ._getLruList().stats().getLimit() / ONE_MEG);
  assertEquals(pr2.getLocalMaxMemory(), ((AbstractLRURegionMap)pr2.entries)
      ._getLruList().stats().getLimit() / ONE_MEG);
  // Both regions should carry the configured destroys limit of 1000.
  assertEquals(1000, ((AbstractLRURegionMap)pr1.entries)._getLruList()
      .stats().getDestroysLimit());
  assertEquals(1000, ((AbstractLRURegionMap)pr2.entries)._getLruList()
      .stats().getDestroysLimit());
} });
/**
 * Creates PartitionedRegionDataStore for dataStorage of PR and starts a
 * PartitionService to handle remote operations on this DataStore from other
 * participating nodes.
 *
 * @param pr PartitionedRegion associated with this DataStore.
 */
PartitionedRegionDataStore(final PartitionedRegion pr) {
  final int bucketCount = pr.getTotalNumberOfBuckets();
  this.localBucket2RegionMap = new ConcurrentHashMap<Integer, BucketRegion>(bucketCount);
  this.partitionedRegion = pr;
  this.bucketCreationLock = new StoppableReentrantReadWriteLock(pr.getCancelCriterion());
  if (pr.getAttributes().getCacheLoader() != null) {
    this.loader = pr.getAttributes().getCacheLoader();
    if (logger.isDebugEnabled()) {
      logger.debug("Installing cache loader from partitioned region attributes: {}", loader);
    }
  }
  // FIX: widen to long BEFORE multiplying. localMaxMemory (MB) and
  // BYTES_PER_MB are both ints, so the int product overflows for
  // configurations above 2047 MB even though the field is a long.
  this.maximumLocalBytes = pr.getLocalMaxMemory() * (long) PartitionedRegionHelper.BYTES_PER_MB;
  this.bucketStats = new RegionPerfStats(pr.getCache(), pr.getCachePerfStats(),
      "partition-" + pr.getName());
  // Tracks keys clients have registered interest in, keyed by bucket.
  this.keysOfInterest = new ConcurrentHashMap();
}
public PRLoad getLoad(PartitionedRegion pr) { PartitionedRegionDataStore ds = pr.getDataStore(); int configuredBucketCount = pr.getTotalNumberOfBuckets(); PRLoad prLoad = new PRLoad( configuredBucketCount, pr.getLocalMaxMemory()); // key: bid, value: size for(Integer bidInt : ds.getAllLocalBucketIds()) { int bid = bidInt.intValue(); BucketAdvisor bucketAdvisor = pr.getRegionAdvisor(). getBucket(bid).getBucketAdvisor(); //Wait for a primary to exist for this bucket, because //it might be this member. bucketAdvisor.getPrimary(); boolean isPrimary = pr.getRegionAdvisor(). getBucket(bid).getBucketAdvisor().isPrimary(); prLoad.addBucket(bid, 1, isPrimary ? 1 : 0); } return prLoad; }
public PRLoad getLoad(PartitionedRegion pr) { PartitionedRegionDataStore ds = pr.getDataStore(); int configuredBucketCount = pr.getTotalNumberOfBuckets(); PRLoad prLoad = new PRLoad( configuredBucketCount, pr.getLocalMaxMemory()); // key: bid, value: size for(Integer bidInt : ds.getAllLocalBucketIds()) { int bid = bidInt.intValue(); BucketAdvisor bucketAdvisor = pr.getRegionAdvisor(). getBucket(bid).getBucketAdvisor(); //Wait for a primary to exist for this bucket, because //it might be this member. bucketAdvisor.getPrimary(); boolean isPrimary = pr.getRegionAdvisor(). getBucket(bid).getBucketAdvisor().isPrimary(); prLoad.addBucket(bid, 1, isPrimary ? 1 : 0); } return prLoad; }
if (policy.withPartitioning()) { PartitionedRegion pr = (PartitionedRegion)baseRegion; assert pr.getLocalMaxMemory() > 0: "Executing " + "a query in non data store node."; bucketId = PartitionedRegionHelper.getHashKey(pr, Operation.GET_ENTRY,
public PRLoad getLoad(PartitionedRegion pr) { PartitionedRegionDataStore ds = pr.getDataStore(); int configuredBucketCount = pr.getTotalNumberOfBuckets(); PRLoad prLoad = new PRLoad( configuredBucketCount, pr.getLocalMaxMemory()); // key: bid, value: size for(Integer bidInt : ds.getAllLocalBucketIds()) { int bid = bidInt.intValue(); long bucketSize = ds.getBucketSize(bid); if(bucketSize < MIN_BUCKET_SIZE) { bucketSize = MIN_BUCKET_SIZE; } BucketAdvisor bucketAdvisor = pr.getRegionAdvisor(). getBucket(bid).getBucketAdvisor(); //Wait for a primary to exist for this bucket, because //it might be this member. bucketAdvisor.getPrimary(); boolean isPrimary = pr.getRegionAdvisor(). getBucket(bid).getBucketAdvisor().isPrimary(); prLoad.addBucket(bid, bucketSize, isPrimary ? 1 : 0); } return prLoad; }
public PRLoad getLoad(PartitionedRegion pr) { PartitionedRegionDataStore ds = pr.getDataStore(); int configuredBucketCount = pr.getTotalNumberOfBuckets(); PRLoad prLoad = new PRLoad( configuredBucketCount, pr.getLocalMaxMemory()); // key: bid, value: size for(Integer bidInt : ds.getAllLocalBucketIds()) { int bid = bidInt.intValue(); long bucketSize = ds.getBucketSize(bid); if(bucketSize < MIN_BUCKET_SIZE) { bucketSize = MIN_BUCKET_SIZE; } BucketAdvisor bucketAdvisor = pr.getRegionAdvisor(). getBucket(bid).getBucketAdvisor(); //Wait for a primary to exist for this bucket, because //it might be this member. bucketAdvisor.getPrimary(); boolean isPrimary = pr.getRegionAdvisor(). getBucket(bid).getBucketAdvisor().isPrimary(); prLoad.addBucket(bid, bucketSize, isPrimary ? 1 : 0); } return prLoad; }
@Override
public void run2() throws CacheException {
  PartitionedRegion pr1 = (PartitionedRegion)cache.getRegion("PR1");
  // Debug output: configured limit, eviction count, entry bytes (MB), and heap used (MB).
  getLogWriter().info("dddd local"+pr1.getLocalMaxMemory());
  getLogWriter().info("dddd local evi"+((AbstractLRURegionMap)pr1.entries)._getLruList().stats()
      .getEvictions());
  getLogWriter().info("dddd local entries"+((AbstractLRURegionMap)pr1.entries)._getLruList().stats()
      .getCounter()/(1024*1024));
  HeapMemoryMonitor hmm = ((InternalResourceManager) cache.getResourceManager()).getHeapMonitor();
  long memused=hmm.getBytesUsed()/(1024*1024);
  getLogWriter().info("dddd local memused= "+memused);
  // PR1 evicts by destroying: every eviction must be accounted for as a destroy.
  assertTrue(((AbstractLRURegionMap)pr1.entries)._getLruList().stats()
      .getEvictions() >= extraEntries / 2);
  assertEquals(((AbstractLRURegionMap)pr1.entries)._getLruList().stats()
      .getDestroys(), ((AbstractLRURegionMap)pr1.entries)._getLruList()
      .stats().getEvictions());
  // PR2 evicts by overflowing to disk: evictions occur but no destroys, and the
  // overflowed entries must show up in the disk region stats.
  PartitionedRegion pr2 = (PartitionedRegion)cache.getRegion("PR2");
  assertTrue(((AbstractLRURegionMap)pr2.entries)._getLruList().stats()
      .getEvictions() >= extraEntries / 2);
  assertEquals(((AbstractLRURegionMap)pr2.entries)._getLruList().stats()
      .getDestroys(), 0);
  assertTrue(pr2.getDiskRegionStats().getNumOverflowOnDisk()>= extraEntries / 2);
} });
/**
 * Verifies memory-LRU eviction for a partitioned region versus a distributed
 * region: the PR's eviction maximum tracks its localMaxMemory while the DR
 * falls back to MemLRUCapacityController's default; then each region is filled
 * just past its limit and a small number of evictions (1 or 2) is expected.
 */
public void testMemLruForPRAndDR() {
  createCache();
  createPartitionedRegion(true, EvictionAlgorithm.LRU_MEMORY, "PR1", 4, 1, 1000, 40);
  createDistRegionWithMemEvictionAttr();
  final PartitionedRegion pr = (PartitionedRegion) cache.getRegion("PR1");
  final DistributedRegion dr = (DistributedRegion) cache.getRegion("DR1");

  // The PR inherits its eviction maximum from localMaxMemory; the DR uses the default.
  assertEquals(pr.getLocalMaxMemory(), pr.getEvictionAttributes().getMaximum());
  assertEquals(MemLRUCapacityController.DEFAULT_MAXIMUM_MEGABYTES,
      dr.getEvictionAttributes().getMaximum());

  // Put 41 one-megabyte values into the 40 MB PR — just over the limit.
  for (int key = 0; key < 41; key++) {
    pr.put(new Integer(key), new byte[1024 * 1024]);
  }
  final long prEvictions =
      ((AbstractLRURegionMap) pr.entries)._getLruList().stats().getEvictions();
  assertTrue(1 <= prEvictions);
  assertTrue(prEvictions <= 2);

  // Put 11 one-megabyte values into the DR — just over its default limit.
  for (int key = 0; key < 11; key++) {
    dr.put(new Integer(key), new byte[1024 * 1024]);
  }
  final long drEvictions =
      ((AbstractLRURegionMap) dr.entries)._getLruList().stats().getEvictions();
  assertTrue(1 <= drEvictions);
  assertTrue(drEvictions <= 2);
}