/**
 * Returns the name of the partitioned region this object operates on.
 *
 * @return the partitioned region's name
 */
private String getName() {
  return this.partitionedRegion.getName();
}
/**
 * Returns the name of the partitioned region this object operates on.
 *
 * @return the partitioned region's name
 */
private String getName() {
  return this.partitionedRegion.getName();
}
/**
 * Delegates to the underlying proxy.
 *
 * @return the proxy's name
 */
public String getName() {
  return proxy.getName();
}
/**
 * Delegates to the underlying proxy.
 *
 * @return the proxy's name
 */
public String getName() {
  return proxy.getName();
}
/**
 * Returns a human-readable description of this executor: its arguments,
 * filter, and the name of the target partitioned region.
 */
@Override
public String toString() {
  return "[ PartitionedRegionFunctionExecutor:"
      + "args=" + this.args
      + ";filter=" + this.filter
      + ";region=" + this.pr.getName()
      + "]";
}

/* (non-Javadoc)
/**
 * Convenience method to get a region name for logging/exception messages.
 * If this region is used as a bucket of a partitioned region, the name of
 * the OWNING partitioned region is returned (not the internal bucket
 * region's name); otherwise this region's own name is returned.
 *
 * @return this region's name, or the owning partitioned region's name when
 *         this region is a bucket
 */
public String getDisplayName() {
  if (this.isUsedForPartitionedRegionBucket()) {
    return this.getPartitionedRegion().getName();
  }
  return this.regionName;
}
/**
 * Convenience method to get a region name for logging/exception messages.
 * If this region is used as a bucket of a partitioned region, the name of
 * the OWNING partitioned region is returned (not the internal bucket
 * region's name); otherwise this region's own name is returned.
 *
 * @return this region's name, or the owning partitioned region's name when
 *         this region is a bucket
 */
public String getDisplayName() {
  if (this.isUsedForPartitionedRegionBucket()) {
    return this.getPartitionedRegion().getName();
  }
  return this.regionName;
}
/**
 * Returns the total number of queued events: the sum of the sizes of all
 * shadow partitioned regions plus any events temporarily queued on the
 * sender.
 */
@Override
public int size() {
  int total = 0;
  for (PartitionedRegion shadowPR : this.userRegionNameToshadowPRMap.values()) {
    if (logger.isDebugEnabled()) {
      logger.debug("The name of the queue region is {} and the size is {}. keyset size is {}",
          shadowPR.getName(), shadowPR.size(), shadowPR.keys().size());
    }
    total += shadowPR.size();
  }
  return total + sender.getTmpQueuedEventSize();
}
/**
 * Executes the VerifyCustomPartitioningFunction on every root region in the
 * cache, skipping the region named "routingRegion". Every root region in
 * this test is expected to be a PartitionedRegion — TODO confirm against the
 * test's region setup.
 */
public static void HydraTask_executeVerifyCustomPartitioningFunction() {
  Function f = FunctionService.getFunction("parReg.tx.VerifyCustomPartitioningFunction");
  Cache myCache = CacheHelper.getCache();
  Set allRegions = myCache.rootRegions();
  // Enhanced for-loop replaces the raw-typed explicit Iterator; same
  // iteration order and casts, clearer intent.
  for (Object regionObj : allRegions) {
    PartitionedRegion pr = (PartitionedRegion) regionObj;
    if (!pr.getName().equalsIgnoreCase("routingRegion")) {
      testInstance.executeVerifyCustomPartitioningFunction(pr, f);
    }
  }
}
/**
 * Flushes the HDFS event queue for this region.
 *
 * @param maxWaitTime maximum time to wait for the flush
 * @throws UnsupportedOperationException if this region is not HDFS-backed
 */
@Override
public void flushHDFSQueue(int maxWaitTime) {
  // Happy path first: only HDFS-backed regions have a queue to flush.
  if (this.isHDFSRegion()) {
    HDFSFlushQueueFunction.flushQueue(this, maxWaitTime);
    return;
  }
  throw new UnsupportedOperationException(
      LocalizedStrings.HOPLOG_DOES_NOT_USE_HDFSSTORE.toLocalizedString(getName()));
}
/**
 * Flushes the HDFS event queue for this region.
 *
 * @param maxWaitTime maximum time to wait for the flush
 * @throws UnsupportedOperationException if this region is not HDFS-backed
 */
@Override
public void flushHDFSQueue(int maxWaitTime) {
  // Only HDFS-backed regions maintain a flushable queue; reject the call
  // otherwise with a localized error naming this region.
  if (!this.isHDFSRegion()) {
    throw new UnsupportedOperationException(
        LocalizedStrings.HOPLOG_DOES_NOT_USE_HDFSSTORE
            .toLocalizedString(getName()));
  }
  HDFSFlushQueueFunction.flushQueue(this, maxWaitTime);
}
/** Logs the name and current size of the given partitioned region. */
protected void logRegionSize(PartitionedRegion aRegion) {
  String name = aRegion.getName();
  int expectedSize = aRegion.size();
  hydra.Log.getLogWriter().info(
      "Region " + name + " has expected size " + expectedSize);
}
/**
 * Looks up the child partitioned region and then its parent by path,
 * asserting each lookup succeeded, and destroys both.
 */
public void run2() throws CacheException {
  Cache cache = getCache();
  String childPath = Region.SEPARATOR + PR_PREFIX + "1";
  PartitionedRegion pr = (PartitionedRegion) cache.getRegion(childPath);
  // Build the failure message from the requested path, NOT pr.getName():
  // dereferencing a null pr would throw an NPE and mask the assertion.
  assertNotNull("Null region is " + childPath, pr);
  pr.destroyRegion();
  String parentPath = Region.SEPARATOR + PR_PREFIX;
  pr = (PartitionedRegion) cache.getRegion(parentPath);
  assertNotNull("Null region is " + parentPath, pr);
  pr.destroyRegion();
}
};
/**
 * Returns the total number of queued events: the sum of the sizes of all
 * shadow partitioned regions plus any events temporarily queued on the
 * sender.
 */
@Override
public int size() {
  int total = 0;
  for (PartitionedRegion shadowPR : this.userRegionNameToshadowPRMap.values()) {
    if (logger.finerEnabled()) {
      logger.finer("The name of the queue region is " + shadowPR.getName()
          + " and the size is " + shadowPR.size() + " keyset size is "
          + shadowPR.keys().size());
    }
    total += shadowPR.size();
  }
  return total + sender.getTmpQueuedEventSize();
}
/**
 * Creates the primary bucket with the given id on every colocated client
 * region (regions 2 through NUM_COLOCATED_REGIONS).
 *
 * @param id the bucket id to create
 */
public void afterPrimary(int id) {
  for (int regionNum = 2; regionNum <= NUM_COLOCATED_REGIONS; regionNum++) {
    String regionPath = Region.SEPARATOR + "clientRegion" + regionNum;
    PartitionedRegion viewPR = (PartitionedRegion) theCache.getRegion(regionPath);
    hydra.Log.getLogWriter().info(
        "Creating bucket with id " + id + " for the region " + viewPR.getName());
    PartitionManager.createPrimaryBucket(viewPR, id, true, true);
  }
}
/**
 * Looks up the hoplog organizer for the local copy of the given bucket.
 *
 * @param region the partitioned region that owns the bucket
 * @param bucketId id of the bucket whose organizer is requested
 * @return the local bucket's hoplog organizer
 * @throws BucketMovedException if the bucket is no longer hosted locally
 *         (e.g. it was rebalanced to another member)
 */
private HoplogOrganizer getOrganizer(PartitionedRegion region, int bucketId) {
  BucketRegion br = region.getDataStore().getLocalBucketById(bucketId);
  if (br == null) { // got rebalanced or something
    // Surface the lost bucket to the caller rather than returning null.
    throw new BucketMovedException("Bucket region is no longer available", bucketId, region.getName());
  }
  return br.getHoplogOrganizer();
}
private HoplogOrganizer getOrganizer(PartitionedRegion region, int bucketId) { BucketRegion br = region.getDataStore().getLocalBucketById(bucketId); if (br == null) { // got rebalanced or something throw new BucketMovedException("Bucket region is no longer available. BucketId: "+ bucketId + " HdfsRegion: " + region.getName()); } return br.getHoplogOrganizer(); }
/**
 * Looks up the test partitioned region by path, asserts it exists, and
 * destroys it.
 */
public void run2() {
  Cache cache = getCache();
  String path = Region.SEPARATOR + "testLocalMaxMemoryInPartitionedRegion0";
  PartitionedRegion pr = (PartitionedRegion) cache.getRegion(path);
  // Use the looked-up path in the message rather than pr.getName():
  // calling getName() on a null pr would throw an NPE before
  // assertNotNull could report the failure.
  assertNotNull("Name of region : " + path, pr);
  pr.destroyRegion();
}
};
/**
 * Verifies that each partitioned region in the configured index range
 * exists and has no local data store (accessor-only member).
 */
public void run2() {
  Cache cache = getCache();
  for (int i = innerStartIndexForRegion; i < innerEndIndexForRegion; i++) {
    String path = Region.SEPARATOR + innerPrPrefix + i;
    PartitionedRegion pr = (PartitionedRegion) cache.getRegion(path);
    // Message is built from the requested path, not pr.getName(), so a
    // null pr fails the assertion instead of throwing an NPE here.
    assertNotNull("This Partition Region is null " + path, pr);
    assertNull("DataStore should be null", pr.getDataStore());
  }
}
};
/**
 * Initializes the cache and creates the partitioned region described by the
 * given region description, storing them in the theCache/aRegion fields.
 *
 * @param regionDescriptName name of the region description to look up
 */
protected void initialize(String regionDescriptName) {
  theCache = CacheHelper.createCache("cache1");
  String name = RegionHelper.getRegionDescription(regionDescriptName).getRegionName();
  Log.getLogWriter().info("Creating region " + name);
  RegionAttributes attrs = RegionHelper.getRegionAttributes(regionDescriptName);
  aRegion = (PartitionedRegion) theCache.createRegion(name, attrs);
  Log.getLogWriter().info("Completed creating region " + aRegion.getName());
}