public int getSizeOfLocalPrimaryBuckets() {
  int sizeOfLocalPrimaries = 0;
  Set<BucketRegion> primaryBuckets = getAllLocalPrimaryBucketRegions();
  for (BucketRegion br : primaryBuckets) {
    sizeOfLocalPrimaries += br.size();
  }
  return sizeOfLocalPrimaries;
}
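A minimal usage sketch, assuming a cache with a partitioned region named "PR1" and a member that actually hosts data; the variable names here are illustrative, not from the source:

PartitionedRegion pr = (PartitionedRegion) cache.getRegion("PR1");
PartitionedRegionDataStore ds = pr.getDataStore();
if (ds != null) { // null on pure accessor members that host no buckets
  int localPrimarySize = ds.getSizeOfLocalPrimaryBuckets();
  getLogWriter().info("entries in local primary buckets= " + localPrimarySize);
}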
private Map<Integer, SizeEntry> getSizeLocallyForPrimary(Collection<Integer> bucketIds, boolean estimate) {
  if (this.localBucket2RegionMap.isEmpty()) {
    return Collections.emptyMap();
  }
  Map<Integer, SizeEntry> mySizeMap = new HashMap<Integer, SizeEntry>(this.localBucket2RegionMap.size());
  for (Integer bucketId : bucketIds) {
    try {
      BucketRegion r = getInitializedBucketForId(null, bucketId);
      mySizeMap.put(bucketId, new SizeEntry(estimate ? r.sizeEstimate() : r.size(),
          r.getBucketAdvisor().isPrimary()));
      // if (getLogWriter().fineEnabled() && r.getBucketAdvisor().isPrimary()) {
      //   r.verifyTombstoneCount();
      // }
    } catch (PrimaryBucketException skip) {
      // sizeEstimate() throws this exception because it will not read from HDFS on a
      // secondary bucket; the bucket is retried in PartitionedRegion.getSizeForHDFS().
      // Fixes bug 49033.
      continue;
    } catch (ForceReattemptException skip) {
      continue;
    } catch (RegionDestroyedException skip) {
      continue;
    }
  } // for
  return mySizeMap;
}
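The skip-and-retry pattern above generalizes: buckets that fail are simply omitted from the result so the caller can retry them on another member. A self-contained sketch of that shape, with hypothetical names (BucketSizer, BucketMovedException are illustrations, not GemFire API):

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

// Hypothetical illustration of the same skip-and-retry aggregation shape.
final class LocalSizeCollector {
  interface BucketSizer { int sizeOf(int bucketId) throws BucketMovedException; }
  static final class BucketMovedException extends Exception {}

  // Buckets that throw are left out of the map; the caller retries them elsewhere.
  static Map<Integer, Integer> sizeLocally(Collection<Integer> bucketIds, BucketSizer sizer) {
    Map<Integer, Integer> sizes = new HashMap<>();
    for (int id : bucketIds) {
      try {
        sizes.put(id, sizer.sizeOf(id));
      } catch (BucketMovedException skip) {
        // omit: the bucket is no longer (or not yet) hosted here
      }
    }
    return sizes;
  }
}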
@Override
public int size() {
  int size = 0;
  for (Integer bId : buckets) {
    BucketRegion br = proxy.getDataStore().getLocalBucketById(bId);
    if (br != null) { // the bucket may no longer be hosted on this member
      size += br.size();
    }
  }
  return size;
}
@Override
public void run2() throws CacheException {
  final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(regionName);
  Set<BucketRegion> buckets = pr.getDataStore().getAllLocalBucketRegions();
  for (BucketRegion br : buckets) {
    getLogWriter().info("Bucket " + br.getId() + " size= " + br.size());
  }
}
});
public Object call() throws Exception {
  PartitionedRegion custRegion = (PartitionedRegion) getGemfireCache().getRegion(CUSTOMER);
  Set<BucketRegion> bucketSet = custRegion.getDataStore().getAllLocalPrimaryBucketRegions();
  int count = 0;
  for (BucketRegion br : bucketSet) {
    count += br.size();
  }
  return count;
}
});
public int localSizeForProcessor() {
  int size = 0;
  for (PartitionedRegion prQ : this.userRegionNameToshadowPRMap.values()) {
    if (((PartitionedRegion) prQ.getRegion()).getDataStore() != null) {
      Set<BucketRegion> primaryBuckets =
          ((PartitionedRegion) prQ.getRegion()).getDataStore().getAllLocalPrimaryBucketRegions();
      for (BucketRegion br : primaryBuckets) {
        // only count the buckets assigned to this dispatcher thread
        if (br.getId() % this.nDispatcher == this.index) {
          size += br.size();
        }
      }
    }
    if (logger.isDebugEnabled()) {
      // note: size is cumulative across the queue regions visited so far
      logger.debug("The name of the queue region is {} and the size is {}", prQ.getFullPath(), size);
    }
  }
  return size /* + sender.getTmpQueuedEventSize() */;
}
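The br.getId() % nDispatcher == index test is a simple static partitioning of buckets across dispatcher threads. A hedged standalone sketch of that assignment (method name is hypothetical):

// Each of nDispatchers threads claims the buckets whose id maps to its index,
// so every bucket is counted by exactly one dispatcher.
static boolean ownedByDispatcher(int bucketId, int dispatcherIndex, int nDispatchers) {
  return bucketId % nDispatchers == dispatcherIndex;
}

// e.g. with 3 dispatchers, bucket 7 belongs to dispatcher 1, since 7 % 3 == 1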
public Object call() throws Exception {
  final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(regionName);
  for (final Iterator i = pr.getDataStore().getAllLocalBuckets().iterator(); i.hasNext();) {
    final Map.Entry entry = (Map.Entry) i.next();
    final BucketRegion bucketRegion = (BucketRegion) entry.getValue();
    if (bucketRegion == null) {
      continue;
    }
    getLogWriter().info("FINAL bucket= " + bucketRegion.getFullPath() + " size= " + bucketRegion.size());
  }
  return Long.valueOf(((AbstractLRURegionMap) pr.entries)._getLruList().stats().getEvictions());
}
};
logger.debug("Starting PR entries chunking for {} entries", keys.size());
public void testCheckEntryLruEvictionsIn1DataStore() {
  int extraEntries = 10;
  createCache();
  maxEntries = 20;
  createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 5, 1, 1000, maxEntries);
  final PartitionedRegion pr = (PartitionedRegion) cache.getRegion("PR1");
  getLogWriter().info("PR eviction maximum= " + pr.getEvictionAttributes().getMaximum());
  for (int counter = 1; counter <= maxEntries + extraEntries; counter++) {
    pr.put(Integer.valueOf(counter), new byte[1 * 1024 * 1024]);
  }
  // 30 puts into a region capped at 20 entries must evict exactly the 10 extras
  assertEquals(extraEntries, ((AbstractLRURegionMap) pr.entries)._getLruList().stats().getEvictions());
  for (final Iterator i = pr.getDataStore().getAllLocalBuckets().iterator(); i.hasNext();) {
    final Map.Entry entry = (Map.Entry) i.next();
    final BucketRegion bucketRegion = (BucketRegion) entry.getValue();
    if (bucketRegion == null) {
      continue;
    }
    getLogWriter().info("FINAL bucket= " + bucketRegion.getFullPath() + " size= " + bucketRegion.size()
        + " count= " + bucketRegion.entryCount());
    // the 20 retained entries are spread evenly, 4 per local bucket
    assertEquals(4, bucketRegion.size());
  }
}
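The arithmetic behind those assertions, as a hedged standalone check (assuming the integer keys hash evenly across 5 buckets; the variable names are illustrative):

int maxEntries = 20, extraEntries = 10, totalBuckets = 5;
int puts = maxEntries + extraEntries;              // 30 puts in total
int expectedEvictions = puts - maxEntries;         // LRU keeps 20, so 10 evictions
int expectedPerBucket = maxEntries / totalBuckets; // 20 / 5 = 4 entries per bucket
assert expectedEvictions == extraEntries && expectedPerBucket == 4;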
@Override
public int sizeEstimate() {
  if (isHDFSReadWriteRegion()) {
    try {
      checkForPrimary();
      ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
      if (q == null) {
        return 0;
      }
      int hdfsBucketRegionSize = q.getBucketRegionQueue(partitionedRegion, getId()).size();
      int hoplogEstimate = (int) getHoplogOrganizer().sizeEstimate();
      if (logger.isDebugEnabled()) {
        logger.debug("for bucket {} sizeEstimate returning {}", getName(),
            hdfsBucketRegionSize + hoplogEstimate);
      }
      return hdfsBucketRegionSize + hoplogEstimate;
    } catch (ForceReattemptException e) {
      throw new PrimaryBucketException(e.getLocalizedMessage(), e);
    }
  }
  return size();
}
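The estimate is the sum of two parts: events still queued in memory plus an estimate over data already flushed to HDFS. A hedged sketch of that composition (the interfaces here are hypothetical stand-ins, not GemFire types):

// Hypothetical composition: estimate = unflushed queue size + persisted-store estimate.
interface QueuedEvents { int size(); }
interface PersistedStore { long sizeEstimate(); }

static int estimateBucketSize(QueuedEvents queued, PersistedStore store) {
  if (queued == null) {
    return 0; // no queue view here: report empty and let the caller retry elsewhere
  }
  return queued.size() + (int) store.sizeEstimate();
}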
MapMessage map = new MapMessage();
map.put("BucketId", br.getId());
map.put("Size", br.size());
map.put("Bytes", br.getTotalBytes());
InternalDistributedMember m = pr.getBucketPrimary(br.getId());
private void destroyAllEntries(Set keysToDestroy, long batchKey) {
  for (Object key : keysToDestroy) {
    if (getCache().getLoggerI18n().fineEnabled()) {
      getCache().getLoggerI18n().fine(
          "Destroying the entries after creating ColumnBatch " + key + " batchid " + batchKey
              + " total size " + this.size() + " keysToDestroy size " + keysToDestroy.size());
    }
    EntryEventImpl event = EntryEventImpl.create(getPartitionedRegion(), Operation.DESTROY,
        null, null, null, false, this.getMyId());
    event.setKey(key);
    event.setBucketId(this.getId());
    TXStateInterface txState = event.getTXState(this);
    if (txState != null) {
      event.setRegion(this);
      txState.destroyExistingEntry(event, true, null);
    } else {
      this.getPartitionedRegion().basicDestroy(event, true, null);
    }
  }
  if (getCache().getLoggerI18n().fineEnabled()) {
    getCache().getLoggerI18n().fine(
        "Destroyed all for batchID " + batchKey + " total size " + this.size());
  }
}
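The branch on txState is the usual dispatch pattern: route the destroy through the active transaction if there is one, otherwise apply it directly to the region. A hedged standalone sketch of that shape (the Tx and Region types are hypothetical):

// Hypothetical shape of the transactional-vs-direct dispatch above.
interface Tx { void destroy(Object key); }
interface Region { void destroy(Object key); Tx currentTx(); }

static void destroyEntry(Region region, Object key) {
  Tx tx = region.currentTx();
  if (tx != null) {
    tx.destroy(key);     // buffered until the transaction commits
  } else {
    region.destroy(key); // applied immediately
  }
}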
logger.fine("Starting PR entries chunking for " + String.valueOf(keys.size()) + " entries");
@Override
public int sizeEstimate() {
  if (isHDFSReadWriteRegion()) {
    try {
      checkForPrimary();
      ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
      if (q == null) {
        return 0;
      }
      int hdfsBucketRegionSize = q.getBucketRegionQueue(partitionedRegion, getId()).size();
      int hoplogEstimate = (int) getHoplogOrganizer().sizeEstimate();
      if (getLogWriterI18n().fineEnabled()) {
        getLogWriterI18n().fine("for bucket " + getName() + " sizeEstimate returning "
            + (hdfsBucketRegionSize + hoplogEstimate));
      }
      return hdfsBucketRegionSize + hoplogEstimate;
    } catch (ForceReattemptException e) {
      throw new PrimaryBucketException(e.getLocalizedMessage(), e);
    }
  }
  return size();
}
try {
  bucketRegion = pr.getDataStore().getInitializedBucketForId(null, bucketId);
  totalRegionSize += bucketRegion.size();
} catch (ForceReattemptException e) {
  // assumed handling: skip the bucket while it is moving; callers typically retry it
}
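ForceReattemptException signals that the bucket was moving while it was read; callers usually retry the bucket rather than fail the whole size computation. A hedged sketch of that retry loop (the method name and retry policy are hypothetical):

// Re-read a bucket's size a bounded number of times while it rebalances,
// then give up with 0 so aggregation can proceed.
static int sizeWithRetry(PartitionedRegion pr, int bucketId, int maxAttempts) {
  for (int attempt = 0; attempt < maxAttempts; attempt++) {
    try {
      return pr.getDataStore().getInitializedBucketForId(null, bucketId).size();
    } catch (ForceReattemptException movedMidRead) {
      // bucket is in motion; loop and try again
    }
  }
  return 0; // assumption: treat a persistently moving bucket as empty for this pass
}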