/**
 * Fix for Bug#45917: advances the event sequence number (monotonically, via a
 * compare-and-set that only moves it upward) so that sequence numbers generated
 * after GII start from the latest value known in the system.
 */
public void updateEventSeqNum(long l) {
  Atomics.setIfGreater(eventSeqNum, l);
  if (logger.isDebugEnabled()) {
    logger.debug("WAN: On bucket {}, setting the seq number as {} before GII", getId(), l);
  }
}
/**
 * Collects the ids of all locally hosted buckets that are primaries and whose
 * id falls in the half-open range {@code [low, high)}.
 *
 * @param low inclusive lower bound of the bucket-id range
 * @param high exclusive upper bound of the bucket-id range
 * @return the matching bucket ids (possibly empty, never null)
 */
public Set<Integer> getAllLocalPrimaryBucketIdsBetweenProvidedIds(int low, int high) {
  Set<Integer> primaryIds = new HashSet<Integer>();
  for (Map.Entry<Integer, BucketRegion> entry : getAllLocalBuckets()) {
    BucketRegion candidate = entry.getValue();
    if (candidate.getBucketAdvisor().isPrimary() && (candidate.getId() >= low)
        && (candidate.getId() < high)) {
      primaryIds.add(Integer.valueOf(candidate.getId()));
    }
  }
  return primaryIds;
}
/**
 * Generates the next WAN tail key for this primary bucket by advancing the
 * shared sequence counter by the total bucket count, which keeps every value
 * congruent to this bucket's id modulo the bucket count.
 *
 * @return the current value of the sequence counter after the increment
 */
public long generateTailKey() {
  long tailKey = this.eventSeqNum.addAndGet(this.partitionedRegion.getTotalNumberOfBuckets());
  // A negative key (overflow) or one not congruent to our bucket id indicates
  // a corrupted sequence; log loudly but still return the value.
  if (tailKey < 0 || tailKey % getPartitionedRegion().getTotalNumberOfBuckets() != getId()) {
    logger.error("ERROR! The sequence number {} generated for the bucket {} is incorrect.",
        new Object[] {tailKey, getId()});
  }
  if (logger.isDebugEnabled()) {
    logger.debug("WAN: On primary bucket {}, setting the seq number as {}", getId(),
        this.eventSeqNum.get());
  }
  return eventSeqNum.get();
}
/**
 * Collects the ids of every locally hosted bucket for which this member is
 * currently the primary.
 *
 * @return the primary bucket ids (possibly empty, never null)
 */
public Set<Integer> getAllLocalPrimaryBucketIds() {
  Set<Integer> primaryIds = new HashSet<Integer>();
  for (Map.Entry<Integer, BucketRegion> entry : getAllLocalBuckets()) {
    BucketRegion candidate = entry.getValue();
    if (candidate.getBucketAdvisor().isPrimary()) {
      primaryIds.add(Integer.valueOf(candidate.getId()));
    }
  }
  return primaryIds;
}
/**
 * Cleans up after a failed bucket initialization: runs this bucket's
 * pre-destroy hook before delegating to the superclass cleanup.
 */
@Override
public void cleanupFailedInitialization() {
  preDestroyBucket(getId());
  super.cleanupFailedInitialization();
}
/**
 * Builds a human-readable summary of every non-empty local shadow-queue
 * bucket, one {@code bucketId=<id>:<keys>;} segment per bucket.
 *
 * @return the concatenated description; empty string if no local bucket holds entries
 */
public String displayContent() {
  // Fixes: removed the unused local `int size = 0;` and replaced StringBuffer
  // with StringBuilder (no concurrent access here), chaining appends instead
  // of concatenating inside a single append call.
  StringBuilder sb = new StringBuilder();
  for (PartitionedRegion prQ : this.userRegionNameToshadowPRMap.values()) {
    if (prQ != null && prQ.getDataStore() != null) {
      Set<BucketRegion> allLocalBuckets = prQ.getDataStore().getAllLocalBucketRegions();
      for (BucketRegion br : allLocalBuckets) {
        if (br.size() > 0) {
          sb.append("bucketId=").append(br.getId()).append(":").append(br.keySet()).append(";");
        }
      }
    }
  }
  return sb.toString();
}
/**
 * Looks up the Lucene index repository for the bucket that hosts {@code key}
 * in the user region.
 *
 * @param region the user region (used only for the error message)
 * @param key the entry key used to locate the hosting bucket
 * @param callbackArg the callback argument passed to bucket resolution
 * @return the repository for the hosting bucket
 * @throws BucketNotFoundException if no local user bucket hosts the key
 */
@Override
public IndexRepository getRepository(Region region, Object key, Object callbackArg)
    throws BucketNotFoundException {
  BucketRegion userBucket = userRegion.getBucketRegion(key, callbackArg);
  if (userBucket == null) {
    // Fixed missing space before "key" so the message reads "...region <r> key <k>...".
    throw new BucketNotFoundException("User bucket was not found for region " + region + " key "
        + key + " callbackarg " + callbackArg);
  }
  return getRepository(userBucket.getId());
}
/**
 * Pings every remote (secondary) member hosting this bucket to verify liveness.
 *
 * @return true if there are no secondaries, or all secondaries responded to the ping
 */
public boolean areSecondariesPingable() {
  Set<InternalDistributedMember> hostingservers =
      this.partitionedRegion.getRegionAdvisor().getBucketOwners(this.getId());
  // Exclude ourselves so only remote hosts are pinged.
  hostingservers.remove(cache.getDistributedSystem().getDistributedMember());
  // Fixes: braced the previously unbraced if statements; size() == 0 -> isEmpty().
  if (cache.getLogger().fineEnabled()) {
    cache.getLogger()
        .fine("Pinging secondaries of bucket " + this.getId() + " on servers " + hostingservers);
  }
  if (hostingservers.isEmpty()) {
    return true;
  }
  return ServerPingMessage.send(cache, hostingservers);
}
/**
 * Returns the index repositories for every local bucket in the function
 * context's bucket set.
 *
 * <p>For each bucket: if its index is already available, the bucket is empty,
 * or the caller asked to wait, the repository is fetched (possibly creating
 * it). Otherwise repository creation is kicked off asynchronously on the
 * waiting pool and {@link LuceneIndexCreationInProgressException} is thrown so
 * the caller can retry.
 *
 * @param ctx the function context carrying the target region and bucket set
 * @param waitForRepository whether to block on repository creation
 * @throws BucketNotFoundException if a bucket in the set is not hosted locally
 */
@Override
public Collection<IndexRepository> getRepositories(RegionFunctionContext ctx,
    boolean waitForRepository) throws BucketNotFoundException {
  Region<Object, Object> region = ctx.getDataSet();
  Set<Integer> buckets = ((InternalRegionFunctionContext) ctx).getLocalBucketSet(region);
  ArrayList<IndexRepository> repos = new ArrayList<IndexRepository>(buckets.size());
  for (Integer bucketId : buckets) {
    BucketRegion userBucket = userRegion.getDataStore().getLocalBucketById(bucketId);
    if (userBucket == null) {
      // Fixed missing space before "bucket id" in the message.
      throw new BucketNotFoundException(
          "User bucket was not found for region " + region + " bucket id " + bucketId);
    }
    if (index.isIndexAvailable(userBucket.getId()) || userBucket.isEmpty()
        || waitForRepository) {
      repos.add(getRepository(userBucket.getId()));
    } else {
      // Start repository creation in the background, then signal the caller
      // to retry once creation completes.
      waitingThreadPoolFromDM.execute(() -> {
        try {
          getRepository(userBucket.getId());
        } catch (Exception e) {
          logger.debug("Lucene Index creation in progress.", e);
        }
      });
      throw new LuceneIndexCreationInProgressException(
          "Lucene Index creation in progress for bucket: " + userBucket.getId());
    }
  }
  return repos;
}
// Initializes this bucket's WAN event sequence counter. Colocated buckets
// share the leader region's counter so all colocated buckets draw from one
// sequence; standalone buckets own their own counter seeded with the bucket id.
private void setEventSeqNum() {
  // For a shadow (WAN queue) PR colocated with a user PR, ensure the leader
  // region's bucket owns a counter before we alias it below.
  if (this.partitionedRegion.isShadowPR() && this.partitionedRegion.getColocatedWith() != null) {
    PartitionedRegion parentPR = ColocationHelper.getLeaderRegion(this.partitionedRegion);
    BucketRegion parentBucket = parentPR.getDataStore().getLocalBucketById(getId());
    // needs to be set only once.
    if (parentBucket.eventSeqNum == null) {
      parentBucket.eventSeqNum = new AtomicLong5(getId());
    }
  }
  if (this.partitionedRegion.getColocatedWith() == null) {
    // Standalone region: seed with the bucket id so generated keys stay
    // congruent to it (see generateTailKey).
    this.eventSeqNum = new AtomicLong5(getId());
  } else {
    // Colocated region: alias the leader region bucket's counter.
    PartitionedRegion parentPR = ColocationHelper.getLeaderRegion(this.partitionedRegion);
    BucketRegion parentBucket = parentPR.getDataStore().getLocalBucketById(getId());
    if (parentBucket == null && logger.isDebugEnabled()) {
      logger.debug("The parentBucket of region {} bucketId {} is NULL",
          this.partitionedRegion.getFullPath(), getId());
    }
    // Colocation guarantees the parent bucket is hosted here; fail fast otherwise.
    Assert.assertTrue(parentBucket != null);
    this.eventSeqNum = parentBucket.eventSeqNum;
  }
}
if (getBucketAdvisor().isPrimary()) { long key = this.eventSeqNum.addAndGet(this.partitionedRegion.getTotalNumberOfBuckets()); if (key < 0 || key % getPartitionedRegion().getTotalNumberOfBuckets() != getId()) { logger.error("ERROR! The sequence number {} generated for the bucket {} is incorrect.", new Object[] {key, getId()}); logger.debug("WAN: On primary bucket {}, setting the seq number as {}", getId(), this.eventSeqNum.get()); logger.debug("WAN: On secondary bucket {}, setting the seq number as {}", getId(), event.getTailKey());
BucketRegion bucket = bucketEntry.getValue(); if (bucket.getBucketAdvisor().isPrimary()) { int bId = bucket.getId(); if (bId % this.nDispatcher == this.index) { thisProcessorBuckets.add(bId);
/**
 * Creates and registers a mock data bucket for the given id, wires it into the
 * mocked region and data store (under both the id and its id+113 alias), and
 * eagerly computes its repository.
 */
@Override
protected BucketRegion setUpMockBucket(int id) throws BucketNotFoundException {
  int aliasId = id + 113;
  BucketRegion mockBucket = Mockito.mock(BucketRegion.class);
  when(mockBucket.getId()).thenReturn(id);
  // Resolve the bucket through both lookup paths, for the id and its alias.
  when(userRegion.getBucketRegion(eq(id), eq(null))).thenReturn(mockBucket);
  when(userDataStore.getLocalBucketById(eq(id))).thenReturn(mockBucket);
  when(userRegion.getBucketRegion(eq(aliasId), eq(null))).thenReturn(mockBucket);
  when(userDataStore.getLocalBucketById(eq(aliasId))).thenReturn(mockBucket);
  dataBuckets.put(id, mockBucket);
  repoManager.computeRepository(mockBucket.getId());
  return mockBucket;
}
/**
 * Notifies every registered {@code PartitionListener} that this bucket has
 * been removed, passing the bucket id and its key set.
 */
protected void invokePartitionListenerAfterBucketRemoved() {
  PartitionListener[] partitionListeners = getPartitionedRegion().getPartitionListeners();
  if (partitionListeners == null || partitionListeners.length == 0) {
    return;
  }
  // Idiom fix: enhanced-for over the array instead of an index loop.
  for (PartitionListener listener : partitionListeners) {
    if (listener != null) {
      listener.afterBucketRemoved(getId(), keySet());
    }
  }
}
/**
 * Notifies every registered {@code PartitionListener} that this bucket has
 * been created, passing the bucket id and its key set.
 */
protected void invokePartitionListenerAfterBucketCreated() {
  PartitionListener[] partitionListeners = getPartitionedRegion().getPartitionListeners();
  if (partitionListeners == null || partitionListeners.length == 0) {
    return;
  }
  // Idiom fix: enhanced-for over the array instead of an index loop.
  for (PartitionListener listener : partitionListeners) {
    if (listener != null) {
      listener.afterBucketCreated(getId(), keySet());
    }
  }
}
/**
 * Computes this dispatcher's share of the local queue size: the sum of the
 * sizes of all local primary buckets assigned to this processor
 * ({@code bucketId % nDispatcher == index}) across every shadow PR.
 *
 * @return the summed size of this processor's primary buckets
 */
public int localSizeForProcessor() {
  int size = 0;
  for (PartitionedRegion prQ : this.userRegionNameToshadowPRMap.values()) {
    if (((PartitionedRegion) prQ.getRegion()).getDataStore() != null) {
      Set<BucketRegion> primaryBuckets =
          ((PartitionedRegion) prQ.getRegion()).getDataStore().getAllLocalPrimaryBucketRegions();
      for (BucketRegion br : primaryBuckets) {
        // Idiom fix: braced the previously unbraced if. Only count buckets
        // owned by this dispatcher.
        if (br.getId() % this.nDispatcher == this.index) {
          size += br.size();
        }
      }
    }
    if (logger.isDebugEnabled()) {
      // NOTE(review): this logs the running total so far, not the individual
      // region's size — preserved as-is; confirm that is intentional.
      logger.debug("The name of the queue region is {} and the size is {}", prQ.getFullPath(),
          size);
    }
  }
  return size /* + sender.getTmpQueuedEventSize() */;
}
protected BucketRegion setUpMockBucket(int id) throws BucketNotFoundException { BucketRegion mockBucket = Mockito.mock(BucketRegion.class); BucketRegion fileAndChunkBucket = Mockito.mock(BucketRegion.class); // Allowing the fileAndChunkBucket to behave like a map so that the IndexWriter operations don't // fail Fakes.addMapBehavior(fileAndChunkBucket); when(fileAndChunkBucket.getFullPath()).thenReturn("File" + id); when(mockBucket.getId()).thenReturn(id); when(userRegion.getBucketRegion(eq(id), eq(null))).thenReturn(mockBucket); when(userDataStore.getLocalBucketById(eq(id))).thenReturn(mockBucket); when(userRegion.getBucketRegion(eq(id + 113), eq(null))).thenReturn(mockBucket); when(userDataStore.getLocalBucketById(eq(id + 113))).thenReturn(mockBucket); when(fileDataStore.getLocalBucketById(eq(id))).thenReturn(fileAndChunkBucket); fileAndChunkBuckets.put(id, fileAndChunkBucket); dataBuckets.put(id, mockBucket); BucketAdvisor mockBucketAdvisor = Mockito.mock(BucketAdvisor.class); when(fileAndChunkBucket.getBucketAdvisor()).thenReturn(mockBucketAdvisor); when(mockBucketAdvisor.isPrimary()).thenReturn(true); return mockBucket; } }
// Distributes an update originating on this primary bucket to the other
// members hosting it, then invokes local callbacks. Bulk (putAll) entries are
// folded into a single PutAllMessage instead of sending one operation each;
// their listeners fire later from the putAll path.
protected void distributeUpdateOperation(EntryEventImpl event, long lastModified) {
  long token = -1;
  UpdateOperation op = null;
  try {
    // Only the primary distributes, and only for locally originated, non-netsearch events.
    if (!event.isOriginRemote() && !event.isNetSearch() && getBucketAdvisor().isPrimary()) {
      if (event.isBulkOpInProgress()) {
        // consolidate the UpdateOperation for each entry into a PutAllMessage
        // since we did not call basicPutPart3(), so we have to explicitly addEntry here
        event.getPutAllOperation().addEntry(event, this.getId());
      } else {
        // before distribute: BR's put
        op = new UpdateOperation(event, lastModified);
        token = op.startOperation();
        if (logger.isDebugEnabled()) {
          logger.debug("sent update operation : for region : {}: with event: {}", this.getName(),
              event);
        }
      }
    }
    if (!event.getOperation().isPutAll()) { // putAll will invoke listeners later
      event.invokeCallbacks(this, true, true);
    }
  } finally {
    // Always close the operation we started, even if callbacks threw.
    if (op != null) {
      op.endOperation(token);
    }
  }
}
// Distributes a destroy originating on this primary bucket to the backup
// members hosting it, then invokes local callbacks. Bulk (removeAll) entries
// are folded into a single RemoveAllMessage instead of one operation each;
// their listeners fire later from the removeAll path.
protected void distributeDestroyOperation(EntryEventImpl event) {
  long token = -1;
  DestroyOperation op = null;
  try {
    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
      logger.trace(LogMarker.DM_VERBOSE,
          "BR.basicDestroy: this cache has already seen this event {}", event);
    }
    // Only the primary distributes, and only for locally originated events.
    if (!event.isOriginRemote() && getBucketAdvisor().isPrimary()) {
      if (event.isBulkOpInProgress()) {
        // consolidate the DestroyOperation for each entry into a RemoveAllMessage
        event.getRemoveAllOperation().addEntry(event, this.getId());
      } else {
        // This cache has processed the event, forward operation
        // and event messages to backup buckets
        // before distribute: BR's destroy, not to trigger callback here
        event.setOldValueFromRegion();
        op = new DestroyOperation(event);
        token = op.startOperation();
      }
    }
    if (!event.getOperation().isRemoveAll()) { // removeAll will invoke listeners later
      event.invokeCallbacks(this, true, false);
    }
  } finally {
    // Always close the operation we started, even if callbacks threw.
    if (op != null) {
      op.endOperation(token);
    }
  }
}
int bucketID = br.getId();