/**
 * Test hook invocation: runs the registered before-getInitialImage callback
 * when one is installed for the given region.
 */
public static void beforeGetInitialImage(DistributedRegion region) {
  if (internalBeforeGetInitialImage == null) {
    return;
  }
  if (internalBeforeGetInitialImage.getRegionName().equals(region.getName())) {
    internalBeforeGetInitialImage.run();
  }
}
/**
 * Test hook invocation. Fires the before-GII callback if one has been
 * registered for this region's name.
 */
public static void beforeGetInitialImage(DistributedRegion region) {
  boolean hookInstalled = internalBeforeGetInitialImage != null;
  if (hookInstalled
      && internalBeforeGetInitialImage.getRegionName().equals(region.getName())) {
    internalBeforeGetInitialImage.run();
  }
}
/** * process the memberid:threadid -> sequence# information transmitted * along with an initial image from another cache */ void processRegionStateMessage(RegionStateMessage msg) { if (msg.eventState != null) { logger.debug("Applying event state to region {} from {}", region.getName(), msg.getSender()); region.recordEventState(msg.getSender(), msg.eventState); } if (msg.versionVector != null && msg.getSender().getVersionObject().compareTo(Version.GFE_80) < 0 && region.getConcurrencyChecksEnabled()) { // for older version, save received rvv from RegionStateMessage logger.debug("Applying version vector to {}: {}", region.getName(), msg.versionVector); // pack the original RVV, then save the received one if (internalBeforeSavedReceivedRVV != null && internalBeforeSavedReceivedRVV.getRegionName().equals(region.getName())) { internalBeforeSavedReceivedRVV.run(); } saveReceivedRVV(msg.versionVector); if (internalAfterSavedReceivedRVV != null && internalAfterSavedReceivedRVV.getRegionName().equals(region.getName())) { internalAfterSavedReceivedRVV.run(); } } }
/**
 * Verifies that this region's gateway-sender ids and async-event-queue ids
 * agree with those configured for the same region on all other members of
 * the distributed system.
 *
 * @throws GatewaySenderConfigurationException if another cache hosts the
 *         same region with a different set of gateway-sender ids or
 *         async-event-queue ids
 */
public void checkSameSenderIdsAvailableOnAllNodes() {
  // A non-empty advisor result signals a mismatch; the two elements fill the
  // {1}/{2} placeholders of the localized message (presumably the differing
  // id sets -- TODO confirm against the advisor's contract).
  List<?> senderIds = this.getCacheDistributionAdvisor()
      .adviseSameGatewaySenderIds(getGatewaySenderIds());
  if (!senderIds.isEmpty()) {
    throw new GatewaySenderConfigurationException(
        LocalizedStrings.Region_REGION_0_HAS_1_GATEWAY_SENDER_IDS_ANOTHER_CACHE_HAS_THE_SAME_REGION_WITH_2_GATEWAY_SENDER_IDS_FOR_REGION_ACROSS_ALL_MEMBERS_IN_DS_GATEWAY_SENDER_IDS_SHOULD_BE_SAME
            .toLocalizedString(new Object[] { this.getName(), senderIds.get(0), senderIds.get(1) }));
  }
  // same check for async event queues (local renamed: asycnQueueIds -> asyncQueueIds)
  List<?> asyncQueueIds = this.getCacheDistributionAdvisor()
      .adviseSameAsyncEventQueueIds(getAsyncEventQueueIds());
  if (!asyncQueueIds.isEmpty()) {
    throw new GatewaySenderConfigurationException(
        LocalizedStrings.Region_REGION_0_HAS_1_ASYNC_EVENT_QUEUE_IDS_ANOTHER_CACHE_HAS_THE_SAME_REGION_WITH_2_ASYNC_EVENT_QUEUE_IDS_FOR_REGION_ACROSS_ALL_MEMBERS_IN_DS_ASYNC_EVENT_QUEUE_IDS_SHOULD_BE_SAME
            .toLocalizedString(new Object[] { this.getName(), asyncQueueIds.get(0), asyncQueueIds.get(1) }));
  }
}
/**
CacheClientNotifier ccn = CacheClientNotifier.getInstance(); CacheClientProxy proxy = ((HAContainerWrapper)ccn.getHaContainer()).getProxy( region.getName()); logger.debug("Processing FilterInfo for proxy: {} : {}", proxy, msg); } catch (Exception ex) {
/** distribute an update operation */ protected void distributeUpdate(EntryEventImpl event, long lastModified, boolean ifNew, boolean ifOld, Object expectedOldValue, boolean requireOldValue) { // an update from a netSearch is not distributed if (!event.isOriginRemote() && !event.isNetSearch() && !event.isBulkOpInProgress()) { boolean distribute = true; if (event.getInhibitDistribution()) { // this has already been distributed by a one-hop operation distribute = false; } if (distribute) { UpdateOperation op = new UpdateOperation(event, lastModified); if (logger.isTraceEnabled()) { logger.trace("distributing operation for event : {} : for region : {}", event, this.getName()); } op.distribute(); } } }
/**
 * Process the memberid:threadid -> sequence# information transmitted
 * along with an initial image from another cache.
 */
void processRegionStateMessage(RegionStateMessage msg) {
  if (msg.eventState == null) {
    return;
  }
  // log at fine level, or unconditionally when verbose event tracking is on
  if (region.cache.getLoggerI18n().fineEnabled() || EventTracker.VERBOSE) {
    region.cache.getLoggerI18n().info(LocalizedStrings.DEBUG,
        "Applying event state to region " + region.getName() + " from " + msg.getSender());
  }
  region.recordEventState(msg.getSender(), msg.eventState);
}
/**
 * Records the GII provider's region version vector into this region's RVV
 * and, for persistent regions, writes the merged vector (and its GC vector)
 * to disk. Statement order matters: versions are recorded in memory before
 * the disk RVV/RVVGC are written.
 *
 * @param rvv the version vector received from the image provider; must not be null
 */
protected void saveReceivedRVV(RegionVersionVector rvv) {
  assert rvv != null;
  // Make sure the RVV is at least as current as
  // the provider's was when the GII began. This ensures that a
  // concurrent clear() doesn't prevent the new region's RVV from being
  // initialized and that any vector entries that are no longer represented
  // by stamps in the region are not lost
  if (logger.isTraceEnabled(LogMarker.GII)) {
    logger.trace(LogMarker.GII, "Applying received version vector {} to {}",
        rvv.fullToString(), region.getName());
  }
  //TODO - RVV - Our current RVV might reflect some operations
  //that are concurrent updates. We want to keep those updates. However
  //it might also reflect things that we recovered from disk that we are going
  //to remove. We'll need to remove those from the RVV somehow.
  region.getVersionVector().recordVersions(rvv);
  if(region.getDataPolicy().withPersistence()) {
    // persist the merged vector so a restart sees the post-GII state
    region.getDiskRegion().writeRVV(region, false);
    region.getDiskRegion().writeRVVGC(region);
  }
  if (logger.isTraceEnabled(LogMarker.GII)) {
    logger.trace(LogMarker.GII, "version vector is now {}",
        region.getVersionVector().fullToString());
  }
}
if (!r.isInitialized() && !r.isUsedForPartitionedRegionBucket()) { if (logger.isDebugEnabled()) { logger.debug("recording that {} has left during initialization of {}", memberId, r.getName());
rrm.processorId = rvv_processor.getProcessorId(); dm.putOutgoing(rrm); if (internalAfterRequestRVV != null && internalAfterRequestRVV.getRegionName().equals(this.region.getName())) { internalAfterRequestRVV.run();
logger.info(LocalizedMessage.create(LocalizedStrings.DistributedRegion_INITIALIZING_REGION_COMPLETED_0, this.getName()));
protected RequestRVVProcessor getRVVDetailsFromProvider(final DistributionManager dm, InternalDistributedMember recipient, boolean targetReinitialized) { RegionVersionVector received_rvv = null; // RequestRVVMessage is to send rvv of gii provider for both persistent and non-persistent region RequestRVVMessage rrm = new RequestRVVMessage(); rrm.regionPath = this.region.getFullPath(); rrm.targetReinitialized = targetReinitialized; rrm.setRecipient(recipient); RequestRVVProcessor rvv_processor = new RequestRVVProcessor(this.region.getSystem(), recipient); rrm.processorId = rvv_processor.getProcessorId(); dm.putOutgoing(rrm); if (internalAfterRequestRVV != null && internalAfterRequestRVV.getRegionName().equals(this.region.getName())) { internalAfterRequestRVV.run(); } try { rvv_processor.waitForRepliesUninterruptibly(); } catch (InternalGemFireException ex) { Throwable cause = ex.getCause(); if (cause instanceof com.gemstone.gemfire.cache.TimeoutException) { throw (com.gemstone.gemfire.cache.TimeoutException)cause; } throw ex; } catch (ReplyException e) { if(!region.isDestroyed()) { e.handleAsUnexpected(); } } return rvv_processor; }
logger.info("Region {} is requesting synchronization with {} for {}", this.region.getName(), target, lostMember); logger.debug("{} is done synchronizing with {}", this.region.getName(), target); logger.debug("{} received no synchronization data from {} which could mean that we are already synchronized", this.region.getName(), target);
/**
 * Records the GII provider's region version vector into this region's RVV
 * and, for persistent regions, writes the merged vector (and its GC vector)
 * to disk. Statement order matters: versions are recorded in memory before
 * the disk RVV/RVVGC are written.
 *
 * @param rvv the version vector received from the image provider; must not be null
 */
protected void saveReceivedRVV(RegionVersionVector rvv) {
  assert rvv != null;
  // Make sure the RVV is at least as current as
  // the provider's was when the GII began. This ensures that a
  // concurrent clear() doesn't prevent the new region's RVV from being
  // initialized and that any vector entries that are no longer represented
  // by stamps in the region are not lost
  if (TRACE_GII) {
    region.getLogWriterI18n().info(LocalizedStrings.DEBUG,
        "Applying received version vector "+rvv.fullToString()+ " to " + region.getName());
  }
  //TODO - RVV - Our current RVV might reflect some operations
  //that are concurrent updates. We want to keep those updates. However
  //it might also reflect things that we recovered from disk that we are going
  //to remove. We'll need to remove those from the RVV somehow.
  region.getVersionVector().recordVersions(rvv, null);
  if(region.getDataPolicy().withPersistence()) {
    // persist the merged vector so a restart sees the post-GII state
    region.getDiskRegion().writeRVV(region, false);
    region.getDiskRegion().writeRVVGC(region);
  }
  if (TRACE_GII) {
    region.getLogWriterI18n().info(LocalizedStrings.DEBUG,
        "version vector is now " + region.getVersionVector().fullToString());
  }
}
CacheClientNotifier ccn = CacheClientNotifier.getInstance(); CacheClientProxy proxy = ((HAContainerWrapper)ccn.getHaContainer()).getProxy( region.getName()); if (region.getCache().getLoggerI18n().fineEnabled()) { region.getCache().getLoggerI18n().fine("Processing FilterInfo for proxy: " + proxy + " : " + msg);
if (m.entries != null) { try { if (internalAfterReceivedImageReply != null && internalAfterReceivedImageReply.getRegionName().equals(region.getName())) { internalAfterReceivedImageReply.run();
if (!r.isInitialized() && !r.isUsedForPartitionedRegionBucket()) { if (r.getLogWriterI18n().fineEnabled()) { r.getLogWriterI18n().fine("recording that " + memberId +" has left during initialization of " + r.getName());
if (m.entries != null) { try { if (internalAfterReceivedImageReply != null && internalAfterReceivedImageReply.getRegionName().equals(region.getName())) { internalAfterReceivedImageReply.run();
if (internalAfterGIILock != null && internalAfterGIILock.getRegionName().equals(rgn.getName())) { internalAfterGIILock.run();
log.info(LocalizedStrings.DEBUG, "Region " + this.region.getName() + " is requesting synchronization with " + target + " for " + lostMember); this.region.getLogWriterI18n().fine(this.region.getName() + " is done synchronizing with " + target); } else { this.region.getLogWriterI18n().fine(this.region.getName() + " received no synchronization data from " + target + " which could mean that we are already synchronized");