/**
 * Creates the ack reader thread for the given sender, running as a daemon so
 * it never blocks JVM shutdown.
 *
 * @param sender the gateway sender whose acks this thread will read
 * @param name used to build the thread name
 */
public AckReaderThread(GatewaySender sender, String name) {
  super("AckReaderThread for : " + name);
  setDaemon(true);
  this.cache = ((AbstractGatewaySender) sender).getCache();
}
/**
 * Blocks the caller until failover has completed, as signalled via
 * {@code failoverCompletedLock}. Returns immediately if failover is already
 * done. If interrupted while waiting, re-asserts the interrupt flag, checks
 * for cache cancellation, and returns without failover having completed.
 */
protected void waitForFailoverCompletion() { synchronized (this.failoverCompletedLock) { if (this.failoverCompleted) { return; } logger.info("{} : Waiting for failover completion", this); try { while (!this.failoverCompleted) { this.failoverCompletedLock.wait(); } } catch (InterruptedException ex) { Thread.currentThread().interrupt(); this.sender.getCache().getCancelCriterion().checkCancelInProgress(ex); logger.info("{}: did not wait for failover completion due to interruption.", this); } } }
/** Start the background batch removal thread. */ public void start() { // at present, this won't be accessed by multiple threads, // still, it is safer approach to synchronize it synchronized (ParallelGatewaySenderQueue.class) { if (removalThread == null) { removalThread = new BatchRemovalThread(this.sender.getCache(), this); removalThread.start(); } } }
/** * Mark all PDX types as requiring dispatch so that they will be sent over the connection again. * */ public void checkIfPdxNeedsResend(int remotePdxSize) { InternalCache cache = this.sender.getCache(); Region<Object, Object> pdxRegion = cache.getRegion(PeerTypeRegistration.REGION_NAME); // The peer has not seen all of our PDX types. This may be because // they have been lost on the remote side. Resend the PDX types. if (pdxRegion != null && pdxRegion.size() > remotePdxSize) { rebuildPdxList = true; } }
private void createSender() { // Mock gateway sender this.sender = mock(AbstractGatewaySender.class); when(this.sender.getCache()).thenReturn(this.cache); }
/**
 * Builds and sends a {@link ParallelQueueRemovalMessage} so peers remove the
 * queue entry for an event that was dropped before dispatch.
 *
 * @param prQ the shadow partitioned region backing the sender queue
 * @param bucketId the bucket that held the dropped event
 * @param key the queue key of the dropped event
 */
public void sendQueueRemovalMesssageForDroppedEvent(PartitionedRegion prQ, int bucketId,
    Object key) {
  // Map of region path -> (bucket id -> dispatched keys) carried by the message.
  final HashMap<String, Map<Integer, List>> temp = new HashMap<String, Map<Integer, List>>();
  // Typed (not raw) so it matches temp's declared value type; ConcurrentHashMap
  // retained from the original in case it is read while still being populated.
  Map<Integer, List> bucketIdToDispatchedKeys = new ConcurrentHashMap<Integer, List>();
  temp.put(prQ.getFullPath(), bucketIdToDispatchedKeys);
  addRemovedEventToMap(bucketIdToDispatchedKeys, bucketId, key);
  Set<InternalDistributedMember> recipients =
      removalThread.getAllRecipients(sender.getCache(), temp);
  if (!recipients.isEmpty()) {
    ParallelQueueRemovalMessage pqrm = new ParallelQueueRemovalMessage(temp);
    pqrm.setRecipients(recipients);
    sender.getCache().getInternalDistributedSystem().getDistributionManager().putOutgoing(pqrm);
  }
}
public static AbstractGatewaySender createGatewaySender(GemFireCacheImpl cache) { // Mock gateway sender AbstractGatewaySender sender = mock(AbstractGatewaySender.class); when(sender.getCache()).thenReturn(cache); CancelCriterion cancelCriterion = mock(CancelCriterion.class); when(sender.getCancelCriterion()).thenReturn(cancelCriterion); return sender; }
/**
 * Looks up the distributed lock service for this sender, creating it if it
 * does not yet exist, and asserts the result is non-null.
 */
public void initDLockService() {
  final InternalDistributedSystem system = this.sender.getCache().getInternalDistributedSystem();
  final String serviceName = getDLockServiceName();
  this.lockService = DistributedLockService.getServiceNamed(serviceName);
  if (this.lockService == null) {
    // Not created yet in this VM; create it now (destroyable, automated grant).
    this.lockService = DLockService.create(serviceName, system, true, true, true);
  }
  Assert.assertTrue(this.lockService != null);
  if (logger.isDebugEnabled()) {
    logger.debug("{}: Obtained DistributedLockService: {}", this, this.lockService);
  }
}
/**
 * Creates and starts the parallel queue for this processor, targeting every
 * application region that lists this sender's id.
 *
 * @param id the gateway sender id used to select target regions
 */
@Override
protected void initializeMessageQueue(String id) {
  Set<Region> targetRs = new HashSet<Region>();
  for (InternalRegion region : sender.getCache().getApplicationRegions()) {
    if (region.getAllGatewaySenderIds().contains(id)) {
      targetRs.add(region);
    }
  }
  if (logger.isDebugEnabled()) {
    logger.debug("The target Regions are(PGSEP) {}", targetRs);
  }
  // The local variable is already a ParallelGatewaySenderQueue, so the
  // original's casts back to that type were redundant and have been removed.
  ParallelGatewaySenderQueue queue =
      new ParallelGatewaySenderQueue(this.sender, targetRs, this.index, this.nDispatcher);
  queue.start();
  this.queue = queue;
  if (queue.localSize() > 0) {
    queue.notifyEventProcessorIfRequired();
  }
}
/**
 * Builds the mocked collaborators and the queue under test.
 */
@Before
public void createParallelGatewaySenderQueue() {
  cache = mock(GemFireCacheImpl.class);
  metaRegionFactory = mock(MetaRegionFactory.class);
  CancelCriterion cancelCriterion = mock(CancelCriterion.class);
  sender = mock(AbstractGatewaySender.class);
  when(sender.getCancelCriterion()).thenReturn(cancelCriterion);
  when(sender.getCache()).thenReturn(cache);
  when(sender.getMaximumQueueMemory()).thenReturn(100);
  when(sender.getLifeCycleLock()).thenReturn(new ReentrantReadWriteLock());
  queue = new ParallelGatewaySenderQueue(sender, Collections.emptySet(), 0, 1, metaRegionFactory);
}
/**
 * Distributes a BatchDestroyOperation for an event that was dropped, so the
 * secondary can clean up its corresponding queue entry. Any exception during
 * distribution is logged at debug level and otherwise ignored.
 *
 * @param dropEvent the event that was dropped
 * @param index used as the destroy event's key
 */
public void sendBatchDestroyOperationForDroppedEvent(EntryEventImpl dropEvent, int index) {
  final EntryEventImpl destroyEvent =
      EntryEventImpl.create((LocalRegion) this.queue.getRegion(), Operation.DESTROY, (long) index,
          null/* newValue */, null, false, sender.getCache().getMyId());
  destroyEvent.setEventId(dropEvent.getEventId());
  destroyEvent.disallowOffHeapValues();
  destroyEvent.setTailKey(-1L);
  final boolean isDebugEnabled = logger.isDebugEnabled();
  if (isDebugEnabled) {
    logger.debug(
        "SerialGatewaySenderEventProcessor sends BatchDestroyOperation to secondary for event {}",
        destroyEvent);
  }
  try {
    new BatchDestroyOperation(destroyEvent).distribute();
    if (isDebugEnabled) {
      logger.debug("BatchRemovalThread completed destroy of dropped event {}", dropEvent);
    }
  } catch (Exception ignore) {
    // Deliberately best-effort: a failure here must not interrupt sender startup.
    if (isDebugEnabled) {
      logger.debug(
          "Exception in sending dropped event could be ignored in order not to interrupt sender starting",
          ignore);
    }
  }
}
for (InternalRegion pr : sender.getCache().getApplicationRegions()) { if (((LocalRegion) pr).getAllGatewaySenderIds().contains(sender.getId())) { targetRs.add(pr);
InternalCache cache = this.sender.getCache(); Region<Object, Object> pdxRegion = cache.getRegion(PeerTypeRegistration.REGION_NAME);
/**
 * Creates a serial sender queue backed by the named region, copying the
 * configuration it needs from the owning sender and starting the batch
 * removal thread.
 *
 * @param abstractSender the owning sender supplying configuration
 * @param regionName name of the backing region
 * @param listener optional cache listener installed on the region
 */
public SerialGatewaySenderQueue(AbstractGatewaySender abstractSender, String regionName,
    CacheListener listener) {
  // The queue starts out with headKey and tailKey equal to -1 to force
  // them to be initialized from the region.
  this.regionName = regionName;
  this.headKey = -1;
  this.tailKey.set(-1);
  this.indexes = new HashMap<String, Map<Object, Long>>();
  this.enableConflation = abstractSender.isBatchConflationEnabled();
  this.diskStoreName = abstractSender.getDiskStoreName();
  this.batchSize = abstractSender.getBatchSize();
  this.enablePersistence = abstractSender.isPersistenceEnabled();
  // Disk synchronicity is only meaningful when persistence is enabled; the
  // short-circuit keeps isDiskSynchronous() from being consulted otherwise.
  this.isDiskSynchronous = this.enablePersistence && abstractSender.isDiskSynchronous();
  this.maximumQueueMemory = abstractSender.getMaximumMemeoryPerDispatcherQueue();
  this.stats = abstractSender.getStatistics();
  initializeRegion(abstractSender, listener);
  // Increment queue size. Fix for bug 51988.
  this.stats.incQueueSize(this.region.size());
  this.removalThread = new BatchRemovalThread(abstractSender.getCache());
  this.removalThread.start();
  this.sender = abstractSender;
  if (logger.isDebugEnabled()) {
    logger.debug("{}: Contains {} elements", this, size());
  }
}
@SuppressWarnings({"rawtypes", "unchecked", "deprecation"}) private static synchronized Region<String, Integer> initializeEventIdIndexMetaDataRegion( AbstractGatewaySender sender) { final InternalCache cache = sender.getCache(); Region<String, Integer> region = cache.getRegion(META_DATA_REGION_NAME); if (region == null) {
final InternalCache gemCache = sender.getCache(); this.region = gemCache.getRegion(this.regionName); if (this.region == null) {
InternalCache cache = this.sender.getCache(); Region region = (PartitionedRegion) cache.getRegion(regionPath); if (region != null && !region.isDestroyed()) {
return; InternalCache cache = sender.getCache(); final String prQName = getQueueName(sender.getId(), userRegion.getFullPath()); prQ = (PartitionedRegion) cache.getRegion(prQName);
InternalCache cache = this.sender.getCache(); if (cache != null && !cache.isClosed()) { if (this.sender.isPrimary() && (this.connection != null)) {