/**
 * Runs {@code command} on the wrapped executor. This wrapper adds no behavior of its own.
 *
 * @param command the task to forward to {@code delegate}
 */
@Override
public void execute(Runnable command) {
    delegate.execute(command);
}
/**
 * Resumes receiving from consumers that were previously paused, once the shared incoming
 * queue has drained to at or below {@code sharedQueueResumeThreshold}. Each paused consumer
 * is handed back to the event loop so the next receive happens off this thread.
 * Holds the read lock while draining {@code pausedConsumers}.
 */
private void resumeReceivingFromPausedConsumersIfNeeded() {
    lock.readLock().lock();
    try {
        if (incomingMessages.size() <= sharedQueueResumeThreshold && !pausedConsumers.isEmpty()) {
            // Drain all paused consumers; poll() returning null ends the loop.
            while (true) {
                ConsumerImpl<T> consumer = pausedConsumers.poll();
                if (consumer == null) {
                    break;
                }
                // if messages are readily available on consumer we will attempt to writeLock on the same thread
                client.eventLoopGroup().execute(() -> {
                    receiveMessageFromConsumer(consumer);
                });
            }
        }
    } finally {
        lock.readLock().unlock();
    }
}
/**
 * Asynchronously receives one message from the given per-topic consumer, dispatches it via
 * {@code messageReceived}, and then decides — under the write lock — whether to pause the
 * consumer (queue full, or other consumers already waiting) or schedule the next receive.
 *
 * @param consumer the per-topic sub-consumer to pull the next message from
 */
private void receiveMessageFromConsumer(ConsumerImpl<T> consumer) {
    consumer.receiveAsync().thenAccept(message -> {
        if (log.isDebugEnabled()) {
            log.debug("[{}] [{}] Receive message from sub consumer:{}", topic, subscription, consumer.getTopic());
        }
        // Process the message, add to the queue and trigger listener or async callback
        messageReceived(consumer, message);
        // we're modifying pausedConsumers
        lock.writeLock().lock();
        try {
            // Single size snapshot so the pause/resume decision is internally consistent.
            int size = incomingMessages.size();
            if (size >= maxReceiverQueueSize || (size > sharedQueueResumeThreshold && !pausedConsumers.isEmpty())) {
                // mark this consumer to be resumed later: if No more space left in shared queue,
                // or if any consumer is already paused (to create fair chance for already paused consumers)
                pausedConsumers.add(consumer);
            } else {
                // Schedule next receiveAsync() if the incoming queue is not full. Use a different thread to avoid
                // recursion and stack overflow
                client.eventLoopGroup().execute(() -> {
                    receiveMessageFromConsumer(consumer);
                });
            }
        } finally {
            lock.writeLock().unlock();
        }
    });
}
}); }).exceptionally(exception -> { eventLoopGroup.execute(() -> { log.warn("Failed to open connection to {} : {}", physicalAddress, exception.getMessage()); cleanupConnection(logicalAddress, connectionKey, cnxFuture);
/** Delegating {@link java.util.concurrent.Executor}: submits the task to the backing executor. */
@Override
public void execute(Runnable command) {
    delegate.execute(command);
}
// Delegating Executor implementation: forwards the task unchanged to the wrapped executor.
// The trailing brace closes an enclosing scope whose opening is outside this chunk.
@Override public void execute(Runnable command) { delegate.execute(command); } }
/**
 * Hands the task straight to the underlying executor.
 *
 * @param command task to run
 */
@Override
public void execute(Runnable command) {
    delegate.execute(command);
}
/** Pure pass-through: execution semantics are entirely those of {@code delegate}. */
@Override
public void execute(Runnable command) {
    delegate.execute(command);
}
// Forwards the task to the wrapped executor; no additional behavior.
// The trailing brace closes an enclosing scope whose opening is outside this chunk.
@Override public void execute(Runnable command) { delegate.execute(command); } }
/**
 * Submits {@code command} to the wrapped executor without modification.
 *
 * @param command the runnable to submit
 */
@Override
public void execute(Runnable command) {
    delegate.execute(command);
}
/** Thin wrapper: delegates execution of the task to the backing executor. */
@Override
public void execute(Runnable command) {
    delegate.execute(command);
}
/**
 * Runs the given task via the delegate executor.
 *
 * @param command the task to run
 */
@Override
public void execute(Runnable command) {
    delegate.execute(command);
}
/** Asks the peer — on its own event loop — to schedule a leader election. */
public void scheduleLeaderElection() {
    peerEventLoopGroup.execute(() -> peer.scheduleElection());
}
/**
 * Pings the peer on its event loop.
 *
 * @return a future completed with the collection of peer names that responded
 */
public CompletableFuture<Collection<String>> ping() {
    CompletableFuture<Collection<String>> result = new CompletableFuture<>();
    peerEventLoopGroup.execute(() -> peer.ping(result));
    return result;
}
/**
 * Asks the peer, on its event loop, to open a connection to the given host and port.
 *
 * @param host remote host to connect to
 * @param port remote port to connect to
 * @return a future completed when the connection attempt finishes
 */
public CompletableFuture<Void> connect(final String host, final int port) {
    CompletableFuture<Void> connected = new CompletableFuture<>();
    peerEventLoopGroup.execute(() -> peer.connectTo(host, port, connected));
    return connected;
}
/**
 * Asks the peer to leave the cluster and stops the recurring keep-alive and ping-timeout
 * tasks if both are running.
 *
 * @return a future completed once the peer has left
 */
public CompletableFuture<Void> leave() {
    CompletableFuture<Void> leaveFuture = new CompletableFuture<>();
    peerEventLoopGroup.execute(() -> peer.leave(leaveFuture));
    boolean timersActive = keepAliveFuture != null && timeoutPingsFuture != null;
    if (timersActive) {
        // Cancel without interrupting a run already in flight, then drop the references.
        keepAliveFuture.cancel(false);
        timeoutPingsFuture.cancel(false);
        keepAliveFuture = null;
        timeoutPingsFuture = null;
    }
    return leaveFuture;
}
/**
 * Disconnects the named peer; the actual disconnect runs on the peer's event loop.
 *
 * @param peerName name of the peer to disconnect
 */
public void disconnect(final String peerName) {
    Runnable task = () -> peer.disconnect(peerName);
    peerEventLoopGroup.execute(task);
}
private void resumeReceivingFromPausedConsumersIfNeeded() { lock.readLock().lock(); try { if (incomingMessages.size() <= sharedQueueResumeThreshold && !pausedConsumers.isEmpty()) { while (true) { ConsumerImpl<T> consumer = pausedConsumers.poll(); if (consumer == null) { break; } // if messages are readily available on consumer we will attempt to writeLock on the same thread client.eventLoopGroup().execute(() -> { receiveMessageFromConsumer(consumer); }); } } } finally { lock.readLock().unlock(); } }
private void receiveMessageFromConsumer(ConsumerImpl consumer) { consumer.receiveAsync().thenAccept(message -> { // Process the message, add to the queue and trigger listener or async callback messageReceived(message); if (incomingMessages.size() >= maxReceiverQueueSize || (incomingMessages.size() > sharedQueueResumeThreshold && !pausedConsumers.isEmpty())) { // mark this consumer to be resumed later: if No more space left in shared queue, // or if any consumer is already paused (to create fair chance for already paused consumers) pausedConsumers.add(consumer); } else { // Schedule next receiveAsync() if the incoming queue is not full. Use a different thread to avoid // recursion and stack overflow client.eventLoopGroup().execute(() -> { receiveMessageFromConsumer(consumer); }); } }); }
/**
 * Callback for the UDP bind attempt. On success, stores the bound channel and kicks off
 * camera discovery: one immediate search request, two quick follow-ups (in case a camera
 * missed the first), and then a recurring search at the configured frequency. On failure,
 * only logs the bind error.
 *
 * @param channelFuture the completed bind future
 * @throws Exception propagated per the {@code ChannelFutureListener} contract
 */
@Override
public void operationComplete(ChannelFuture channelFuture) throws Exception {
    if (channelFuture.isSuccess()) {
        channel = (DatagramChannel)channelFuture.channel();
        // perform initial search request
        group.execute(searchRequestRunnable);
        // schedule two quick follow-up search requests to make sure a camera didn't miss the first request
        group.schedule(searchRequestRunnable, SEARCH_REQUEST_INITIAL_FREQUENCY_SECONDS, TimeUnit.SECONDS);
        group.schedule(searchRequestRunnable, SEARCH_REQUEST_INITIAL_FREQUENCY_SECONDS * 2, TimeUnit.SECONDS);
        // set up a recurring search request so we can keep track of cameras coming/going
        searchFuture = group.scheduleAtFixedRate(
            searchRequestRunnable,
            SEARCH_REQUEST_INITIAL_FREQUENCY_SECONDS * 2 + getSearchRequestFrequencySeconds,
            getSearchRequestFrequencySeconds,
            TimeUnit.SECONDS
        );
    } else {
        logger.error("Bind attempt failed", channelFuture.cause());
    }
} });