@Test public void retryErrorPropagatedFromWorkQueueSubscriberHotPoisonSignalFlatMap() throws Exception { WorkQueueProcessor<Integer> wq = WorkQueueProcessor.<Integer>builder().autoCancel(false).build(); AtomicInteger onNextSignals = new AtomicInteger();
// With a work-stealing pool of parallelism 2, the processor can host only two
// subscribers: the third subscription must be rejected with an onError naming the
// detected limit, while the first two proceed normally through "foo" and onComplete.
@Test
public void forkJoinPoolWorkQueueRejectsSubscribers() {
    ExecutorService executorService = Executors.newWorkStealingPool(2); // parallelism 2 => room for 2 subscribers
    WorkQueueProcessor<String> bc = WorkQueueProcessor.<String>builder().executor(executorService).bufferSize(16).build();
    CountDownLatch latch = new CountDownLatch(2); // counted down by the two accepted subscribers
    TestWorkQueueSubscriber spec1 = new TestWorkQueueSubscriber(latch, "spec1");
    TestWorkQueueSubscriber spec2 = new TestWorkQueueSubscriber(latch, "spec2");
    TestWorkQueueSubscriber spec3 = new TestWorkQueueSubscriber(latch, "spec3");
    bc.subscribe(spec1);
    bc.subscribe(spec2);
    bc.subscribe(spec3); // exceeds the executor's capacity -> expected to be rejected
    bc.onNext("foo");
    bc.onComplete();
    // Accepted subscribers see no error; the rejected one carries the limit message.
    assertThat(spec1.error, is(nullValue()));
    assertThat(spec2.error, is(nullValue()));
    assertThat(spec3.error, is(notNullValue()));
    assertThat(spec3.error.getMessage(),
            is("The executor service could not accommodate another subscriber, detected limit 2"));
    try {
        latch.await(1, TimeUnit.SECONDS); // best-effort wait for the surviving subscribers to finish
    } catch (InterruptedException e1) {
        fail(e1.toString());
    }
}
private void setupFakeProtocolListener() { workProcessor = WorkQueueProcessor.<ByteBuf>builder().autoCancel(false).build(); Flux<ByteBuf> bufferStream = Flux.from(processor) .window(windowBatch)
WorkQueueProcessor.<List<String>>builder().autoCancel(false).build();
/**
 * Creates a {@link ReactorSink} backed by a {@link WorkQueueProcessor} with
 * {@code min(maxConcurrency, subscribers)} competing subscribers, each applying the
 * given processing function. The sink's disposer awaits graceful shutdown of the
 * processor (forcing shutdown on timeout) and then waits for the subscriber
 * pipelines to finish within the remainder of the configured shutdown timeout.
 *
 * @param flowConstruct flow whose Mule configuration supplies the shutdown timeout
 * @param function reactive processing function applied by each subscriber pipeline
 * @return a sink that accepts {@code CoreEvent}s for concurrent work-queue processing
 */
@Override
public Sink createSink(FlowConstruct flowConstruct, ReactiveProcessor function) {
  final long shutdownTimeout = flowConstruct.getMuleContext().getConfiguration().getShutdownTimeout();
  WorkQueueProcessor<CoreEvent> processor =
      WorkQueueProcessor.<CoreEvent>builder().executor(ringBufferSchedulerSupplier.get()).bufferSize(bufferSize)
          .waitStrategy(waitStrategy.getReactorWaitStrategy()).build();
  // Never spin up more subscribers than the flow's permitted concurrency.
  int subscriberCount = Math.min(maxConcurrency, subscribers);
  CountDownLatch completionLatch = new CountDownLatch(subscriberCount);
  for (int i = 0; i < subscriberCount; i++) {
    processor.doOnSubscribe(subscription -> currentThread().setContextClassLoader(executionClassloader)).transform(function)
        .doFinally(s -> completionLatch.countDown()).subscribe();
  }
  return new ReactorSink(processor.sink(), () -> {
    long start = currentTimeMillis();
    if (!processor.awaitAndShutdown(ofMillis(shutdownTimeout))) {
      processor.forceShutdown();
    }
    try {
      // Wait only for whatever portion of the shutdown timeout remains, floored at 0L.
      completionLatch.await(max(start - currentTimeMillis() + shutdownTimeout, 0L), MILLISECONDS);
    } catch (InterruptedException e) {
      currentThread().interrupt();
      throw new MuleRuntimeException(e);
    }
  }, createOnEventConsumer(), bufferSize);
}
/**
 * Wires the fake protocol listener used by the test: a topic broadcaster whose
 * output is grouped into five-element batches and pushed through a shared work
 * queue, exposed over an embedded HTTP server at {@code /data}.
 */
private void setupFakeProtocolListener() {
    broadcaster = TopicProcessor.create();

    // Work queue fed with five-element batches taken from the broadcaster.
    final Processor<List<String>, List<String>> workQueue =
            WorkQueueProcessor.<List<String>>builder().autoCancel(false).build();
    Flux.from(broadcaster).buffer(5).subscribe(workQueue);

    // Serve the batches on /data, flushing after each element; fall back to an
    // empty stream after 2s of silence and terminate with one empty batch.
    httpServer = HttpServer.create()
                           .port(0)
                           .route(r -> r.get("/data", (req, resp) ->
                                   resp.options(NettyPipeline.SendOptions::flushOnEach)
                                       .send(Flux.from(workQueue)
                                                 .log("server")
                                                 .timeout(Duration.ofSeconds(2), Flux.empty())
                                                 .concatWith(Flux.just(new ArrayList<>()))
                                                 .map(new DummyListEncoder(resp.alloc())))))
                           .wiretap(true)
                           .bindNow();
}
/**
 * Builds a new <em>shared</em> WorkQueueProcessor with the given backlog size,
 * blocking wait strategy and auto-cancel. <p> A shared processor permits concurrent
 * {@code onNext} calls, which suits multi-threaded publishers fanning data in.
 * <p> A cached thread pool is created implicitly; its threads carry the supplied
 * name.
 * @param name name assigned to the threads of the implicitly created cached
 * ExecutorService
 * @param bufferSize backlog size used to absorb slow subscribers
 * @param <E> type of the processed signals
 * @return a fresh shared processor
 */
public static <E> WorkQueueProcessor<E> share(String name, int bufferSize) {
	Builder<E> builder = WorkQueueProcessor.<E>builder();
	return builder.share(true)
	              .name(name)
	              .bufferSize(bufferSize)
	              .build();
}
/**
 * Builds a new WorkQueueProcessor with the given backlog size, blocking wait
 * strategy and auto-cancel. <p> A cached thread pool is created implicitly; its
 * threads carry the supplied name.
 * @param name name assigned to the threads of the implicitly created cached
 * ExecutorService
 * @param bufferSize backlog size used to absorb slow subscribers
 * @param <E> type of the processed signals
 * @return a fresh processor
 */
public static <E> WorkQueueProcessor<E> create(String name, int bufferSize) {
	Builder<E> builder = WorkQueueProcessor.<E>builder();
	return builder.name(name)
	              .bufferSize(bufferSize)
	              .build();
}
/**
 * Starts the delegate appender, then builds a work-queue processor named
 * "logger" (backlog-sized buffer, auto-cancel disabled) and subscribes this
 * appender to it so logging events are delivered asynchronously.
 */
@Override
public void start() {
    startDelegateAppender();
    WorkQueueProcessor.Builder<ILoggingEvent> eventQueueBuilder = WorkQueueProcessor.builder();
    processor = eventQueueBuilder.name("logger")
                                 .bufferSize(backlog)
                                 .autoCancel(false)
                                 .build();
    processor.subscribe(this);
}
/**
 * Builds a new WorkQueueProcessor with the default {@link Queues#SMALL_BUFFER_SIZE}
 * backlog, blocking wait strategy and auto-cancel. <p> A cached thread pool is
 * created implicitly.
 * @param <E> type of the processed signals
 * @return a fresh processor
 */
public static <E> WorkQueueProcessor<E> create() {
	Builder<E> defaults = WorkQueueProcessor.<E>builder();
	return defaults.build();
}
/**
 * Creates a new {@link WorkQueueProcessor} {@link Builder} with default properties.
 * @param <T> type of signals the built processor will handle
 * @return new WorkQueueProcessor builder
 */
// Canonical JLS modifier order is "public static"; "final" is redundant on a
// static method and has been dropped.
public static <T> Builder<T> builder() {
	return new Builder<>();
}
@Test public void retryErrorPropagatedFromWorkQueueSubscriberHotPoisonSignalParallel() throws Exception { WorkQueueProcessor<Integer> wq = WorkQueueProcessor.<Integer>builder().autoCancel(false).build(); AtomicInteger onNextSignals = new AtomicInteger();
@Test public void highRate() throws Exception { WorkQueueProcessor<String> queueProcessor = WorkQueueProcessor.<String>builder() .share(true) .name("Processor") .bufferSize(256) .waitStrategy(liteBlocking()) .build(); Scheduler timer = Schedulers.newSingle("Timer"); queueProcessor.bufferTimeout(32, Duration.ofMillis(2), timer)
throws Exception { AtomicInteger errors = new AtomicInteger(3); WorkQueueProcessor<Integer> wq = WorkQueueProcessor.<Integer>builder().autoCancel(false).build(); AtomicInteger onNextSignals = new AtomicInteger();
public void retryErrorPropagatedFromWorkQueueSubscriberHotPoisonSignalFlatMapPrefetch1() throws Exception { WorkQueueProcessor<Integer> wq = WorkQueueProcessor.<Integer>builder().autoCancel(false).build(); AtomicInteger onNextSignals = new AtomicInteger();
throws Exception { AtomicInteger errors = new AtomicInteger(3); WorkQueueProcessor<Integer> wq = WorkQueueProcessor.<Integer>builder().autoCancel(false).build(); AtomicInteger onNextSignals = new AtomicInteger();
@Test public void retryErrorPropagatedFromWorkQueueSubscriberHotPoisonSignalPublishOn() throws Exception { WorkQueueProcessor<Integer> wq = WorkQueueProcessor.<Integer>builder().autoCancel(false).build(); AtomicInteger onNextSignals = new AtomicInteger();
@Test public void retryErrorPropagatedFromWorkQueueSubscriberHotPoisonSignalPublishOnPrefetch1() throws Exception { WorkQueueProcessor<Integer> wq = WorkQueueProcessor.<Integer>builder().autoCancel(false).build(); AtomicInteger onNextSignals = new AtomicInteger();
// Pushes a "poison" element (2) through a retried handle() pipeline: the handler
// errors on 2, retry() re-subscribes, and 1 and 3 reach the verifier. Exactly three
// onNext signals must have been observed in total.
@Test
public void retryErrorPropagatedFromWorkQueueSubscriberHotPoisonSignal2() throws Exception {
    WorkQueueProcessor<Integer> wq = WorkQueueProcessor.<Integer>builder().autoCancel(false).build();
    AtomicInteger onNextSignals = new AtomicInteger();
    StepVerifier.create(wq.log()
                          .doOnNext(e -> onNextSignals.incrementAndGet()).<Integer>handle(
                    (s1, sink) -> {
                        // 2 is the poison value: fail the pipeline instead of emitting it.
                        if (s1 == 2) {
                            sink.error(new RuntimeException());
                        }
                        else {
                            sink.next(s1);
                        }
                    }).retry())
                .then(() -> {
                    wq.onNext(1);
                    wq.onNext(2);
                    wq.onNext(3);
                })
                .expectNext(1, 3)
                .thenCancel()
                .verify();
    assertThat(onNextSignals.get(), equalTo(3));
    // Busy-wait until the processor has no remaining subscribers; guarded by the
    // active-thread count so the loop cannot hang if worker threads already exited.
    while (wq.downstreamCount() != 0 && Thread.activeCount() > 1) {
    }
}