/**
 * Subscribes to this {@link ParallelFlux} and triggers the execution chain for all
 * 'rails', without consuming any of the signals.
 */
public final Disposable subscribe() {
	// No callbacks: delegate with all-null consumers.
	return subscribe(null, null, null);
}
@Override
public void subscribe(CoreSubscriber<? super T>[] subscribers) {
	// This operator adds no per-rail behavior: hand the rails straight to the source.
	source.subscribe(subscribers);
}
}
/**
 * Subscribes to this {@link ParallelFlux} with the given onNext callback and
 * triggers the execution chain for all 'rails'.
 *
 * @param onNext consumer of onNext signals
 */
public final Disposable subscribe(Consumer<? super T> onNext) {
	// Error and completion callbacks are left unset.
	return subscribe(onNext, null, null);
}
/**
 * Subscribes to this {@link ParallelFlux} with the given onNext and onError
 * callbacks and triggers the execution chain for all 'rails'.
 *
 * @param onNext consumer of onNext signals
 * @param onError consumer of error signal
 */
public final Disposable subscribe(@Nullable Consumer<? super T> onNext,
		Consumer<? super Throwable> onError) {
	// Completion callback is left unset.
	return subscribe(onNext, onError, null);
}
@Override
protected void subscribe(CoreSubscriber<? super O>[] s) {
	@SuppressWarnings("unchecked")
	CoreSubscriber<? super I>[] subscribers = new CoreSubscriber[parallelism()];

	// Lift each downstream rail subscriber into an upstream-facing one.
	for (int i = 0; i < subscribers.length; i++) {
		subscribers[i] = Objects.requireNonNull(lifter.apply(source, s[i]),
				"Lifted subscriber MUST NOT be null");
	}

	source.subscribe(subscribers);
}
}
@Override
public void subscribe(CoreSubscriber<? super R>[] subscribers) {
	if (!validate(subscribers)) {
		return;
	}
	int n = subscribers.length;

	@SuppressWarnings("unchecked")
	CoreSubscriber<T>[] rails = new CoreSubscriber[n];

	// Wrap each rail with a concat-map subscriber sharing the operator's configuration.
	int i = 0;
	while (i < n) {
		rails[i] = FluxConcatMap.subscriber(subscribers[i], mapper, queueSupplier,
				prefetch, errorMode);
		i++;
	}

	source.subscribe(rails);
}
}
@Override
public void subscribe(CoreSubscriber<? super T>[] subscribers) {
	if (!validate(subscribers)) {
		return;
	}
	int n = subscribers.length;

	@SuppressWarnings("unchecked")
	CoreSubscriber<? super T>[] wrapped = new CoreSubscriber[n];

	// Hide each rail's identity behind a plain subscriber wrapper.
	int i = 0;
	while (i < n) {
		wrapped[i] = new FluxHide.HideSubscriber<>(subscribers[i]);
		i++;
	}

	source.subscribe(wrapped);
}
}
@Override
public void subscribe(CoreSubscriber<? super GroupedFlux<Integer, T>> actual) {
	int n = source.parallelism();

	@SuppressWarnings("unchecked")
	ParallelInnerGroup<T>[] groups = new ParallelInnerGroup[n];

	// One inner group per rail, keyed by the rail index.
	int i = 0;
	while (i < n) {
		groups[i] = new ParallelInnerGroup<>(i);
		i++;
	}

	// Emit the groups downstream first, then wire them up to the source's rails.
	FluxArray.subscribe(actual, groups);

	source.subscribe(groups);
}
@Override
public void subscribe(CoreSubscriber<? super T> actual) {
	// A single ordered-merge producer coordinates all rails of the source.
	FluxMergeOrdered.MergeOrderedMainProducer<T> main =
			new FluxMergeOrdered.MergeOrderedMainProducer<>(actual,
					valueComparator, prefetch, source.parallelism());

	actual.onSubscribe(main);
	source.subscribe(main.subscribers);
}
}
@Override
public void subscribe(CoreSubscriber<? super T> actual) {
	// The main coordinator drains the rails back into a single sequential stream.
	MergeSequentialMain<T> parent = new MergeSequentialMain<>(actual,
			source.parallelism(), prefetch, queueSupplier);

	actual.onSubscribe(parent);
	source.subscribe(parent.subscribers);
}
@Override
public void subscribe(CoreSubscriber<? super T> actual) {
	// Reduce each rail, then merge the per-rail results via the coordinator.
	MergeReduceMain<T> parent =
			new MergeReduceMain<>(actual, source.parallelism(), reducer);

	actual.onSubscribe(parent);
	source.subscribe(parent.subscribers);
}
@Override
public void subscribe(CoreSubscriber<? super T> actual) {
	// The coordinator merges the rails' sorted outputs using the configured comparator.
	MergeSortMain<T> parent =
			new MergeSortMain<>(actual, source.parallelism(), comparator);

	actual.onSubscribe(parent);
	source.subscribe(parent.subscribers);
}
/**
 * Subscribes to this {@link ParallelFlux} with the given onNext, onError and
 * onComplete callbacks and triggers the execution chain for all 'rails'.
 *
 * @param onNext consumer of onNext signals
 * @param onError consumer of error signal
 * @param onComplete callback on completion signal
 */
public final Disposable subscribe(
		@Nullable Consumer<? super T> onNext,
		@Nullable Consumer<? super Throwable> onError,
		@Nullable Runnable onComplete) {
	// Delegate to the variant that also takes an onSubscribe consumer.
	return subscribe(onNext, onError, onComplete, null);
}
@Test public void advancedParallelJustDivided() { Flux.range(1, 10) .parallel(2) //<1> .subscribe(i -> System.out.println(Thread.currentThread().getName() + " -> " + i)); }
@Test
public void advancedParallelParallelized() {
	// With runOn, each rail is actually executed on the parallel scheduler.
	Flux.range(1, 10)
	    .parallel(2)
	    .runOn(Schedulers.parallel())
	    .subscribe(v -> System.out.println(Thread.currentThread().getName() + " -> " + v));
}
@Test
public void subscribeOnNextOnErrorErrorsOnAllRails() {
	LongAdder valueAdder = new LongAdder();
	LongAdder errorAdder = new LongAdder();

	// 3 values then an error, split over 2 rails: every rail observes the error.
	Flux.range(1, 3)
	    .concatWith(Mono.error(new IllegalStateException("boom")))
	    .parallel(2)
	    .subscribe(v -> valueAdder.increment(), e -> errorAdder.increment());

	assertThat(valueAdder.intValue()).isEqualTo(3);
	assertThat(errorAdder.intValue()).isEqualTo(2);
}
@Test
public void parallelSubscribeAndDispose() throws InterruptedException {
	AtomicInteger nextCount = new AtomicInteger();
	CountDownLatch cancelLatch = new CountDownLatch(1);
	TestPublisher<Integer> source = TestPublisher.create();

	Disposable disposable = source.flux()
	                              .parallel(3)
	                              .doOnCancel(cancelLatch::countDown)
	                              .subscribe(i -> nextCount.incrementAndGet());

	// Emit three values, dispose, then emit more: only the first three count.
	source.next(1, 2, 3);
	disposable.dispose();
	source.emit(4, 5, 6);

	boolean cancelled = cancelLatch.await(300, TimeUnit.MILLISECONDS);

	assertThat(cancelled).as("cancelled latch").isTrue();
	assertThat(disposable.isDisposed()).as("disposed").isTrue();
	assertThat(nextCount.get()).as("received count").isEqualTo(3);
}
@Test
public void testPublisherSubscribeUsesSequential() {
	LongAdder valueCount = new LongAdder();
	ParallelFlux<Integer> parallel = ParallelFlux.from(Flux.range(1, 4), 2);

	// Subscribing a plain (non-array) subscriber goes through the sequential view.
	parallel.subscribe(new BaseSubscriber<Integer>() {
		@Override
		protected void hookOnSubscribe(Subscription subscription) {
			requestUnbounded();
		}

		@Override
		protected void hookOnNext(Integer value) {
			valueCount.increment();
		}
	});

	assertThat(valueCount.intValue()).isEqualTo(4);
}
/**
 * https://gist.github.com/nithril/444d8373ce67f0a8b853 Contribution by Nicolas Labrot
 * @throws InterruptedException on interrupt
 */
@Test
public void testParallelWithJava8StreamsInput() throws InterruptedException {
	Scheduler supplier = Schedulers.newParallel("test-p", 2);
	try {
		int max = ThreadLocalRandom.current()
		                           .nextInt(100, 300);
		CountDownLatch countDownLatch = new CountDownLatch(max);

		Flux<Integer> worker = Flux.range(0, max)
		                           .publishOn(asyncGroup);
		worker.parallel(2)
		      .runOn(supplier)
		      .map(v -> v)
		      .subscribe(v -> countDownLatch.countDown());

		// Assert the await result instead of silently discarding it: a false
		// return means not every value arrived within the timeout.
		Assert.assertTrue("latch did not reach zero within 10s",
				countDownLatch.await(10, TimeUnit.SECONDS));
	}
	finally {
		// Dispose the dedicated scheduler so its worker threads do not leak
		// into subsequent tests.
		supplier.dispose();
	}
}
@Test
public void testDoOnEachSignalToSubscriber() {
	AssertSubscriber<Integer> peekSubscriber = AssertSubscriber.create();

	// Replay every doOnEach signal onto the peek subscriber, across 3 rails.
	ParallelFlux<Integer> flux = Flux.just(1, 2)
	                                 .parallel(3)
	                                 .doOnEach(s -> s.accept(peekSubscriber));

	flux.subscribe();

	// The peek subscriber never receives onSubscribe, only replayed signals.
	peekSubscriber.assertNotSubscribed();
	peekSubscriber.assertValues(1, 2);

	// Each of the 3 rails completes, so the peek subscriber sees 3 completions.
	Assertions.assertThatExceptionOfType(AssertionError.class)
	          .isThrownBy(peekSubscriber::assertComplete)
	          .withMessage("Multiple completions: 3");
}