/**
 * Maps each rail's value into a {@link Publisher} and flattens the resulting
 * inner sequences on that same rail.
 * <p>
 * Inner errors are propagated immediately (not delayed), concurrency is
 * unbounded and the inner prefetch uses the default small buffer size.
 *
 * @param <R> the element type of the inner Publishers
 * @param mapper maps a rail value to a {@link Publisher} to flatten
 *
 * @return a new {@link ParallelFlux} emitting the flattened inner values
 */
public final <R> ParallelFlux<R> flatMap(Function<? super T, ? extends Publisher<? extends R>> mapper) {
	final boolean delayErrors = false;
	final int maxConcurrency = Integer.MAX_VALUE;
	return flatMap(mapper, delayErrors, maxConcurrency, Queues.SMALL_BUFFER_SIZE);
}
@Test
public void asChangesParallelism() {
	ParallelFlux<Integer> threeRails = ParallelFlux.from(Flux.range(1, 10), 3);

	ParallelFlux<Integer> reParallelized = threeRails.as(pf ->
			ParallelFlux.from(pf.sequential(), 5).log("secondParallel"));

	// `as` replaces the whole ParallelFlux, so the parallelism of the
	// ParallelFlux returned by the function wins over the original's.
	assertThat(reParallelized.parallelism()).isEqualTo(5);
}
/**
 * Composes operators on top of this {@link ParallelFlux} at assembly time and
 * returns the resulting {@link ParallelFlux}, letting any assembly hook
 * decorate it.
 *
 * @param <U> the output value type
 * @param composer the function turning this ParallelFlux into another one
 *
 * @return the {@link ParallelFlux} produced by the composer, passed through
 * {@code onAssembly}
 */
public final <U> ParallelFlux<U> transform(Function<? super ParallelFlux<T>, ParallelFlux<U>> composer) {
	ParallelFlux<U> composed = as(composer);
	return onAssembly(composed);
}
/**
 * Composes operators off the 'rails', exposed as individual {@link GroupedFlux}
 * instances keyed by the zero-based rail index, then
 * {@link Flux#parallel parallelizes} the transformed groups back.
 * <p>
 * As with {@link #groups()}, requests and cancellation compose through, and
 * cancelling only one rail may result in undefined behavior.
 *
 * @param composer the composition function applied to each {@link GroupedFlux rail}
 * @param <U> the type of the resulting parallelized flux
 * @return a {@link ParallelFlux} of the composed groups
 */
public final <U> ParallelFlux<U> composeGroup(Function<? super GroupedFlux<Integer, T>, ? extends Publisher<? extends U>> composer) {
	Flux<U> flattened = groups().flatMap(composer::apply);
	int prefetch = getPrefetch();
	// A negative prefetch means "unspecified": fall back to the variant
	// that does not force a prefetch amount or a queue supplier.
	if (prefetch <= -1) {
		return from(flattened, parallelism());
	}
	return from(flattened, parallelism(), prefetch, Queues.small());
}
@Override
public void subscribe(CoreSubscriber<? super T> actual) {
	// Single coordinator that merges all rails back into one sequence,
	// sized by the source's parallelism and the configured prefetch/queues.
	MergeSequentialMain<T> parent = new MergeSequentialMain<>(actual, source
			.parallelism(), prefetch, queueSupplier);
	// onSubscribe is delivered to the downstream BEFORE the rails are
	// subscribed, so requests can be accumulated before any rail emits.
	actual.onSubscribe(parent);
	source.subscribe(parent.subscribers);
}
// Build a parallel pipeline over `ncpu` rails, run each rail on `scheduler`,
// and log only the ON_SUBSCRIBE signals (category "test", level INFO) before
// merging the rails back into a single Flux.
Flux<Integer> result = ParallelFlux.from(source, ncpu)
		.runOn(scheduler)
		.map(v -> v + 1)
		.log("test", Level.INFO, true, SignalType.ON_SUBSCRIBE)
		.sequential();
.runOn(Schedulers.newParallel("par", 4)) .transform(flux -> ParallelFlux.from(flux.groups() .flatMap(s -> s.publish() .autoConnect() Queues.SMALL_BUFFER_SIZE, Queues.XS_BUFFER_SIZE))) .sequential() .retry()) .then(() -> {
@Test
public void fromPublishersSequentialSubscribe() {
	List<Integer> seen = Collections.synchronizedList(new ArrayList<>(10));

	ParallelFlux.from(Flux.range(1, 3), Flux.range(4, 3))
	            .runOn(Schedulers.parallel())
	            .doOnNext(seen::add)
	            .sequential()
	            .blockLast();

	// Rails run concurrently, so only the multiset of values is deterministic.
	assertThat(seen).hasSize(6)
	                .containsExactlyInAnyOrder(1, 2, 3, 4, 5, 6);
}
@Test
public void composeGroupMaintainsParallelismAndPrefetch() {
	final int railCount = 3;
	final int expectedPrefetch = 123;

	ParallelFlux<Integer> upstream = Flux.range(1, 10)
	                                     .parallel(railCount)
	                                     .runOn(Schedulers.parallel(), expectedPrefetch);

	ParallelFlux<Integer> composed = upstream.composeGroup(rail -> rail.map(i -> i + 2));

	// composeGroup must not alter the upstream's rail count…
	assertThat(composed.parallelism())
			.as("maintains parallelism")
			.isEqualTo(upstream.parallelism())
			.isEqualTo(railCount);

	// …nor its prefetch amount.
	assertThat(composed.getPrefetch())
			.as("maintains prefetch")
			.isEqualTo(upstream.getPrefetch())
			.isEqualTo(expectedPrefetch);
}
/**
 * https://gist.github.com/nithril/444d8373ce67f0a8b853 Contribution by Nicolas Labrot
 *
 * Runs a random-sized range through two rails and verifies every value
 * reaches the subscriber within the timeout.
 *
 * @throws InterruptedException on interrupt
 */
@Test
public void testParallelWithJava8StreamsInput() throws InterruptedException {
	Scheduler supplier = Schedulers.newParallel("test-p", 2);
	try {
		int max = ThreadLocalRandom.current()
		                           .nextInt(100, 300);
		CountDownLatch countDownLatch = new CountDownLatch(max);

		Flux<Integer> worker = Flux.range(0, max)
		                           .publishOn(asyncGroup);
		worker.parallel(2)
		      .runOn(supplier)
		      .map(v -> v)
		      .subscribe(v -> countDownLatch.countDown());

		// Check the await result directly instead of ignoring it and
		// re-reading the count: fails with a clear message on timeout.
		Assert.assertTrue("Not all " + max + " values were received within 10s",
				countDownLatch.await(10, TimeUnit.SECONDS));
	}
	finally {
		// This test created the scheduler, so it must dispose it; otherwise
		// its worker threads leak into subsequent tests.
		supplier.dispose();
	}
}
/**
 * Executes every {@link Runnable} emitted by the given {@link Flux} on the
 * elastic scheduler, blocking until all of them have completed.
 *
 * @param runnables the tasks to execute in parallel
 */
public static void runParallel(Flux<Runnable> runnables) {
	runnables.publishOn(Schedulers.elastic())
	         .parallel()
	         .runOn(Schedulers.elastic())
	         .flatMap(task -> {
		         task.run();
		         return Mono.empty();
	         })
	         .sequential()
	         .then()
	         .block();
}
}
@Test
public void fromFuseableUsesThreadBarrier() {
	// Threads observed between publishOn(single) and the parallel section.
	final Set<String> between = new HashSet<>();
	// Threads observed inside the parallel section after runOn(elastic).
	final ConcurrentHashMap<String, String> processing = new ConcurrentHashMap<>();

	Flux<Integer> test = Flux.range(1, 10)
	                         .publishOn(Schedulers.single(), false, 1)
	                         .doOnNext(v -> between.add(Thread.currentThread()
	                                                          .getName()))
	                         .parallel(2, 1)
	                         .runOn(Schedulers.elastic(), 1)
	                         .map(v -> {
		                         processing.putIfAbsent(Thread.currentThread()
		                                                      .getName(), "");
		                         return v;
	                         })
	                         .sequential();

	StepVerifier.create(test)
	            .expectSubscription()
	            .recordWith(() -> Collections.synchronizedList(new ArrayList<>(10)))
	            .expectNextCount(10)
	            .consumeRecordedWith(r -> assertThat(r).containsExactlyInAnyOrder(1,
			            2, 3, 4, 5, 6, 7, 8, 9, 10))
	            .expectComplete()
	            .verify(Duration.ofSeconds(5));

	// Exactly one "single-" thread emitted upstream of the parallel section:
	// the thread barrier prevented fusion from leaking work across threads.
	assertThat(between).hasSize(1);
	assertThat(between).first()
	                   .asString()
	                   .startsWith("single-");
	// All rail processing happened on elastic scheduler threads.
	assertThat(processing.keySet())
			.allSatisfy(k -> assertThat(k).startsWith("elastic-"));
}
@Test
public void composeGroup() {
	Set<Integer> seen = new ConcurrentSkipListSet<>();

	// Each rail's key is folded into its values: rail k produces (k+1)*100 + i.
	Flux<Integer> flux = Flux.range(1, 10)
	                         .parallel(3)
	                         .runOn(Schedulers.parallel())
	                         .doOnNext(seen::add)
	                         .composeGroup(rail -> rail.log("rail" + rail.key())
	                                                   .map(i -> (rail.key() + 1) * 100 + i))
	                         .sequential();

	StepVerifier.create(flux.sort())
	            .assertNext(i -> assertThat(i - 100).isBetween(1, 10))
	            .thenConsumeWhile(i -> i / 100 == 1)
	            .assertNext(i -> assertThat(i - 200).isBetween(1, 10))
	            .thenConsumeWhile(i -> i / 100 == 2)
	            .assertNext(i -> assertThat(i - 300).isBetween(1, 10))
	            .thenConsumeWhile(i -> i / 100 == 3)
	            .verifyComplete();

	// doOnNext ran before the composition, so it saw the raw 1..10 values.
	assertThat(seen).hasSize(10)
	                .contains(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
}
@Test
public void runOnNegativePrefetchRejected() {
	ParallelFlux<Integer> source = ParallelFlux.from(Mono.just(1));

	// runOn must reject a non-positive prefetch before doing any work.
	Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
	          .isThrownBy(() -> source.runOn(Schedulers.parallel(), -1))
	          .withMessage("prefetch > 0 required but it was -1");
}
@Test
public void sequentialZeroPrefetchRejected() {
	ParallelFlux<Integer> source = ParallelFlux.from(Mono.just(1));

	// sequential must reject a zero prefetch before doing any work.
	Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
	          .isThrownBy(() -> source.sequential(0))
	          .withMessage("prefetch > 0 required but it was 0");
}
@Test
public void parallelFluxCheckpointEmpty() {
	StringWriter stackDump = new StringWriter();

	Flux<Integer> tested = Flux.range(1, 10)
	                           .parallel(2)
	                           // mapping to null triggers an onError downstream
	                           .composeGroup(g -> g.map(i -> (Integer) null))
	                           .checkpoint()
	                           .sequential()
	                           .doOnError(t -> t.printStackTrace(new PrintWriter(stackDump)));

	StepVerifier.create(tested)
	            .verifyError();

	// An empty checkpoint() should still attach the assembly trace header.
	assertThat(stackDump.toString())
			.contains("Assembly trace from producer [reactor.core.publisher.ParallelSource] :");
}
/**
 * Pushes 1000 values through 3 rails that each collect into a list, and
 * verifies the collected list sizes sum back to the input length.
 * The method-level timeout guards the un-timed {@code latch.await()}.
 */
@Test(timeout = 10000L)
public void collectFromMultipleThread1() throws Exception {
	EmitterProcessor<Integer> head = EmitterProcessor.create();
	AtomicInteger sum = new AtomicInteger();

	int length = 1000;
	int batchSize = 333;
	// Each rail completes with one collected list, releasing the latch once.
	int latchCount = length / batchSize;
	CountDownLatch latch = new CountDownLatch(latchCount);

	head.publishOn(Schedulers.parallel())
	    .parallel(3)
	    .runOn(Schedulers.parallel())
	    .collect(ArrayList::new, List::add)
	    .subscribe(ints -> {
		    sum.addAndGet(ints.size());
		    latch.countDown();
	    });

	Flux.range(1, 1000).subscribe(head);
	latch.await();

	// assertEquals reports expected vs actual on failure, unlike the
	// original assertTrue(sum.get() == length) which only said "false".
	Assert.assertEquals(length, sum.get());
}
@Test public void testDoOnEachSignalWithError() throws InterruptedException { List<Signal<Integer>> signals = Collections.synchronizedList(new ArrayList<>(4)); ParallelFlux<Integer> flux = Flux.<Integer>error(new IllegalArgumentException("boom")).parallel(2) .runOn(Schedulers.parallel()) .doOnEach(signals::add); //we use a lambda subscriber and latch to avoid using `sequential` CountDownLatch latch = new CountDownLatch(2); flux.subscribe(v -> { }, e -> latch.countDown(), latch::countDown); assertTrue(latch.await(2, TimeUnit.SECONDS)); assertThat(signals).hasSize(2); assertTrue("rail 1 onError expected", signals.get(0) .isOnError()); assertTrue("rail 2 onError expected", signals.get(1) .isOnError()); assertThat(signals.get(0).getThrowable()).as("plain exception rail 1 expected") .hasMessage("boom"); assertThat(signals.get(1).getThrowable()).as("plain exception rail 2 expected") .hasMessage("boom"); }