/**
 * Generates and flattens Publishers on each 'rail'.
 * <p>
 * Errors are not delayed; this variant uses unbounded concurrency
 * ({@code Integer.MAX_VALUE}) along with the default inner prefetch
 * ({@code Queues.SMALL_BUFFER_SIZE}).
 *
 * @param <R> the result type
 * @param mapper the function to map each rail's value into a Publisher
 *
 * @return the new {@link ParallelFlux} instance
 */
public final <R> ParallelFlux<R> flatMap(Function<? super T, ? extends Publisher<? extends R>> mapper) {
	// Delegate to the most general overload: no error delay, unbounded concurrency, default prefetch.
	return flatMap(mapper, false, Integer.MAX_VALUE, Queues.SMALL_BUFFER_SIZE);
}
/**
 * Generates and flattens Publishers on each 'rail', optionally delaying errors and
 * limiting the total number of simultaneous subscriptions to the inner Publishers.
 * <p>
 * It uses the default inner prefetch ({@code Queues.SMALL_BUFFER_SIZE}).
 *
 * @param <R> the result type
 * @param mapper the function to map each rail's value into a Publisher
 * @param delayError whether errors from the main and the inner sources should be
 * delayed until all of them have terminated
 * @param maxConcurrency the maximum number of simultaneous subscriptions to the
 * generated inner Publishers
 *
 * @return the new {@link ParallelFlux} instance
 */
public final <R> ParallelFlux<R> flatMap(Function<? super T, ? extends Publisher<? extends R>> mapper,
		boolean delayError, int maxConcurrency) {
	// Delegate to the full overload, supplying only the default inner prefetch.
	return flatMap(mapper, delayError, maxConcurrency, Queues.SMALL_BUFFER_SIZE);
}
/**
 * Generates and flattens Publishers on each 'rail', optionally delaying errors.
 * <p>
 * It uses unbounded concurrency ({@code Integer.MAX_VALUE}) along with the default
 * inner prefetch ({@code Queues.SMALL_BUFFER_SIZE}).
 *
 * @param <R> the result type
 * @param mapper the function to map each rail's value into a Publisher
 * @param delayError whether errors from the main and the inner sources should be
 * delayed until all of them have terminated
 *
 * @return the new {@link ParallelFlux} instance
 */
public final <R> ParallelFlux<R> flatMap(Function<? super T, ? extends Publisher<? extends R>> mapper,
		boolean delayError) {
	// Delegate to the full overload: unbounded concurrency, default prefetch.
	return flatMap(mapper, delayError, Integer.MAX_VALUE, Queues.SMALL_BUFFER_SIZE);
}
@Test
public void testFlatMapDelayErrorMaxConcurrency() {
	// One inner source fails immediately; with delayError=true and maxConcurrency=2
	// the remaining values are still delivered before the error is signalled.
	ParallelFlux<Integer> result = ParallelFlux.from(Flux.range(1, 4), 2)
			.flatMap(value -> {
				if (value != 1) {
					return Flux.just(value, 100 * value);
				}
				return Mono.<Integer>error(new IllegalStateException("boom")).hide();
			}, true, 2);

	StepVerifier.create(result)
			.expectNext(2, 200, 3, 300, 4, 400)
			.verifyErrorMessage("boom");
}
@Test
public void testFlatMapDelayError() {
	// One inner source fails immediately; with delayError=true the remaining
	// values are still delivered before the error is signalled.
	ParallelFlux<Integer> result = ParallelFlux.from(Flux.range(1, 4), 2)
			.flatMap(value -> {
				if (value != 1) {
					return Flux.just(value, 100 * value);
				}
				return Mono.<Integer>error(new IllegalStateException("boom")).hide();
			}, true);

	StepVerifier.create(result)
			.expectNext(2, 200, 3, 300, 4, 400)
			.verifyErrorMessage("boom");
}
@Test
public void flatMapUnordered() {
	AssertSubscriber<Integer> subscriber = AssertSubscriber.create();

	// Each value v expands into the triple (v*10+1 .. v*10+3).
	Flux.range(1, 5)
			.parallel()
			.flatMap(value -> Flux.range(value * 10 + 1, 3))
			.sequential()
			.subscribe(subscriber);

	subscriber.assertValues(11, 12, 13, 21, 22, 23, 31, 32, 33, 41, 42, 43, 51, 52, 53)
			.assertNoError()
			.assertComplete();
}
/**
 * Generates and flattens Publishers on each 'rail'.
 * <p>
 * Errors are not delayed; this variant uses unbounded concurrency
 * ({@code Integer.MAX_VALUE}) along with the default inner prefetch
 * ({@code Queues.SMALL_BUFFER_SIZE}).
 *
 * @param <R> the result type
 * @param mapper the function to map each rail's value into a Publisher
 *
 * @return the new {@link ParallelFlux} instance
 */
public final <R> ParallelFlux<R> flatMap(Function<? super T, ? extends Publisher<? extends R>> mapper) {
	// Delegate to the most general overload: no error delay, unbounded concurrency, default prefetch.
	return flatMap(mapper, false, Integer.MAX_VALUE, Queues.SMALL_BUFFER_SIZE);
}
/**
 * Generates and flattens Publishers on each 'rail', optionally delaying errors.
 * <p>
 * It uses unbounded concurrency ({@code Integer.MAX_VALUE}) along with the default
 * inner prefetch ({@code Queues.SMALL_BUFFER_SIZE}).
 *
 * @param <R> the result type
 * @param mapper the function to map each rail's value into a Publisher
 * @param delayError whether errors from the main and the inner sources should be
 * delayed until all of them have terminated
 *
 * @return the new {@link ParallelFlux} instance
 */
public final <R> ParallelFlux<R> flatMap(Function<? super T, ? extends Publisher<? extends R>> mapper,
		boolean delayError) {
	// Delegate to the full overload: unbounded concurrency, default prefetch.
	return flatMap(mapper, delayError, Integer.MAX_VALUE, Queues.SMALL_BUFFER_SIZE);
}
/**
 * Generates and flattens Publishers on each 'rail', optionally delaying errors and
 * limiting the total number of simultaneous subscriptions to the inner Publishers.
 * <p>
 * It uses the default inner prefetch ({@code Queues.SMALL_BUFFER_SIZE}).
 *
 * @param <R> the result type
 * @param mapper the function to map each rail's value into a Publisher
 * @param delayError whether errors from the main and the inner sources should be
 * delayed until all of them have terminated
 * @param maxConcurrency the maximum number of simultaneous subscriptions to the
 * generated inner Publishers
 *
 * @return the new {@link ParallelFlux} instance
 */
public final <R> ParallelFlux<R> flatMap(Function<? super T, ? extends Publisher<? extends R>> mapper,
		boolean delayError, int maxConcurrency) {
	// Delegate to the full overload, supplying only the default inner prefetch.
	return flatMap(mapper, delayError, maxConcurrency, Queues.SMALL_BUFFER_SIZE);
}
/**
 * Executes each {@link Runnable} emitted by the given source in parallel on the
 * elastic scheduler, blocking the calling thread until the source completes
 * (or propagating any error raised by a task).
 * <p>
 * NOTE(review): {@code Schedulers.elastic()} is deprecated in recent Reactor
 * versions in favor of {@code Schedulers.boundedElastic()} — confirm which
 * Reactor version this targets before changing it.
 *
 * @param runnables the source of tasks to execute; consumed until completion
 */
public static void runParallel(Flux<Runnable> runnables) {
	runnables
			.publishOn(Schedulers.elastic())
			.parallel()
			.runOn(Schedulers.elastic())
			// Run each task for its side effect only; emit nothing downstream.
			.flatMap(runnable -> {
				runnable.run();
				return Mono.empty();
			})
			.sequential()
			// Discard emissions and wait for the terminal signal.
			.then()
			.block();
}
}