/**
 * Wraps multiple Publishers into a {@link ParallelFlux} which runs them in parallel and
 * unordered.
 *
 * @param <T> the value type
 * @param publishers the array of publishers
 *
 * @return the new {@link ParallelFlux} instance
 */
@SafeVarargs
public static <T> ParallelFlux<T> from(Publisher<T>... publishers) {
	return onAssembly(new ParallelArraySource<>(publishers));
}
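// A minimal usage sketch, not taken from this class: two assumed Flux sources each feed
// their own rail, are mapped in parallel, and are merged back into a single Flux.
static Flux<Integer> fromArrayExample() {
	return ParallelFlux.from(Flux.range(1, 5), Flux.range(6, 5))
	                   .map(i -> i * 10)
	                   .sequential();
}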
/**
 * Allows composing operators, at assembly time, on top of this {@link ParallelFlux}
 * and returns another {@link ParallelFlux} with composed features.
 *
 * @param <U> the output value type
 * @param composer the composer function from {@link ParallelFlux} (this) to another
 * ParallelFlux
 *
 * @return the {@link ParallelFlux} returned by the function
 */
public final <U> ParallelFlux<U> transform(Function<? super ParallelFlux<T>, ParallelFlux<U>> composer) {
	return onAssembly(as(composer));
}
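// A minimal sketch of a reusable composition applied at assembly time; the composer
// variable name is illustrative only.
static Flux<Integer> transformExample() {
	Function<ParallelFlux<Integer>, ParallelFlux<Integer>> evenSquares =
			pf -> pf.filter(i -> i % 2 == 0).map(i -> i * i);
	return Flux.range(1, 10)
	           .parallel()
	           .transform(evenSquares)
	           .sequential();
}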
/**
 * Maps the source values on each 'rail' to another value.
 * <p>
 * Note that the same mapper function may be called from multiple threads
 * concurrently.
 *
 * @param <U> the output value type
 * @param mapper the mapper function turning Ts into Us
 *
 * @return the new {@link ParallelFlux} instance
 */
public final <U> ParallelFlux<U> map(Function<? super T, ? extends U> mapper) {
	Objects.requireNonNull(mapper, "mapper");
	return onAssembly(new ParallelMap<>(this, mapper));
}
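// A minimal sketch, assuming Schedulers.parallel(): the mapper runs concurrently on
// every rail, so it must be stateless or thread-safe.
static Flux<String> mapExample() {
	return Flux.range(1, 8)
	           .parallel()
	           .runOn(Schedulers.parallel())
	           .map(i -> "value-" + i)
	           .sequential();
}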
/**
 * Filters the source values on each 'rail'.
 * <p>
 * Note that the same predicate may be called from multiple threads concurrently.
 *
 * @param predicate the function returning true to keep a value or false to drop a
 * value
 *
 * @return the new {@link ParallelFlux} instance
 */
public final ParallelFlux<T> filter(Predicate<? super T> predicate) {
	Objects.requireNonNull(predicate, "predicate");
	return onAssembly(new ParallelFilter<>(this, predicate));
}
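// A minimal sketch: the predicate is evaluated on whichever rail carries the value, so
// it is subject to the same concurrency caveat as map.
static Flux<Integer> filterExample() {
	return Flux.range(1, 20)
	           .parallel()
	           .filter(i -> i % 3 == 0)
	           .sequential();
}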
/**
 * Collects the elements in each rail into a collection supplied by the
 * collectionSupplier and accumulated with the collector action, emitting the per-rail
 * collection at the end.
 *
 * @param <C> the collection type
 * @param collectionSupplier the supplier of the collection in each rail
 * @param collector the collector, taking the per-rail collection and the current
 * item
 *
 * @return the new {@link ParallelFlux} instance
 */
public final <C> ParallelFlux<C> collect(Supplier<? extends C> collectionSupplier,
		BiConsumer<? super C, ? super T> collector) {
	return onAssembly(new ParallelCollect<>(this, collectionSupplier, collector));
}
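// A minimal sketch, assuming java.util.List and java.util.ArrayList are imported: one
// list per rail, each emitted when its rail completes, so three rails yield three lists.
static Flux<List<Integer>> collectExample() {
	return Flux.range(1, 12)
	           .parallel(3)
	           .<List<Integer>>collect(ArrayList::new, List::add)
	           .sequential();
}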
/**
 * Reduces all values within a 'rail' to a single value (with a possibly different
 * type) via a reducer function that is initialized on each rail from an
 * initialSupplier value.
 * <p>
 * Note that the same reducer function may be called from multiple threads
 * concurrently.
 *
 * @param <R> the reduced output type
 * @param initialSupplier the supplier for the initial value
 * @param reducer the function to reduce a previous output of reduce (or the initial
 * value supplied) with a current source value
 *
 * @return the new {@link ParallelFlux} instance
 */
public final <R> ParallelFlux<R> reduce(Supplier<R> initialSupplier,
		BiFunction<R, ? super T, R> reducer) {
	Objects.requireNonNull(initialSupplier, "initialSupplier");
	Objects.requireNonNull(reducer, "reducer");
	return onAssembly(new ParallelReduceSeed<>(this, initialSupplier, reducer));
}
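// A minimal sketch: every rail starts from its own seed (0) and emits one per-rail sum,
// so four rails produce four partial sums that sequential() then merges.
static Flux<Integer> reduceExample() {
	return Flux.range(1, 100)
	           .parallel(4)
	           .reduce(() -> 0, (sum, next) -> sum + next)
	           .sequential();
}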
/**
 * Takes a Publisher and prepares to consume it on {@code parallelism} number of 'rails',
 * in a round-robin fashion, using a custom prefetch amount and queue to hold the source
 * Publisher's values.
 *
 * @param <T> the value type
 * @param source the source Publisher
 * @param parallelism the number of parallel rails
 * @param prefetch the number of values to prefetch from the source
 * @param queueSupplier the queue structure supplier to hold the prefetched values
 * from the source until there is a rail ready to process it
 *
 * @return the new {@link ParallelFlux} instance
 */
public static <T> ParallelFlux<T> from(Publisher<? extends T> source,
		int parallelism,
		int prefetch,
		Supplier<Queue<T>> queueSupplier) {
	Objects.requireNonNull(queueSupplier, "queueSupplier");
	Objects.requireNonNull(source, "source");
	return onAssembly(new ParallelSource<>(source, parallelism, prefetch, queueSupplier));
}
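// A minimal sketch: four rails, a prefetch of 64 per rail, and the same Queues.get
// supplier used elsewhere in this class to hold the prefetched values.
static ParallelFlux<Integer> fromSourceExample() {
	return ParallelFlux.from(Flux.range(1, 1_000), 4, 64, Queues.get(64));
}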
/**
 * Generates and concatenates Publishers on each 'rail', optionally delaying errors
 * and using the given prefetch amount for generating Publishers upfront.
 *
 * @param <R> the result type
 * @param mapper the function to map each rail's value into a Publisher
 * @param prefetch the number of items to prefetch from each inner Publisher
 * @param errorMode the error handling, i.e., when to report errors from the main
 * source and the inner Publishers (immediate, boundary, end)
 *
 * @return the new {@link ParallelFlux} instance
 */
final <R> ParallelFlux<R> concatMap(Function<? super T, ? extends Publisher<? extends R>> mapper,
		int prefetch,
		ErrorMode errorMode) {
	return onAssembly(new ParallelConcatMap<>(this, mapper, Queues.get(prefetch), prefetch,
			errorMode));
}
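// A hedged sketch of how a public overload might delegate to the variant above with
// immediate error reporting; the method name and exact public surface are assumptions,
// only ErrorMode's immediate/boundary/end options are taken from the Javadoc above.
final <R> ParallelFlux<R> concatMapImmediateExample(
		Function<? super T, ? extends Publisher<? extends R>> mapper, int prefetch) {
	return concatMap(mapper, prefetch, ErrorMode.IMMEDIATE);
}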
/**
 * Specifies where each 'rail' will observe its incoming values, with possible
 * work-stealing and the given prefetch amount.
 * <p>
 * The operator will call {@code Scheduler.createWorker()} as many times as this
 * ParallelFlux's parallelism level.
 * <p>
 * No assumptions are made about the Scheduler's parallelism level; if the Scheduler's
 * parallelism level is lower than the ParallelFlux's, some rails may end up on
 * the same thread/worker.
 * <p>
 * This operator doesn't require the Scheduler to be trampolining as it does its own
 * built-in trampolining logic.
 *
 * @param scheduler the scheduler to use
 * @param prefetch the number of values to request on each 'rail' from the source
 *
 * @return the new {@link ParallelFlux} instance
 */
public final ParallelFlux<T> runOn(Scheduler scheduler, int prefetch) {
	Objects.requireNonNull(scheduler, "scheduler");
	return onAssembly(new ParallelRunOn<>(this, scheduler, prefetch, Queues.get(prefetch)));
}
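// A minimal sketch, assuming Schedulers.parallel(): each of the four rails gets its own
// worker and prefetches 32 values at a time from the upstream.
static Flux<Integer> runOnExample() {
	return Flux.range(1, 64)
	           .parallel(4)
	           .runOn(Schedulers.parallel(), 32)
	           .map(i -> i * 2)
	           .sequential();
}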
/**
 * Generates and flattens Publishers on each 'rail', optionally delaying errors,
 * limiting the total number of simultaneous subscriptions to the inner Publishers, and
 * using the given prefetch amount for the inner Publishers.
 *
 * @param <R> the result type
 * @param mapper the function to map each rail's value into a Publisher
 * @param delayError should the errors from the main and the inner sources be delayed
 * until all of them terminate?
 * @param maxConcurrency the maximum number of simultaneous subscriptions to the
 * generated inner Publishers
 * @param prefetch the number of items to prefetch from each inner Publisher
 *
 * @return the new {@link ParallelFlux} instance
 */
public final <R> ParallelFlux<R> flatMap(Function<? super T, ? extends Publisher<? extends R>> mapper,
		boolean delayError,
		int maxConcurrency,
		int prefetch) {
	return onAssembly(new ParallelFlatMap<>(this, mapper, delayError, maxConcurrency,
			Queues.get(maxConcurrency), prefetch, Queues.get(prefetch)));
}
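// A minimal sketch, assuming Mono.fromCallable for the inner work: errors are not
// delayed, at most 8 inner publishers are subscribed at once, and 32 items are
// prefetched from each of them.
static Flux<Integer> flatMapExample() {
	return Flux.range(1, 100)
	           .parallel()
	           .runOn(Schedulers.parallel())
	           .flatMap(i -> Mono.fromCallable(() -> i * i), false, 8, 32)
	           .sequential();
}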
/**
 * Observes all Reactive Streams signals on each 'rail' and traces them through the
 * given logging category, {@link Level} and {@link SignalType} options.
 *
 * @return the new {@link ParallelFlux} instance
 */
public final ParallelFlux<T> log(@Nullable String category,
		Level level,
		boolean showOperatorLine,
		SignalType... options) {
	return onAssembly(new ParallelLog<>(this,
			new SignalLogger<>(this, category, level, showOperatorLine, options)));
}
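// A minimal usage sketch, assuming java.util.logging.Level is imported: traces onNext
// and onError signals on two rails under an illustrative category name.
static Flux<Integer> logExample() {
	return Flux.range(1, 10)
	           .parallel(2)
	           .log("parallel.demo", Level.FINE, true, SignalType.ON_NEXT, SignalType.ON_ERROR)
	           .sequential();
}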
@SuppressWarnings("unchecked") static <T> ParallelFlux<T> doOnSignal(ParallelFlux<T> source, @Nullable Consumer<? super T> onNext, @Nullable Consumer<? super T> onAfterNext, @Nullable Consumer<? super Throwable> onError, @Nullable Runnable onComplete, @Nullable Runnable onAfterTerminate, @Nullable Consumer<? super Subscription> onSubscribe, @Nullable LongConsumer onRequest, @Nullable Runnable onCancel) { return onAssembly(new ParallelPeek<>(source, onNext, onAfterNext, onError, onComplete, onAfterTerminate, onSubscribe, onRequest, onCancel)); }