/**
 * Sorts the 'rails' of this {@link ParallelFlux} and returns a Publisher that
 * sequentially picks the smallest next value from the rails.
 * <p>
 * This operator requires a finite source ParallelFlux.
 *
 * @param comparator the comparator to use
 * @param capacityHint the expected number of total elements
 *
 * @return the new Flux instance
 */
public final Flux<T> sorted(Comparator<? super T> comparator, int capacityHint) {
	// pre-size each rail's buffer with its share of the expected total (+1 to avoid 0)
	int perRailCapacity = capacityHint / parallelism() + 1;

	ParallelFlux<List<T>> collected = reduce(
			() -> new ArrayList<>(perRailCapacity),
			(list, value) -> {
				list.add(value);
				return list;
			});

	// sort each rail's buffered list in place before the final merge
	ParallelFlux<List<T>> sortedRails = collected.map(list -> {
		list.sort(comparator);
		return list;
	});

	return Flux.onAssembly(new ParallelMergeSort<>(sortedRails, comparator));
}
/**
 * Sorts the 'rails' according to the comparator and returns a full sorted list as a
 * Publisher.
 * <p>
 * This operator requires a finite source ParallelFlux.
 *
 * @param comparator the comparator to compare elements
 * @param capacityHint the expected number of total elements
 *
 * @return the new Mono instance
 */
public final Mono<List<T>> collectSortedList(Comparator<? super T> comparator, int capacityHint) {
	// pre-size each rail's buffer with its share of the expected total (+1 to avoid 0)
	int perRailCapacity = capacityHint / parallelism() + 1;

	ParallelFlux<List<T>> collected = reduce(
			() -> new ArrayList<>(perRailCapacity),
			(list, value) -> {
				list.add(value);
				return list;
			});

	// sort each rail's buffered list in place, then pairwise-merge the sorted rails
	ParallelFlux<List<T>> sortedRails = collected.map(list -> {
		list.sort(comparator);
		return list;
	});

	return sortedRails.reduce((left, right) -> sortedMerger(left, right, comparator));
}
// Verifies that a ParallelFlux without an explicit name() falls back to its
// step name ("map" here, from the last operator) for Scannable.name().
@Test
public void scannableNameParallelFluxDefaultsToToString() {
	final ParallelFlux<Integer> underTest = ParallelFlux.from(Mono.just(1))
	                                                    .map(i -> i + 10);

	String stepName = Scannable.from(underTest).stepName();

	assertThat(Scannable.from(underTest).name())
			.isEqualTo(stepName)
			.isEqualTo("map");
}
/**
 * https://gist.github.com/nithril/444d8373ce67f0a8b853 Contribution by Nicolas Labrot
 * <p>
 * Runs a randomly-sized range through a 2-rail parallel pipeline and checks that
 * every element was delivered (latch reaches zero).
 *
 * @throws InterruptedException on interrupt
 */
@Test
public void testParallelWithJava8StreamsInput() throws InterruptedException {
	Scheduler supplier = Schedulers.newParallel("test-p", 2);
	try {
		int max = ThreadLocalRandom.current()
		                           .nextInt(100, 300);
		CountDownLatch countDownLatch = new CountDownLatch(max);

		Flux<Integer> worker = Flux.range(0, max)
		                           .publishOn(asyncGroup);
		worker.parallel(2)
		      .runOn(supplier)
		      .map(v -> v)
		      .subscribe(v -> countDownLatch.countDown());

		// await() returns false on timeout: assert it so a stalled pipeline
		// fails with a clear message instead of relying only on the count check
		Assert.assertTrue("not all " + max + " values were delivered within 10s",
				countDownLatch.await(10, TimeUnit.SECONDS));
		Assert.assertEquals(0, countDownLatch.getCount());
	}
	finally {
		// the original leaked this scheduler's threads across tests
		supplier.dispose();
	}
}
// composeGroup must keep the rail count of its source while resetting the
// prefetch to the default queue size.
@Test
public void composeGroupMaintainsParallelism() {
	ParallelFlux<Integer> source = Flux.range(1, 10)
	                                   .parallel(3)
	                                   .map(i -> i + 2);

	ParallelFlux<Integer> composed = source.composeGroup(rail -> rail.map(i -> i + 2));

	assertThat(composed.parallelism())
			.as("maintains parallelism")
			.isEqualTo(source.parallelism())
			.isEqualTo(3);

	assertThat(source.getPrefetch())
			.as("parallel source no prefetch")
			.isEqualTo(-1);

	assertThat(composed.getPrefetch())
			.as("reset prefetch to default")
			.isNotEqualTo(source.getPrefetch())
			.isEqualTo(Queues.SMALL_BUFFER_SIZE);
}
// Checks the thread barrier when a fuseable upstream feeds parallel():
// the doOnNext before parallel() must run on the single-threaded publishOn
// scheduler, while the map() inside the rails runs on elastic threads.
@Test
public void fromFuseableUsesThreadBarrier() {
	final Set<String> upstreamThreads = new HashSet<>();
	final ConcurrentHashMap<String, String> railThreads = new ConcurrentHashMap<>();

	Flux<Integer> scenario = Flux.range(1, 10)
	                             .publishOn(Schedulers.single(), false, 1)
	                             .doOnNext(v -> upstreamThreads.add(Thread.currentThread()
	                                                                      .getName()))
	                             .parallel(2, 1)
	                             .runOn(Schedulers.elastic(), 1)
	                             .map(v -> {
		                             railThreads.putIfAbsent(Thread.currentThread()
		                                                           .getName(), "");
		                             return v;
	                             })
	                             .sequential();

	StepVerifier.create(scenario)
	            .expectSubscription()
	            .recordWith(() -> Collections.synchronizedList(new ArrayList<>(10)))
	            .expectNextCount(10)
	            .consumeRecordedWith(r -> assertThat(r).containsExactlyInAnyOrder(1,
			            2, 3, 4, 5, 6, 7, 8, 9, 10))
	            .expectComplete()
	            .verify(Duration.ofSeconds(5));

	assertThat(upstreamThreads).hasSize(1);
	assertThat(upstreamThreads).first()
	                           .asString()
	                           .startsWith("single-");
	assertThat(railThreads.keySet())
			.allSatisfy(k -> assertThat(k).startsWith("elastic-"));
}
// NOTE(review): fragment — `source`, `ncpu` and `scheduler` are declared outside this
// view; presumably ncpu rails are created from `source` and run on `scheduler`.
// Each value is incremented, then the rails are merged back into one sequential Flux.
// The log() call uses category "test" at INFO, restricted to ON_SUBSCRIBE signals;
// the `true` flag presumably enables operator-line info — confirm against the
// Flux#log(String, Level, boolean, SignalType...) overload.
Flux<Integer> result = ParallelFlux.from(source, ncpu) .runOn(scheduler) .map(v -> v + 1) .log("test", Level.INFO, true, SignalType.ON_SUBSCRIBE) .sequential();
/**
 * Sorts the 'rails' according to the comparator and returns a full sorted list as a
 * Publisher.
 * <p>
 * This operator requires a finite source ParallelFlux.
 *
 * @param comparator the comparator to compare elements
 * @param capacityHint the expected number of total elements
 *
 * @return the new Mono instance
 */
public final Mono<List<T>> collectSortedList(Comparator<? super T> comparator, int capacityHint) {
	// divide the expected total across the rails (+1 so a small hint never yields 0)
	int perRailCapacity = capacityHint / parallelism() + 1;

	ParallelFlux<List<T>> buffered = reduce(
			() -> new ArrayList<>(perRailCapacity),
			(acc, next) -> {
				acc.add(next);
				return acc;
			});

	// each rail sorts its own buffer; the cross-rail reduce merges sorted lists
	return buffered.map(buffer -> {
		               buffer.sort(comparator);
		               return buffer;
	               })
	               .reduce((first, second) -> sortedMerger(first, second, comparator));
}
/**
 * Sorts the 'rails' of this {@link ParallelFlux} and returns a Publisher that
 * sequentially picks the smallest next value from the rails.
 * <p>
 * This operator requires a finite source ParallelFlux.
 *
 * @param comparator the comparator to use
 * @param capacityHint the expected number of total elements
 *
 * @return the new Flux instance
 */
public final Flux<T> sorted(Comparator<? super T> comparator, int capacityHint) {
	// divide the expected total across the rails (+1 so a small hint never yields 0)
	int perRailCapacity = capacityHint / parallelism() + 1;

	ParallelFlux<List<T>> buffered = reduce(
			() -> new ArrayList<>(perRailCapacity),
			(acc, next) -> {
				acc.add(next);
				return acc;
			});

	// each rail sorts its own buffer; ParallelMergeSort picks the smallest head
	ParallelFlux<List<T>> perRailSorted = buffered.map(buffer -> {
		buffer.sort(comparator);
		return buffer;
	});

	return Flux.onAssembly(new ParallelMergeSort<>(perRailSorted, comparator));
}