/**
 * Specifies where each 'rail' will observe its incoming values, with no work-stealing
 * and the default prefetch amount.
 * <p>
 * This operator uses the default prefetch size returned by
 * {@code Queues.SMALL_BUFFER_SIZE}.
 * <p>
 * The operator will call {@code Scheduler.createWorker()} as many times as this
 * ParallelFlux's parallelism level is.
 * <p>
 * No assumptions are made about the Scheduler's parallelism level; if the Scheduler's
 * parallelism level is lower than the ParallelFlux's, some rails may end up on
 * the same thread/worker.
 * <p>
 * This operator doesn't require the Scheduler to be trampolining as it does its own
 * built-in trampolining logic.
 *
 * @param scheduler the scheduler to use
 *
 * @return the new {@link ParallelFlux} instance
 */
public final ParallelFlux<T> runOn(Scheduler scheduler) {
	// Delegate to the prefetch variant with the default small buffer size.
	return runOn(scheduler, Queues.SMALL_BUFFER_SIZE);
}
/**
 * Demonstrates splitting a range onto 2 rails running on the parallel scheduler.
 * A latch makes the test actually wait for all 10 values: subscription is
 * asynchronous, so without it the method could return before anything runs.
 */
@Test
public void advancedParallelParallelized() throws InterruptedException {
	CountDownLatch latch = new CountDownLatch(10);
	Flux.range(1, 10)
	    .parallel(2)
	    .runOn(Schedulers.parallel())
	    .subscribe(i -> {
		    System.out.println(Thread.currentThread().getName() + " -> " + i);
		    latch.countDown();
	    });
	// fail loudly instead of silently passing when values never arrive
	Assert.assertTrue("all 10 values should be processed", latch.await(5, TimeUnit.SECONDS));
}
@Test public void testParallelism() throws Exception { Flux<Integer> flux = Flux.just(1, 2, 3); Set<String> threadNames = Collections.synchronizedSet(new TreeSet<>()); AtomicInteger count = new AtomicInteger(); CountDownLatch latch = new CountDownLatch(3); flux // Uncomment line below for failure .cache(1) .parallel(3) .runOn(Schedulers.newElastic("TEST")) .subscribe(i -> { threadNames.add(Thread.currentThread() .getName()); count.incrementAndGet(); latch.countDown(); tryToSleep(1000); }); latch.await(); Assert.assertEquals("Multithreaded count", 3, count.get()); Assert.assertEquals("Multithreaded threads", 3, threadNames.size()); }
/**
 * https://gist.github.com/nithril/444d8373ce67f0a8b853 Contribution by Nicolas Labrot
 * <p>
 * Pushes a random-sized range through 2 rails and waits for every element to reach
 * the subscriber. The dedicated parallel scheduler is disposed in a finally block
 * so its threads are not leaked on failure.
 *
 * @throws InterruptedException on interrupt
 */
@Test
public void testParallelWithJava8StreamsInput() throws InterruptedException {
	Scheduler supplier = Schedulers.newParallel("test-p", 2);
	try {
		int max = ThreadLocalRandom.current()
		                           .nextInt(100, 300);
		CountDownLatch countDownLatch = new CountDownLatch(max);

		Flux<Integer> worker = Flux.range(0, max)
		                           .publishOn(asyncGroup);
		worker.parallel(2)
		      .runOn(supplier)
		      .map(v -> v)
		      .subscribe(v -> countDownLatch.countDown());

		countDownLatch.await(10, TimeUnit.SECONDS);
		// count reaching 0 proves all `max` elements were delivered within the timeout
		Assert.assertEquals(0, countDownLatch.getCount());
	}
	finally {
		// newParallel creates a dedicated scheduler; dispose to avoid leaking threads
		supplier.dispose();
	}
}
/**
 * Builds a ParallelFlux from two source publishers, merges it back to sequential,
 * and checks that all six values (order unspecified across rails) pass through.
 */
@Test
public void fromPublishersSequentialSubscribe() {
	List<Integer> observed = Collections.synchronizedList(new ArrayList<>(10));

	ParallelFlux<Integer> rails = ParallelFlux.from(Flux.range(1, 3), Flux.range(4, 3));
	rails.runOn(Schedulers.parallel())
	     .doOnNext(observed::add)
	     .sequential()
	     .blockLast();

	// rails run concurrently, so only the value set (not the order) is deterministic
	assertThat(observed).hasSize(6)
	                    .containsExactlyInAnyOrder(1, 2, 3, 4, 5, 6);
}
@Test public void testDoOnEachSignalWithError() throws InterruptedException { List<Signal<Integer>> signals = Collections.synchronizedList(new ArrayList<>(4)); ParallelFlux<Integer> flux = Flux.<Integer>error(new IllegalArgumentException("boom")).parallel(2) .runOn(Schedulers.parallel()) .doOnEach(signals::add); //we use a lambda subscriber and latch to avoid using `sequential` CountDownLatch latch = new CountDownLatch(2); flux.subscribe(v -> { }, e -> latch.countDown(), latch::countDown); assertTrue(latch.await(2, TimeUnit.SECONDS)); assertThat(signals).hasSize(2); assertTrue("rail 1 onError expected", signals.get(0) .isOnError()); assertTrue("rail 2 onError expected", signals.get(1) .isOnError()); assertThat(signals.get(0).getThrowable()).as("plain exception rail 1 expected") .hasMessage("boom"); assertThat(signals.get(1).getThrowable()).as("plain exception rail 2 expected") .hasMessage("boom"); }
/**
 * A negative prefetch passed to runOn must be rejected with an
 * IllegalArgumentException naming the offending value.
 */
@Test
public void runOnNegativePrefetchRejected() {
	ParallelFlux<Integer> source = ParallelFlux.from(Mono.just(1));

	Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
	          .isThrownBy(() -> source.runOn(Schedulers.parallel(), -1))
	          .withMessage("prefetch > 0 required but it was -1");
}
/**
 * composeGroup must carry over both the parallelism (3) and the prefetch (123)
 * configured on the upstream ParallelFlux.
 */
@Test
public void composeGroupMaintainsParallelismAndPrefetch() {
	ParallelFlux<Integer> upstream = Flux.range(1, 10)
	                                     .parallel(3)
	                                     .runOn(Schedulers.parallel(), 123);
	ParallelFlux<Integer> composed = upstream.composeGroup(rail -> rail.map(i -> i + 2));

	assertThat(composed.parallelism())
			.as("maintains parallelism")
			.isEqualTo(upstream.parallelism())
			.isEqualTo(3);
	assertThat(composed.getPrefetch())
			.as("maintains prefetch")
			.isEqualTo(upstream.getPrefetch())
			.isEqualTo(123);
}
/**
 * A zero prefetch passed to runOn must be rejected with an
 * IllegalArgumentException naming the offending value.
 */
@Test
public void runOnZeroPrefetchRejected() {
	ParallelFlux<Integer> source = ParallelFlux.from(Mono.just(1));

	Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
	          .isThrownBy(() -> source.runOn(Schedulers.parallel(), 0))
	          .withMessage("prefetch > 0 required but it was 0");
}
/**
 * Collects 1000 values across 3 rails into per-rail lists and verifies that the
 * combined list sizes cover every emitted value.
 */
@Test(timeout = 10000L)
public void collectFromMultipleThread1() throws Exception {
	EmitterProcessor<Integer> head = EmitterProcessor.create();
	AtomicInteger sum = new AtomicInteger();

	int length = 1000;
	int parallelism = 3;
	// collect() emits exactly one list per rail, so wait for `parallelism` lists.
	// (The original derived this count as length / batchSize == 3, which matched
	// the rail count only by coincidence.)
	CountDownLatch latch = new CountDownLatch(parallelism);

	head.publishOn(Schedulers.parallel())
	    .parallel(parallelism)
	    .runOn(Schedulers.parallel())
	    .collect(ArrayList::new, List::add)
	    .subscribe(ints -> {
		    sum.addAndGet(ints.size());
		    latch.countDown();
	    });

	// use `length` instead of the previously hard-coded 1000
	Flux.range(1, length).subscribe(head);
	latch.await();
	// assertEquals gives a diagnostic message on failure, unlike assertTrue(==)
	Assert.assertEquals(length, sum.get());
}
.index() .parallel(PARALLELISM) .runOn(SCHEDULER) .doOnNext(t2 -> disordered.add(t2.getT2())) .ordered(Comparator.comparing(Tuple2::getT1))
/**
 * After transform(), the reported prefetch is the one set by the inner runOn (3),
 * not the value (12) originally passed to ParallelFlux.from.
 */
@Test
public void transformChangesPrefetch() {
	ParallelFlux<Integer> transformed =
			ParallelFlux.from(Flux.range(1, 10), 3, 12, Queues.small())
			            .transform(pf -> pf.runOn(Schedulers.parallel(), 3)
			                               .log()
			                               .hide());

	assertThat(transformed.getPrefetch()).isEqualTo(3);
}
// Verifies the thread barrier when a fuseable upstream crosses into runOn():
// values observed between publishOn(single) and parallel() must stay on the single
// scheduler's thread, while map() after runOn() runs on elastic scheduler threads.
@Test
public void fromFuseableUsesThreadBarrier() {
	// thread names seen between publishOn(single) and the parallel split
	final Set<String> between = new HashSet<>();
	// thread names seen inside the rails after runOn(elastic)
	final ConcurrentHashMap<String, String> processing = new ConcurrentHashMap<>();

	Flux<Integer> test = Flux.range(1, 10)
	                         .publishOn(Schedulers.single(), false, 1)
	                         .doOnNext(v -> between.add(Thread.currentThread()
	                                                          .getName()))
	                         .parallel(2, 1)
	                         .runOn(Schedulers.elastic(), 1)
	                         .map(v -> {
		                         processing.putIfAbsent(Thread.currentThread()
		                                                      .getName(), "");
		                         return v;
	                         })
	                         .sequential();

	StepVerifier.create(test)
	            .expectSubscription()
	            .recordWith(() -> Collections.synchronizedList(new ArrayList<>(10)))
	            .expectNextCount(10)
	            .consumeRecordedWith(r -> assertThat(r).containsExactlyInAnyOrder(1,
			            2, 3, 4, 5, 6, 7, 8, 9, 10))
	            .expectComplete()
	            .verify(Duration.ofSeconds(5));

	// all "between" observations happened on the single scheduler's one thread
	assertThat(between).hasSize(1);
	assertThat(between).first()
	                   .asString()
	                   .startsWith("single-");
	// every rail thread comes from the elastic scheduler
	assertThat(processing.keySet())
			.allSatisfy(k -> assertThat(k).startsWith("elastic-"));
}
/**
 * Mixes groupBy/publishOn with parallel/runOn across two dedicated schedulers and
 * verifies all 10 values are dispatched. Schedulers are disposed in a finally block
 * so their worker threads are not leaked when the assertion fails.
 */
@Test
public void consistentMultithreadingWithPartition() throws InterruptedException {
	Scheduler supplier1 = Schedulers.newParallel("groupByPool", 2);
	Scheduler supplier2 = Schedulers.newParallel("partitionPool", 5);
	CountDownLatch latch = new CountDownLatch(10);

	try {
		/*Disposable c = */Flux.range(1, 10)
		                       .groupBy(n -> n % 2 == 0)
		                       .flatMap(stream -> stream.publishOn(supplier1)
		                                                .log("groupBy-" + stream.key()))
		                       .parallel(5)
		                       .runOn(supplier2)
		                       .sequential()
		                       .publishOn(asyncGroup)
		                       .log("join")
		                       .subscribe(t -> latch.countDown());

		latch.await(30, TimeUnit.SECONDS);
		assertThat("Not totally dispatched: " + latch.getCount(), latch.getCount() == 0);
	}
	finally {
		// dispose even on failure: previously these leaked when the assert threw
		supplier1.dispose();
		supplier2.dispose();
	}
}
// composeGroup exposes each rail as a keyed group, letting the mapper tag values with
// (key + 1) * 100 so the hundreds digit identifies which rail processed the value.
@Test
public void composeGroup() {
	Set<Integer> values = new ConcurrentSkipListSet<>();
	Flux<Integer> flux = Flux.range(1, 10)
	                         .parallel(3)
	                         .runOn(Schedulers.parallel())
	                         .doOnNext(values::add)
	                         .composeGroup(p -> p.log("rail" + p.key())
	                                             .map(i -> (p.key() + 1) * 100 + i))
	                         .sequential();

	// after sort(), values fall into rail bands: 1xx first, then 2xx, then 3xx
	StepVerifier.create(flux.sort())
	            .assertNext(i -> assertThat(i - 100)
			            .isBetween(1, 10))
	            .thenConsumeWhile(i -> i / 100 == 1)
	            .assertNext(i -> assertThat(i - 200)
			            .isBetween(1, 10))
	            .thenConsumeWhile(i -> i / 100 == 2)
	            .assertNext(i -> assertThat(i - 300)
			            .isBetween(1, 10))
	            .thenConsumeWhile(i -> i / 100 == 3)
	            .verifyComplete();

	// doOnNext runs before the mapping, so the raw 1..10 values were captured
	assertThat(values)
			.hasSize(10)
			.contains(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
}
.runOn(scheduler) .map(v -> v + 1) .log("test", Level.INFO, true, SignalType.ON_SUBSCRIBE)
ParallelFlux<Long> flux = Flux.interval(Duration.ofMillis(2)).take(255) .parallel(1) .runOn(scheduler) .doOnNext(i -> onNext(i)) .doOnError(e -> onError(e));
.runOn(Schedulers.newParallel("par", 4)) .transform(flux -> ParallelFlux.from(flux.groups() .flatMap(s -> s.publish()
/**
 * Wraps the pipeline in a ParallelFlux running on the cpu-light scheduler when
 * there is more allowed concurrency than subscribers; otherwise falls back to the
 * default (superclass) behavior.
 */
@Override
public ReactiveProcessor onPipeline(ReactiveProcessor pipeline) {
	reactor.core.scheduler.Scheduler scheduler =
			fromExecutorService(decorateScheduler(getCpuLightScheduler()));
	// guard clause: no spare concurrency -> keep the default pipeline handling
	if (maxConcurrency <= subscribers) {
		return super.onPipeline(pipeline);
	}
	return publisher -> from(publisher).parallel(parallelism)
	                                   .runOn(scheduler)
	                                   .composeGroup(pipeline);
}
/**
 * Parallelizes CPU_LITE_ASYNC processors onto the cpu-light scheduler; any other
 * processing type is delegated to the default (superclass) behavior.
 */
@Override
public ReactiveProcessor onProcessor(ReactiveProcessor processor) {
	reactor.core.scheduler.Scheduler scheduler =
			fromExecutorService(decorateScheduler(getCpuLightScheduler()));
	// guard clause: only CPU_LITE_ASYNC processors get the parallel treatment
	if (processor.getProcessingType() != CPU_LITE_ASYNC) {
		return super.onProcessor(processor);
	}
	return publisher -> from(publisher).transform(processor)
	                                   .parallel(parallelism)
	                                   .runOn(scheduler);
}