/**
 * See https://github.com/reactor/reactor/issues/451
 * @throws Exception for convenience
 */
@Test
public void partitionByHashCodeShouldNeverCreateMoreStreamsThanSpecified() throws Exception {
    Flux<Integer> stream = Flux.range(-10, 20)
                               .map(Integer::intValue);

    assertThat(stream.parallel(2)
                     .groups()
                     .count()
                     .block(), is(equalTo(2L)));
}
/**
 * Allows composing operators off the 'rails', as individual {@link GroupedFlux} instances keyed by
 * the zero-based rail index. The transformed groups are {@link Flux#parallel parallelized} back
 * once the transformation has been applied.
 * <p>
 * Note that, as in {@link #groups()}, requests and cancellation compose through, and
 * cancelling only one rail may result in undefined behavior.
 *
 * @param composer the composition function to apply on each {@link GroupedFlux rail}
 * @param <U> the type of the resulting parallelized flux
 * @return a {@link ParallelFlux} of the composed groups
 */
public final <U> ParallelFlux<U> composeGroup(Function<? super GroupedFlux<Integer, T>,
        ? extends Publisher<? extends U>> composer) {
    if (getPrefetch() > -1) {
        return from(groups().flatMap(composer::apply),
                parallelism(), getPrefetch(),
                Queues.small());
    }
    else {
        return from(groups().flatMap(composer::apply), parallelism());
    }
}
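/*
 * A minimal, self-contained usage sketch for composeGroup, assuming the reactor-core
 * ParallelFlux API shown above; the class name and the labelling scenario are illustrative
 * and not taken from the snippets in this section. Each rail reaches the composer as a
 * GroupedFlux keyed by its zero-based index, and the composed rails are merged back into
 * a ParallelFlux.
 */
import reactor.core.publisher.Flux;
import reactor.core.publisher.ParallelFlux;
import reactor.core.scheduler.Schedulers;

public class ComposeGroupExample {

    public static void main(String[] args) {
        // Split the source onto 2 rails, then label each value with its rail index.
        ParallelFlux<String> labelled = Flux.range(1, 8)
                .parallel(2)
                .composeGroup(rail -> rail.publishOn(Schedulers.parallel())
                                          .map(v -> "rail " + rail.key() + " -> " + v));

        // Merge the rails back into a single Flux and block until completion.
        labelled.sequential()
                .doOnNext(System.out::println)
                .blockLast();
    }
}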
deferred.publishOn(asyncGroup)
        .parallel(8)
        .groups()
        .subscribe(stream -> stream.publishOn(asyncGroup)
                                   .bufferTimeout(1000 / 8, Duration.ofSeconds(1))
@Test
public void testBeyondLongMaxMicroBatching() throws InterruptedException {
    List<Integer> tasks = IntStream.range(0, 1500)
                                   .boxed()
                                   .collect(Collectors.toList());

    CountDownLatch countDownLatch = new CountDownLatch(tasks.size());
    Flux<Integer> worker = Flux.fromIterable(tasks)
                               .log("before", Level.FINE)
                               .publishOn(asyncGroup);

    /*Disposable tail = */worker.log("after", Level.FINE)
          .parallel(2)
          .groups()
          .subscribe(s -> s.log("w" + s.key(), Level.FINE)
                           .publishOn(asyncGroup)
                           .map(v -> v)
                           .subscribe(v -> countDownLatch.countDown(),
                                      Throwable::printStackTrace));

    countDownLatch.await(5, TimeUnit.SECONDS);
    Assert.assertEquals("Count max: " + tasks.size(), 0, countDownLatch.getCount());
}
mapManydeferred = EmitterProcessor.create();
mapManydeferred.parallel(4)
               .groups()
               .subscribe(substream -> substream.publishOn(asyncGroup)
                                                .subscribe(i -> latch.countDown()));
batchingStreamDef.publishOn(asyncGroup)
                 .parallel(PARALLEL_STREAMS)
                 .groups()
                 .subscribe(substream -> substream.hide()
                                                  .publishOn(asyncGroup)
                                                  .bufferTimeout(BATCH_SIZE, Duration.ofMillis(TIMEOUT))
.groups()
.flatMap(stream -> stream.publishOn(Schedulers.parallel())
                         .map((String str) -> {
deferred.publishOn(asyncGroup)
        .parallel(2)
        .groups()
        .subscribe(stream -> stream.publishOn(asyncGroup)
                                   .map(i -> i)
.groups()
.subscribe(stream -> stream.publishOn(Schedulers.parallel())
                           .map(o -> {
@Test
public void groupMerge() {
    AssertSubscriber<Integer> ts = AssertSubscriber.create();

    Flux.range(1, 10)
        .parallel()
        .groups()
        .flatMap(v -> v)
        .subscribe(ts);

    ts.assertContainValues(new HashSet<>(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)))
      .assertNoError()
      .assertComplete();
}
@Test
public void shouldCorrectlyDispatchBatchedTimeout() throws InterruptedException {
    long timeout = 100;
    final int batchsize = 4;
    int parallelStreams = 16;
    CountDownLatch latch = new CountDownLatch(1);

    final EmitterProcessor<Integer> streamBatcher = EmitterProcessor.create();
    streamBatcher.publishOn(asyncGroup)
                 .bufferTimeout(batchsize, Duration.ofSeconds(timeout))
                 .log("batched")
                 .parallel(parallelStreams)
                 .groups()
                 .log("batched-inner")
                 .subscribe(innerStream -> innerStream.publishOn(asyncGroup)
                                                      .doOnError(Throwable::printStackTrace)
                                                      .subscribe(i -> latch.countDown()));

    streamBatcher.onNext(12);
    streamBatcher.onNext(123);
    streamBatcher.onNext(42);
    streamBatcher.onNext(666);

    boolean finished = latch.await(2, TimeUnit.SECONDS);
    if (!finished) {
        throw new RuntimeException(latch.getCount() + "");
    }
    else {
        assertEquals("Must have correct latch number : " + latch.getCount(), 0, latch.getCount());
    }
}
@Override
Flux<Integer> transformFlux(Flux<Integer> f) {
    Flux<String> otherStream = Flux.just("test", "test2", "test3");
    // System.out.println("Providing new downstream");
    Scheduler asyncGroup = Schedulers.newParallel("flux-p-tck", 2);

    BiFunction<Integer, String, Integer> combinator = (t1, t2) -> t1;

    return f.publishOn(sharedGroup)
            .parallel(2)
            .groups()
            .flatMap(stream -> stream.publishOn(asyncGroup)
                                     .doOnNext(this::monitorThreadUse)
                                     .scan((prev, next) -> next)
                                     .map(integer -> -integer)
                                     .filter(integer -> integer <= 0)
                                     .map(integer -> -integer)
                                     .bufferTimeout(batch, Duration.ofMillis(50))
                                     .flatMap(Flux::fromIterable)
                                     .flatMap(i -> Flux.zip(Flux.just(i), otherStream, combinator)))
            .publishOn(sharedGroup)
            .doAfterTerminate(asyncGroup::dispose)
            .doOnError(Throwable::printStackTrace);
}
.transform(flux -> ParallelFlux.from(flux.groups()
                                         .flatMap(s -> s.publish()
                                                        .autoConnect()