@Override
public K key() {
	return source.key();
}
@Override
public void subscribe(CoreSubscriber<? super O> actual) {
	CoreSubscriber<? super I> input = lifter.apply(source, actual);

	Objects.requireNonNull(input, "Lifted subscriber MUST NOT be null");

	source.subscribe(input);
}
}
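// Hedged usage sketch, not taken from the sources above: in Reactor 3 the
// public entry point that builds lift wrappers like the one shown is
// Operators.lift(...). The pass-through logging subscriber below is a
// hypothetical example of what `lifter.apply(source, actual)` may return
// (assumed imports: reactor.core.CoreSubscriber,
// reactor.core.publisher.Operators, org.reactivestreams.Subscription).
Flux<Integer> lifted = Flux.range(1, 3)
		.transform(Operators.<Integer, Integer>lift((scannable, downstream) ->
				new CoreSubscriber<Integer>() {
					@Override
					public void onSubscribe(Subscription s) {
						downstream.onSubscribe(s); // let the downstream drive demand
					}

					@Override
					public void onNext(Integer i) {
						System.out.println("lifted onNext: " + i);
						downstream.onNext(i);
					}

					@Override
					public void onError(Throwable t) {
						downstream.onError(t);
					}

					@Override
					public void onComplete() {
						downstream.onComplete();
					}
				}));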
@Override
protected List<Scenario<String, GroupedFlux<Integer, String>>> scenarios_operatorSuccess() {
	return Arrays.asList(
			scenario(f -> f.groupBy(String::hashCode))
					.receive(g -> assertThat(g.key()).isEqualTo(g.blockFirst().hashCode()),
							g -> assertThat(g.key()).isEqualTo(g.blockFirst().hashCode()),
							g -> assertThat(g.key()).isEqualTo(g.blockFirst().hashCode()))
	);
}
@Test
public void analyticsTest() throws Exception {
	ReplayProcessor<Integer> source = ReplayProcessor.create();

	long avgTime = 50L;

	Mono<Long> result = source.log("delay")
			.publishOn(asyncGroup)
			.delayElements(Duration.ofMillis(avgTime))
			.elapsed()
			.skip(1)
			.groupBy(w -> w.getT1())
			.flatMap(w -> w.count().map(c -> Tuples.of(w.key(), c)))
			.log("elapsed")
			.collectSortedList(Comparator.comparing(Tuple2::getT1))
			.flatMapMany(Flux::fromIterable)
			.reduce(-1L, (acc, next) -> acc > 0L ? ((next.getT1() + acc) / 2) : next.getT1())
			.log("reduced-elapsed")
			.cache();

	source.subscribe();

	for (int j = 0; j < 10; j++) {
		source.onNext(1);
	}
	source.onComplete();

	Assert.assertTrue(result.block(Duration.ofSeconds(5)) >= avgTime * 0.6);
}
@Test
public void consistentMultithreadingWithPartition() throws InterruptedException {
	Scheduler supplier1 = Schedulers.newParallel("groupByPool", 2);
	Scheduler supplier2 = Schedulers.newParallel("partitionPool", 5);

	CountDownLatch latch = new CountDownLatch(10);

	/*Disposable c = */Flux.range(1, 10)
			.groupBy(n -> n % 2 == 0)
			.flatMap(stream -> stream.publishOn(supplier1)
					.log("groupBy-" + stream.key()))
			.parallel(5)
			.runOn(supplier2)
			.sequential()
			.publishOn(asyncGroup)
			.log("join")
			.subscribe(t -> latch.countDown());

	latch.await(30, TimeUnit.SECONDS);
	assertThat("Not totally dispatched: " + latch.getCount(), latch.getCount() == 0);
	supplier1.dispose();
	supplier2.dispose();
}
@Test
public void testBeyondLongMaxMicroBatching() throws InterruptedException {
	List<Integer> tasks = IntStream.range(0, 1500)
			.boxed()
			.collect(Collectors.toList());

	CountDownLatch countDownLatch = new CountDownLatch(tasks.size());
	Flux<Integer> worker = Flux.fromIterable(tasks)
			.log("before", Level.FINE)
			.publishOn(asyncGroup);

	/*Disposable tail = */worker.log("after", Level.FINE)
			.parallel(2)
			.groups()
			.subscribe(s -> s.log("w" + s.key(), Level.FINE)
					.publishOn(asyncGroup)
					.map(v -> v)
					.subscribe(v -> countDownLatch.countDown(), Throwable::printStackTrace));

	countDownLatch.await(5, TimeUnit.SECONDS);
	Assert.assertEquals("Count max: " + tasks.size(), 0, countDownLatch.getCount());
}
@Test
public void advancedBatchingGrouping() {
	StepVerifier.create(
			Flux.just(1, 3, 5, 2, 4, 6, 11, 12, 13)
					.groupBy(i -> i % 2 == 0 ? "even" : "odd")
					.concatMap(g -> g.defaultIfEmpty(-1) //if empty groups, show them
							.map(String::valueOf)        //map to string
							.startWith(g.key())))        //start with the group's key
			.expectNext("odd", "1", "3", "5", "11", "13")
			.expectNext("even", "2", "4", "6", "12")
			.verifyComplete();
}
		.parallel(8)
		.groups()
		.subscribe(stream -> stream.publishOn(asyncGroup)
				.bufferTimeout(1000 / 8, Duration.ofSeconds(1))
				.subscribe(batch -> {
@Override
public int getPrefetch() {
	return source.getPrefetch();
}
@Test
public void sampleTest() throws Exception {
	CountDownLatch latch = new CountDownLatch(1);

	Disposable top10every1second = Flux.fromIterable(PULP_SAMPLE)
			.publishOn(asyncGroup)
			.flatMap(samuelJackson -> Flux.fromArray(samuelJackson.split(" "))
					.publishOn(asyncGroup)
					.filter(w -> !w.trim().isEmpty())
					.doOnNext(i -> simulateLatency()))
			.window(Duration.ofSeconds(2))
			.flatMap(s -> s.groupBy(w -> w)
					.flatMap(w -> w.count().map(c -> Tuples.of(w.key(), c)))
					.collectSortedList((a, b) -> -a.getT2().compareTo(b.getT2()))
					.flatMapMany(Flux::fromIterable)
					.take(10)
					.doAfterTerminate(() -> LOG.info("------------------------ window terminated ----------------------")))
			.subscribe(entry -> LOG.info(entry.getT1() + ": " + entry.getT2()),
					error -> LOG.error("", error),
					latch::countDown);

	awaitLatch(top10every1second, latch);
}
public Flux<?> flux() {
	Scheduler scheduler = Schedulers.newElastic("sample", 60, true);
	return KafkaReceiver.create(receiverOptions(Collections.singleton(topic)).commitInterval(Duration.ZERO))
			.receive()
			.groupBy(m -> m.receiverOffset().topicPartition())
			.flatMap(partitionFlux -> partitionFlux.publishOn(scheduler)
					.map(r -> processRecord(partitionFlux.key(), r))
					.sample(Duration.ofMillis(5000))
					.concatMap(offset -> offset.commit()))
			.doOnCancel(() -> close());
}

public ReceiverOffset processRecord(TopicPartition topicPartition, ReceiverRecord<Integer, Person> message) {
	// Minimal sketch: the record-handling body was truncated in the source.
	// Returning the record's offset feeds the sample/commit step above.
	return message.receiverOffset();
}
@Test
public void composeGroup() {
	Set<Integer> values = new ConcurrentSkipListSet<>();

	Flux<Integer> flux = Flux.range(1, 10)
			.parallel(3)
			.runOn(Schedulers.parallel())
			.doOnNext(values::add)
			.composeGroup(p -> p.log("rail" + p.key())
					.map(i -> (p.key() + 1) * 100 + i))
			.sequential();

	StepVerifier.create(flux.sort())
			.assertNext(i -> assertThat(i - 100).isBetween(1, 10))
			.thenConsumeWhile(i -> i / 100 == 1)
			.assertNext(i -> assertThat(i - 200).isBetween(1, 10))
			.thenConsumeWhile(i -> i / 100 == 2)
			.assertNext(i -> assertThat(i - 300).isBetween(1, 10))
			.thenConsumeWhile(i -> i / 100 == 3)
			.verifyComplete();

	assertThat(values).hasSize(10)
			.contains(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
}
mapManydeferred.parallel(4)
		.groups()
		.subscribe(substream -> substream.publishOn(asyncGroup)
				.subscribe(i -> latch.countDown()));
break;
@Override
public int getPrefetch() {
	return source.getPrefetch();
}
@Override
public K key() {
	return source.key();
}
private Mono<Average> calculateAverage(GroupedFlux<Integer, Sensor> group) {
	return group.reduce(new Accumulator(0, 0),
				(a, d) -> new Accumulator(a.getCount() + 1, a.getTotalValue() + d.getTemperature()))
			.map(accumulator -> new Average(group.key(),
					accumulator.getTotalValue() / accumulator.getCount()));
}
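// Hedged usage sketch; sensorFlux() and Sensor::getId are assumptions, not part
// of the source above. Each distinct sensor id becomes one
// GroupedFlux<Integer, Sensor>, which calculateAverage reduces to a single Average.
Flux<Average> averages = sensorFlux()
		.groupBy(Sensor::getId)
		.flatMap(this::calculateAverage);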
@Test
public void data6() {
	// tag::6[]
	Flux.just("alpha", "bravo", "charlie")
			.map(String::toUpperCase)
			.flatMap(s -> Flux.fromArray(s.split("")))
			.groupBy(String::toString)
			.sort((o1, o2) -> o1.key().compareTo(o2.key()))
			.flatMap(group -> Mono.just(group.key()).zipWith(group.count()))
			.map(keyAndCount -> keyAndCount.getT1() + " => " + keyAndCount.getT2())
			.subscribe(System.out::println);
	// end::6[]
}
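// For the fixed input above, the snippet prints one line per letter, sorted by key:
// A => 4
// B => 1
// C => 1
// E => 1
// H => 2
// I => 1
// L => 2
// O => 1
// P => 1
// R => 2
// V => 1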
		.parallel(8)
		.groups()
		.flatMap(stream -> stream.publishOn(Schedulers.parallel())
				.map((String str) -> {
					try {
@Override
public void subscribe(CoreSubscriber<? super O> actual) {
	CoreSubscriber<? super I> input = lifter.apply(source, actual);

	Objects.requireNonNull(input, "Lifted subscriber MUST NOT be null");

	if (actual instanceof Fuseable.QueueSubscription && !(input instanceof QueueSubscription)) {
		//user didn't produce a QueueSubscription, original was one
		input = new FluxHide.SuppressFuseableSubscriber<>(input);
	}
	//otherwise QS is not required or user already made a compatible conversion

	source.subscribe(input);
}
}