.parallel(8) .groups() .subscribe(stream -> stream.publishOn(asyncGroup) .bufferTimeout(1000 / 8, Duration.ofSeconds(1)) .subscribe(batch -> {
mapManydeferred.parallel(4) .groups() .subscribe(substream -> substream.publishOn(asyncGroup) .subscribe(i -> latch.countDown())); break;
.parallel(8) .groups() .flatMap(stream -> stream.publishOn(Schedulers.parallel()) .map((String str) -> { try {
.parallel(2) .groups() .subscribe(stream -> stream.publishOn(asyncGroup) .map(i -> i) .scan(1, (acc, next) -> acc + next)
.parallel(8) .groups() .subscribe(stream -> stream.publishOn(Schedulers.parallel()) .map(o -> { synchronized (internalLock) {
@Test
public void shouldCorrectlyDispatchBatchedTimeout() throws InterruptedException {
	// Buffer either `batchsize` elements or whatever arrived when the timeout
	// elapses, fan the batches out over `parallelStreams` rails, and verify at
	// least one element reaches a subscriber (latch of 1).
	long timeout = 100;
	final int batchsize = 4;
	int parallelStreams = 16;
	CountDownLatch latch = new CountDownLatch(1);

	final EmitterProcessor<Integer> streamBatcher = EmitterProcessor.create();
	streamBatcher.publishOn(asyncGroup)
	             // NOTE(review): Duration.ofSeconds(100) — presumably `timeout`
	             // was meant as millis; harmless here since batchsize is reached.
	             .bufferTimeout(batchsize, Duration.ofSeconds(timeout))
	             .log("batched")
	             .parallel(parallelStreams)
	             .groups()
	             .log("batched-inner")
	             .subscribe(innerStream -> innerStream.publishOn(asyncGroup)
	                                                  .doOnError(Throwable::printStackTrace)
	                                                  .subscribe(i -> latch.countDown()));

	streamBatcher.onNext(12);
	streamBatcher.onNext(123);
	streamBatcher.onNext(42);
	streamBatcher.onNext(666);

	boolean finished = latch.await(2, TimeUnit.SECONDS);
	if (!finished) {
		throw new RuntimeException("Latch did not reach zero, remaining: " + latch.getCount());
	}
	else {
		// FIX: assertEquals takes (message, expected, actual) — the original had
		// them swapped as (message, latch.getCount(), 0). Expected count is 0.
		assertEquals("Must have correct latch number : " + latch.getCount(), 0, latch.getCount());
	}
}
@Test
public void consistentMultithreadingWithPartition() throws InterruptedException {
	// Group 1..10 by parity on one pool, re-parallelize on another, merge back,
	// and check that all 10 elements are eventually dispatched downstream.
	Scheduler supplier1 = Schedulers.newParallel("groupByPool", 2);
	Scheduler supplier2 = Schedulers.newParallel("partitionPool", 5);

	CountDownLatch latch = new CountDownLatch(10);

	// FIX: dispose the schedulers in a finally block — the original leaked both
	// worker pools whenever the await timed out or the assertion failed.
	try {
		/*Disposable c = */Flux.range(1, 10)
		                       .groupBy(n -> n % 2 == 0)
		                       .flatMap(stream -> stream.publishOn(supplier1)
		                                                .log("groupBy-" + stream.key()))
		                       .parallel(5)
		                       .runOn(supplier2)
		                       .sequential()
		                       .publishOn(asyncGroup)
		                       .log("join")
		                       .subscribe(t -> {
			                       latch.countDown();
		                       });

		// await() returning false is surfaced by the count check just below.
		latch.await(30, TimeUnit.SECONDS);
		assertThat("Not totally dispatched: " + latch.getCount(), latch.getCount() == 0);
	}
	finally {
		supplier1.dispose();
		supplier2.dispose();
	}
}
@Override Flux<Integer> transformFlux(Flux<Integer> f) { Flux<String> otherStream = Flux.just("test", "test2", "test3"); // System.out.println("Providing new downstream"); Scheduler asyncGroup = Schedulers.newParallel("flux-p-tck", 2); BiFunction<Integer, String, Integer> combinator = (t1, t2) -> t1; return f.publishOn(sharedGroup) .parallel(2) .groups() .flatMap(stream -> stream.publishOn(asyncGroup) .doOnNext(this::monitorThreadUse) .scan((prev, next) -> next) .map(integer -> -integer) .filter(integer -> integer <= 0) .map(integer -> -integer) .bufferTimeout(batch, Duration.ofMillis(50)) .flatMap(Flux::fromIterable) .flatMap(i -> Flux.zip(Flux.just(i), otherStream, combinator)) ) .publishOn(sharedGroup) .doAfterTerminate(asyncGroup::dispose) .doOnError(Throwable::printStackTrace); }
// Consumes a Kafka topic partition-by-partition: records of each partition are
// processed on a dedicated elastic scheduler, offsets are sampled every 5s and
// committed, and the receiver is closed on downstream cancellation.
public Flux<?> flux() {
	Scheduler scheduler = Schedulers.newElastic("sample", 60, true);
	// commitInterval(ZERO) disables interval-based auto-commit; commits happen
	// only through the explicit concatMap(offset.commit()) below.
	return KafkaReceiver.create(receiverOptions(Collections.singleton(topic)).commitInterval(Duration.ZERO))
	                    .receive()
	                    // one inner flux per topic-partition, keyed by TopicPartition
	                    .groupBy(m -> m.receiverOffset().topicPartition())
	                    .flatMap(partitionFlux -> partitionFlux.publishOn(scheduler)
	                                                          .map(r -> processRecord(partitionFlux.key(), r))
	                                                          // keep only the latest offset per 5s window...
	                                                          .sample(Duration.ofMillis(5000))
	                                                          // ...and commit offsets strictly in order
	                                                          .concatMap(offset -> offset.commit()))
	                    .doOnCancel(() -> close());
}

// NOTE(review): body continues beyond the visible span of this file.
public ReceiverOffset processRecord(TopicPartition topicPartition, ReceiverRecord<Integer, Person> message) {
.receive() .groupBy(m -> m.receiverOffset().topicPartition()) .subscribe(partitionFlux -> groupDisposables.add(partitionFlux.publishOn(scheduler, 1).subscribe(record -> { int partition = record.partition(); String thread = Thread.currentThread().getName();
.subscribe(partitionFlux -> subscribeDisposables.add(partitionFlux.publishOn(scheduler).subscribe(record -> { int partition = record.partition(); String current = Thread.currentThread().getName() + ":" + record.offset();