processor.publishOn(schedulers[0])
         .share();

processor.publishOn(schedulers[1])
         .subscribe(i -> assertThat(Thread.currentThread().getName().contains("scheduler1")).isTrue());

for (int i = 0; i < count; i++) {
    processor.onNext(i);
}

processor.publishOn(schedulers[2])
         .map(i -> {
             assertThat(Thread.currentThread().getName().contains("scheduler2")).isTrue();
             return i;
         })
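A self-contained reduction of the same idea, showing that publishOn moves downstream work onto the named scheduler's thread (the scheduler name, count, and the println in place of the original assertion are assumptions):

import java.util.concurrent.CountDownLatch;
import reactor.core.publisher.EmitterProcessor;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class PublishOnThreadSketch {
    public static void main(String[] args) throws InterruptedException {
        int count = 10;
        Scheduler scheduler1 = Schedulers.newSingle("scheduler1");
        CountDownLatch latch = new CountDownLatch(count);
        EmitterProcessor<Integer> processor = EmitterProcessor.create();

        processor.publishOn(scheduler1)
                 .subscribe(i -> {
                     // runs on a "scheduler1-..." thread rather than the emitting thread
                     System.out.println(Thread.currentThread().getName() + " -> " + i);
                     latch.countDown();
                 });

        for (int i = 0; i < count; i++) {
            processor.onNext(i);
        }
        processor.onComplete();

        latch.await();
        scheduler1.dispose();
    }
}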
deferred.publishOn(asyncGroup)
        .parallel(8)
        .groups()
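On its own this fragment only sets up the rails; nothing runs until the groups are subscribed. A minimal runnable sketch under that assumption (the deferred name and the consumer body are placeholders, and Schedulers.parallel() stands in for asyncGroup):

import reactor.core.publisher.EmitterProcessor;
import reactor.core.scheduler.Schedulers;

public class ParallelGroupsSketch {
    public static void main(String[] args) throws InterruptedException {
        EmitterProcessor<Integer> deferred = EmitterProcessor.create();

        deferred.publishOn(Schedulers.parallel())
                .parallel(8)
                .groups() // Flux<GroupedFlux<Integer, Integer>>, one group per rail
                .subscribe(group -> group.subscribe(i ->
                        System.out.println("rail " + group.key() + " got " + i)));

        for (int i = 0; i < 16; i++) {
            deferred.onNext(i); // round-robins across the 8 rails
        }
        deferred.onComplete();

        Thread.sleep(500); // crude wait for the asynchronous rails to drain
    }
}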
default:
    mapManydeferred = EmitterProcessor.create();
    ("sync".equals(dispatcher) ? mapManydeferred : mapManydeferred.publishOn(asyncGroup))
            .flatMap(Flux::just)
            .subscribe(i -> latch.countDown());
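The ternary keeps the pipeline identical and only toggles whether a publishOn hop is inserted. A runnable version, where the dispatcher flag and asyncGroup scheduler are assumptions standing in for the surrounding test's fields:

import java.util.concurrent.CountDownLatch;
import reactor.core.publisher.EmitterProcessor;
import reactor.core.publisher.Flux;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class DispatcherToggleSketch {
    public static void main(String[] args) throws InterruptedException {
        String dispatcher = "async"; // "sync" would skip the publishOn hop
        Scheduler asyncGroup = Schedulers.newSingle("async");
        CountDownLatch latch = new CountDownLatch(3);

        EmitterProcessor<Integer> mapManydeferred = EmitterProcessor.create();
        ("sync".equals(dispatcher) ? mapManydeferred : mapManydeferred.publishOn(asyncGroup))
                .flatMap(Flux::just)
                .subscribe(i -> latch.countDown());

        Flux.just(1, 2, 3).subscribe(mapManydeferred);
        latch.await();
        asyncGroup.dispose();
    }
}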
FluxSink<String> s = d.sink();
Flux<Integer> tasks = d.publishOn(Schedulers.parallel())
                       .parallel(8)
                       .groups()
@Test(timeout = 10000L)
public void collectFromMultipleThread1() throws Exception {
    EmitterProcessor<Integer> head = EmitterProcessor.create();
    AtomicInteger sum = new AtomicInteger();

    int length = 1000;
    int batchSize = 333;
    int latchCount = length / batchSize; // 1000 / 333 = 3: one collected list per rail
    CountDownLatch latch = new CountDownLatch(latchCount);

    head.publishOn(Schedulers.parallel())
        .parallel(3)
        .runOn(Schedulers.parallel())
        .collect(ArrayList::new, List::add)
        .subscribe(ints -> {
            sum.addAndGet(ints.size());
            latch.countDown();
        });

    Flux.range(1, length).subscribe(head);
    latch.await();
    Assert.assertEquals(length, sum.get());
}
d.publishOn(Schedulers.parallel())
 .parallel(8)
 .groups()
@Test
public void simpleReactiveSubscriber() throws InterruptedException {
    EmitterProcessor<String> str = EmitterProcessor.create();

    str.publishOn(asyncGroup)
       .subscribe(new FooSubscriber());

    str.onNext("Goodbye World!");
    str.onNext("Goodbye World!");
    str.onComplete();

    Thread.sleep(500);
}
@Test
public void shouldCorrectlyDispatchComplexFlow() throws InterruptedException {
    EmitterProcessor<Integer> globalFeed = EmitterProcessor.create();

    CountDownLatch afterSubscribe = new CountDownLatch(1);
    CountDownLatch latch = new CountDownLatch(4);

    Flux<Integer> s = Flux.just("2222")
                          .map(Integer::parseInt)
                          .flatMap(l -> Flux.merge(globalFeed.publishOn(asyncGroup),
                                                   Flux.just(1111, l, 3333, 4444, 5555, 6666))
                                            .log("merged")
                                            .publishOn(asyncGroup)
                                            .log("dispatched")
                                            .doOnSubscribe(x -> afterSubscribe.countDown())
                                            .filter(nearbyLoc -> 3333 >= nearbyLoc)
                                            .filter(nearbyLoc -> 2222 <= nearbyLoc));

    /*Disposable action = */
    s.limitRate(1)
     .subscribe(integer -> {
         latch.countDown();
         System.out.println(integer);
     });

    afterSubscribe.await(5, TimeUnit.SECONDS);
    globalFeed.onNext(2223);
    globalFeed.onNext(2224);

    latch.await(5, TimeUnit.SECONDS);
    assertEquals("Must have counted 4 elements", 0, latch.getCount());
}
@Test
public void shouldCorrectlyDispatchBatchedTimeout() throws InterruptedException {
    long timeout = 100;
    final int batchsize = 4;
    int parallelStreams = 16;
    CountDownLatch latch = new CountDownLatch(1);

    final EmitterProcessor<Integer> streamBatcher = EmitterProcessor.create();
    streamBatcher.publishOn(asyncGroup)
                 .bufferTimeout(batchsize, Duration.ofSeconds(timeout))
                 .log("batched")
                 .parallel(parallelStreams)
                 .groups()
                 .log("batched-inner")
                 .subscribe(innerStream -> innerStream.publishOn(asyncGroup)
                                                      .doOnError(Throwable::printStackTrace)
                                                      .subscribe(i -> latch.countDown()));

    streamBatcher.onNext(12);
    streamBatcher.onNext(123);
    streamBatcher.onNext(42);
    streamBatcher.onNext(666);

    boolean finished = latch.await(2, TimeUnit.SECONDS);
    if (!finished) {
        throw new RuntimeException(latch.getCount() + "");
    }
    else {
        // JUnit's assertEquals takes (message, expected, actual)
        assertEquals("Must have correct latch number : " + latch.getCount(), 0, latch.getCount());
    }
}
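bufferTimeout(maxSize, maxTime) emits a buffer when either bound is reached, so with the 100-second window above it is the size bound of 4 that releases the single batch. In isolation:

import java.time.Duration;
import reactor.core.publisher.Flux;

public class BufferTimeoutSketch {
    public static void main(String[] args) {
        Flux.range(1, 10)
            .bufferTimeout(4, Duration.ofSeconds(100))
            .doOnNext(batch -> System.out.println("batch: " + batch))
            .blockLast(); // prints [1, 2, 3, 4], [5, 6, 7, 8], then [9, 10] on completion
    }
}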
@Test
public void sampleZipTest3() throws Exception {
    int elements = 1;
    CountDownLatch latch = new CountDownLatch(elements + 1);
    EmitterProcessor<SensorData> sensorDataProcessor = EmitterProcessor.create();
    Scheduler scheduler = Schedulers.single();

    sensorDataProcessor.publishOn(scheduler)
                       .subscribe(d -> latch.countDown(), null, latch::countDown);

    Flux.zip(Flux.just(new SensorData(2L, 12.0f)), Flux.just(new SensorData(1L, 14.0f)), this::computeMin)
        .log("zip3")
        .subscribe(sensorDataProcessor);

    awaitLatch(null, latch);
    scheduler.dispose();
}
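Flux.zip with a two-argument combinator pairs one element from each source; computeMin and SensorData come from the surrounding test class. The shape of the call, reduced to plain floats:

import reactor.core.publisher.Flux;

public class ZipSketch {
    public static void main(String[] args) {
        Flux.zip(Flux.just(12.0f), Flux.just(14.0f), Math::min)
            .subscribe(min -> System.out.println("min = " + min)); // min = 12.0
    }
}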
computationEmitterProcessor.publishOn(computation)
                           .map(i -> {
                               final List<String> list = new ArrayList<>(i);
                               // ... fill the list ...
                               return list;
                           });

persistenceEmitterProcessor.publishOn(persistence)
                           .doOnNext(i -> println("Persisted: ", i))
                           .map(i -> Collections.singletonList("done" + i))
                           .log("persistence");

Flux<Integer> forkStream = forkEmitterProcessor.publishOn(forkJoin)
                                               .log("fork");
private synchronized Flux<ConsumerRecords<K, V>> createConsumerFlux() {
    if (consumerFlux != null)
        throw new IllegalStateException("Multiple subscribers are not supported for KafkaReceiver flux");

    Consumer<Flux<?>> kafkaSubscribeOrAssign = (flux) -> receiverOptions.subscriber(this).accept(consumer);
    initEvent = new InitEvent(kafkaSubscribeOrAssign);
    pollEvent = new PollEvent();
    commitEvent = new CommitEvent();

    recordEmitter = EmitterProcessor.create();
    recordSubmission = recordEmitter.sink();
    scheduler = Schedulers.single(receiverOptions.schedulerSupplier().get());

    consumerFlux = recordEmitter
            .publishOn(scheduler)
            .doOnSubscribe(s -> {
                try {
                    start();
                }
                catch (Exception e) {
                    log.error("Subscription to event flux failed", e);
                    throw e;
                }
            })
            .doOnRequest(r -> {
                if (requestsPending.get() > 0)
                    pollEvent.scheduleIfRequired();
            })
            .doAfterTerminate(this::dispose)
            .doOnCancel(this::dispose);
    return consumerFlux;
}
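This method is internal to the receiver; from application code the same flux is reached through the public KafkaReceiver API. A minimal sketch of that usage (broker address, group id, topic name, and deserializers are assumptions):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import reactor.kafka.receiver.KafkaReceiver;
import reactor.kafka.receiver.ReceiverOptions;

public class ReceiverSketch {
    public static void main(String[] args) {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "sample-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        ReceiverOptions<Integer, String> options = ReceiverOptions.<Integer, String>create(props)
                .subscription(Collections.singleton("demo-topic"));

        // receive() is backed by an internal consumer flux like the one above;
        // records are acknowledged explicitly via their offset
        KafkaReceiver.create(options)
                     .receive()
                     .doOnNext(r -> r.receiverOffset().acknowledge())
                     .subscribe(r -> System.out.println(r.value()));
    }
}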
public Flux<?> flux() {
    sender = sender(senderOptions());

    EmitterProcessor<Person> processor = EmitterProcessor.create();
    FluxSink<Person> incoming = processor.sink();
    Flux<?> inFlux = KafkaReceiver.create(receiverOptions(Collections.singleton(sourceTopic)))
                                  .receiveAutoAck()
                                  .concatMap(r -> r)
                                  .doOnNext(m -> incoming.next(m.value()));

    Flux<SenderResult<Integer>> stream1 =
            sender.send(processor.publishOn(scheduler1).map(p -> SenderRecord.create(process1(p, true), p.id())));
    Flux<SenderResult<Integer>> stream2 =
            sender.send(processor.publishOn(scheduler2).map(p -> SenderRecord.create(process2(p, true), p.id())));

    AtomicReference<Disposable> cancelRef = new AtomicReference<>();
    Consumer<AtomicReference<Disposable>> cancel = cr -> {
        Disposable c = cr.getAndSet(null);
        if (c != null)
            c.dispose();
    };

    return Flux.merge(stream1, stream2)
               .doOnSubscribe(s -> cancelRef.set(inFlux.subscribe()))
               .doOnCancel(() -> {
                   cancel.accept(cancelRef);
                   close();
               })
               .doOnTerminate(() -> close());
}

public ProducerRecord<Integer, Person> process1(Person p, boolean debug) {
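Both sender streams subscribe to the same EmitterProcessor, which multicasts every Person to both pipelines. The same fan-out in miniature (names are placeholders; on Reactor 3.4+ Sinks.many().multicast() is the suggested replacement for EmitterProcessor):

import reactor.core.publisher.EmitterProcessor;
import reactor.core.publisher.FluxSink;

public class FanOutSketch {
    public static void main(String[] args) {
        EmitterProcessor<String> processor = EmitterProcessor.create();
        FluxSink<String> sink = processor.sink();

        // two independent subscribers; each element is delivered to both
        processor.map(s -> "pipeline1: " + s).subscribe(System.out::println);
        processor.map(s -> "pipeline2: " + s).subscribe(System.out::println);

        sink.next("a"); // printed by both pipelines
        sink.complete();
    }
}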