/**
 * Verifies that once the processor is terminated through its sink with an error,
 * scanning {@code Attr.ERROR} exposes the terminating exception (message preserved).
 */
@Test
public void scanMainError() {
    // Raw EmitterProcessor type preserved from the original snippet.
    EmitterProcessor processor = EmitterProcessor.create();

    // Terminate via the sink with a known failure.
    processor.sink().error(new IllegalStateException("boom"));

    // The ERROR attribute must carry the original exception.
    assertThat(processor.scan(Attr.ERROR)).hasMessage("boom");
}
}
/**
 * Registers an update handler for the given subscription query.
 * <p>
 * Updates flow through an {@link EmitterProcessor} sized to {@code updateBufferSize}; its sink
 * uses the overflow strategy taken from {@code backpressure}. Disposing the sink removes the
 * handler from {@code updateHandlers}, and the returned {@code Registration} completes the
 * wrapped sink. Late subscribers receive up to {@code updateBufferSize} replayed updates via
 * {@code processor.replay(updateBufferSize).autoConnect()}.
 */
@Override public <U> UpdateHandlerRegistration<U> registerUpdateHandler( SubscriptionQueryMessage<?, ?, ?> query, SubscriptionQueryBackpressure backpressure, int updateBufferSize) { EmitterProcessor<SubscriptionQueryUpdateMessage<U>> processor = EmitterProcessor.create(updateBufferSize); FluxSink<SubscriptionQueryUpdateMessage<U>> sink = processor.sink(backpressure.getOverflowStrategy()); sink.onDispose(() -> updateHandlers.remove(query)); FluxSinkWrapper<SubscriptionQueryUpdateMessage<U>> fluxSinkWrapper = new FluxSinkWrapper<>(sink); updateHandlers.put(query, fluxSinkWrapper); Registration registration = () -> { fluxSinkWrapper.complete(); return true; }; return new UpdateHandlerRegistration<>(registration, processor.replay(updateBufferSize).autoConnect()); }
// Fragment: obtain a programmatic FluxSink<String> from processor 'd' (declared elsewhere).
FluxSink<String> s = d.sink();
// Fragment: obtain a programmatic FluxSink<Integer> from processor 'd' (declared elsewhere).
FluxSink<Integer> s = d.sink();
// Fragment: obtain the manual-emission sink of processor 'test' (declared elsewhere).
FluxSink<Integer> sink = test.sink();
// Fragment: grab the sink of 'stream' for manual emission, then route 'stream' into 'processor'
// (both declared elsewhere).
FluxSink<Integer> session = stream.sink(); stream.subscribe(processor);
// Fragment: assert the processor 'tp' has exactly one inner subscriber, then obtain its sink
// ('tp' declared elsewhere).
assertThat(tp.inners()).hasSize(1); FluxSink<Integer> s = tp.sink();
@Test public void streamValuesCanBeExploded() { // Stream"s values can be exploded // given: "a source composable with a mapMany function" EmitterProcessor<Integer> source = EmitterProcessor.create(); Flux<Integer> mapped = source .log() .publishOn(Schedulers.parallel()) .log() .flatMap(v -> Flux.just(v * 2)) .doOnError(Throwable::printStackTrace); // when: "the source accepts a value" MonoProcessor<Integer> value = mapped.next() .toProcessor(); value.subscribe(); source.sink().next(1); // then: "the value is mapped" int result = value.block(Duration.ofSeconds(5)); assertThat(result).isEqualTo(2); }
/**
 * Opens an Axon Server subscription-query stream.
 * <p>
 * Incoming updates are pushed through an {@link EmitterProcessor} of capacity
 * {@code bufferSize} whose sink uses the configured backpressure overflow strategy. A
 * flow-controlling stream observer sends the initial permits and the subscribe request;
 * disposing the update sink completes the gRPC stream, and the returned registration
 * completes the sink. The initial query result is resolved lazily through
 * {@code initialResult}.
 * <p>
 * NOTE(review): {@code processor.replay()} without a size argument replays an unbounded
 * history to late subscribers — confirm this is intended versus {@code replay(bufferSize)}.
 */
public AxonServerSubscriptionQueryResult( SubscriptionQuery query, Function<StreamObserver<SubscriptionQueryResponse>, StreamObserver<SubscriptionQueryRequest>> openStreamFn, AxonServerConfiguration configuration, SubscriptionQueryBackpressure backPressure, int bufferSize, Runnable onDispose) { this.onDispose = onDispose; this.subscriptionQuery = query; EmitterProcessor<QueryUpdate> processor = EmitterProcessor.create(bufferSize); this.updateMessageFluxSink = processor.sink(backPressure.getOverflowStrategy()); StreamObserver<SubscriptionQueryRequest> subscription = openStreamFn.apply(this); Function<FlowControl, SubscriptionQueryRequest> requestMapping = flowControl -> newBuilder().setFlowControl(SubscriptionQuery.newBuilder(subscriptionQuery) .setNumberOfPermits(flowControl.getPermits())).build(); requestObserver = new FlowControllingStreamObserver<>(subscription, configuration, requestMapping, t -> false); requestObserver.sendInitialPermits(); requestObserver.onNext(newBuilder().setSubscribe(subscriptionQuery).build()); updateMessageFluxSink.onDispose(requestObserver::onCompleted); Registration registration = () -> { updateMessageFluxSink.complete(); return true; }; Mono<QueryResponse> mono = Mono.create(sink -> initialResult(sink, requestObserver::onNext)); this.result = new DefaultSubscriptionQueryResult<>(mono, processor.replay().autoConnect(), registration); }
// Fragment: obtain the manual-emission sink of the 'outbound' processor (declared elsewhere).
FluxSink<ActionRequest> request = outbound.sink();
/**
 * Transitions the receiver to active and wires the event pipeline.
 * <p>
 * Enforces a single subscriber via a CAS on {@code isActive}, resets per-run state
 * (pending requests, commit-failure counter, transaction flag), creates a BUFFER-backed
 * event sink, and merges the event emitter, the init event, and — for AUTO_ACK/MANUAL_ACK
 * with a non-zero commit interval — a periodic commit flux, publishing all events on the
 * event scheduler. The merged subscription is tracked in {@code subscribeDisposables}.
 */
private void start() { log.debug("start"); if (!isActive.compareAndSet(false, true)) throw new IllegalStateException("Multiple subscribers are not supported for KafkaReceiver flux"); fluxList.clear(); requestsPending.set(0); consecutiveCommitFailures.set(0); awaitingTransaction.set(false); eventEmitter = EmitterProcessor.create(); eventSubmission = eventEmitter.sink(OverflowStrategy.BUFFER); eventScheduler.start(); Flux<InitEvent> initFlux = Flux.just(initEvent); fluxList.add(eventEmitter); fluxList.add(initFlux); Duration commitInterval = receiverOptions.commitInterval(); if ((ackMode == AckMode.AUTO_ACK || ackMode == AckMode.MANUAL_ACK) && !commitInterval.isZero()) { Flux<CommitEvent> periodicCommitFlux = Flux.interval(receiverOptions.commitInterval()) .onBackpressureLatest() .map(i -> commitEvent.periodicEvent()); fluxList.add(periodicCommitFlux); } eventFlux = Flux.merge(fluxList) .publishOn(eventScheduler); subscribeDisposables.add(eventFlux.subscribe(event -> doEvent(event))); }
/**
 * Transitions the receiver to active and wires the event pipeline.
 * <p>
 * Enforces a single subscriber via a CAS on {@code isActive}, resets per-run state
 * (pending requests, commit-failure counter, transaction flag), creates a BUFFER-backed
 * event sink, and merges the event emitter, the init event, and — for AUTO_ACK/MANUAL_ACK
 * with a non-zero commit interval — a periodic commit flux, publishing all events on the
 * event scheduler. The merged subscription is tracked in {@code subscribeDisposables}.
 */
private void start() { log.debug("start"); if (!isActive.compareAndSet(false, true)) throw new IllegalStateException("Multiple subscribers are not supported for KafkaReceiver flux"); fluxList.clear(); requestsPending.set(0); consecutiveCommitFailures.set(0); awaitingTransaction.set(false); eventEmitter = EmitterProcessor.create(); eventSubmission = eventEmitter.sink(OverflowStrategy.BUFFER); eventScheduler.start(); Flux<InitEvent> initFlux = Flux.just(initEvent); fluxList.add(eventEmitter); fluxList.add(initFlux); Duration commitInterval = receiverOptions.commitInterval(); if ((ackMode == AckMode.AUTO_ACK || ackMode == AckMode.MANUAL_ACK) && !commitInterval.isZero()) { Flux<CommitEvent> periodicCommitFlux = Flux.interval(receiverOptions.commitInterval()) .onBackpressureLatest() .map(i -> commitEvent.periodicEvent()); fluxList.add(periodicCommitFlux); } eventFlux = Flux.merge(fluxList) .publishOn(eventScheduler); subscribeDisposables.add(eventFlux.subscribe(event -> doEvent(event))); }
/**
 * Create new {@link DirectSink}.
 *
 * @param function the processor to process events emitted onto stream, typically this processor will represent the flow
 *        pipeline.
 * @param eventConsumer event consumer called just before {@link CoreEvent}'s emission.
 * @param bufferSize size of the {@code EmitterProcessor} buffer backing this sink.
 */
// NOTE(review): the doOnError(throwable -> { }) below swallows pipeline errors silently —
// presumably deliberate so the subscription stays alive; confirm errors are surfaced
// elsewhere in the flow pipeline.
public DirectSink(Function<Publisher<CoreEvent>, Publisher<CoreEvent>> function, Consumer<CoreEvent> eventConsumer, int bufferSize) { EmitterProcessor<CoreEvent> emitterProcessor = EmitterProcessor.create(bufferSize); reactorSink = new ReactorSink(emitterProcessor.sink(), emitterProcessor.transform(function).doOnError(throwable -> { }).subscribe(), eventConsumer, bufferSize); }
/**
 * Registers an update handler for the given subscription query.
 * <p>
 * Updates flow through an {@link EmitterProcessor} sized to {@code updateBufferSize}; its sink
 * uses the overflow strategy taken from {@code backpressure}. Disposing the sink removes the
 * handler from {@code updateHandlers}, and the returned {@code Registration} completes the
 * wrapped sink. Late subscribers receive up to {@code updateBufferSize} replayed updates via
 * {@code processor.replay(updateBufferSize).autoConnect()}.
 */
@Override public <U> UpdateHandlerRegistration<U> registerUpdateHandler( SubscriptionQueryMessage<?, ?, ?> query, SubscriptionQueryBackpressure backpressure, int updateBufferSize) { EmitterProcessor<SubscriptionQueryUpdateMessage<U>> processor = EmitterProcessor.create(updateBufferSize); FluxSink<SubscriptionQueryUpdateMessage<U>> sink = processor.sink(backpressure.getOverflowStrategy()); sink.onDispose(() -> updateHandlers.remove(query)); FluxSinkWrapper<SubscriptionQueryUpdateMessage<U>> fluxSinkWrapper = new FluxSinkWrapper<>(sink); updateHandlers.put(query, fluxSinkWrapper); Registration registration = () -> { fluxSinkWrapper.complete(); return true; }; return new UpdateHandlerRegistration<>(registration, processor.replay(updateBufferSize).autoConnect()); }
/**
 * Lazily creates the single consumer flux for this receiver (synchronized, one-shot).
 * <p>
 * Rejects a second subscription, prepares the init/poll/commit events, and exposes the
 * record emitter published on a dedicated single-threaded scheduler. Subscribing triggers
 * {@code start()} (failures are logged and rethrown); downstream requests with pending
 * demand schedule a poll; termination or cancellation disposes the receiver's resources.
 */
private synchronized Flux<ConsumerRecords<K, V>> createConsumerFlux() { if (consumerFlux != null) throw new IllegalStateException("Multiple subscribers are not supported for KafkaReceiver flux"); Consumer<Flux<?>> kafkaSubscribeOrAssign = (flux) -> receiverOptions.subscriber(this).accept(consumer); initEvent = new InitEvent(kafkaSubscribeOrAssign); pollEvent = new PollEvent(); commitEvent = new CommitEvent(); recordEmitter = EmitterProcessor.create(); recordSubmission = recordEmitter.sink(); scheduler = Schedulers.single(receiverOptions.schedulerSupplier().get()); consumerFlux = recordEmitter .publishOn(scheduler) .doOnSubscribe(s -> { try { start(); } catch (Exception e) { log.error("Subscription to event flux failed", e); throw e; } }) .doOnRequest(r -> { if (requestsPending.get() > 0) pollEvent.scheduleIfRequired(); }) .doAfterTerminate(this::dispose) .doOnCancel(this::dispose); return consumerFlux; }
/**
 * Lazily creates the single consumer flux for this receiver (synchronized, one-shot).
 * <p>
 * Rejects a second subscription, prepares the init/poll/commit events, and exposes the
 * record emitter published on a dedicated single-threaded scheduler. Subscribing triggers
 * {@code start()} (failures are logged and rethrown); downstream requests with pending
 * demand schedule a poll; termination or cancellation disposes the receiver's resources.
 */
private synchronized Flux<ConsumerRecords<K, V>> createConsumerFlux() { if (consumerFlux != null) throw new IllegalStateException("Multiple subscribers are not supported for KafkaReceiver flux"); Consumer<Flux<?>> kafkaSubscribeOrAssign = (flux) -> receiverOptions.subscriber(this).accept(consumer); initEvent = new InitEvent(kafkaSubscribeOrAssign); pollEvent = new PollEvent(); commitEvent = new CommitEvent(); recordEmitter = EmitterProcessor.create(); recordSubmission = recordEmitter.sink(); scheduler = Schedulers.single(receiverOptions.schedulerSupplier().get()); consumerFlux = recordEmitter .publishOn(scheduler) .doOnSubscribe(s -> { try { start(); } catch (Exception e) { log.error("Subscription to event flux failed", e); throw e; } }) .doOnRequest(r -> { if (requestsPending.get() > 0) pollEvent.scheduleIfRequired(); }) .doAfterTerminate(this::dispose) .doOnCancel(this::dispose); return consumerFlux; }
/**
 * Builds the processing pipeline: records consumed (auto-ack) from {@code sourceTopic} are
 * fanned into an {@code EmitterProcessor}, which feeds two sender streams built from
 * {@code process1} and {@code process2} on separate schedulers; the merged send results form
 * the returned flux. Subscribing starts the inbound consumption; cancelling disposes the
 * inbound subscription and closes resources, and termination also closes resources.
 * <p>
 * NOTE(review): the trailing {@code process1} declaration is truncated in this view and is
 * documented elsewhere.
 */
public Flux<?> flux() { sender = sender(senderOptions()); EmitterProcessor<Person> processor = EmitterProcessor.create(); FluxSink<Person> incoming = processor.sink(); Flux<?> inFlux = KafkaReceiver.create(receiverOptions(Collections.singleton(sourceTopic))) .receiveAutoAck() .concatMap(r -> r) .doOnNext(m -> incoming.next(m.value())); Flux<SenderResult<Integer>> stream1 = sender.send(processor.publishOn(scheduler1).map(p -> SenderRecord.create(process1(p, true), p.id()))); Flux<SenderResult<Integer>> stream2 = sender.send(processor.publishOn(scheduler2).map(p -> SenderRecord.create(process2(p, true), p.id()))); AtomicReference<Disposable> cancelRef = new AtomicReference<>(); Consumer<AtomicReference<Disposable>> cancel = cr -> { Disposable c = cr.getAndSet(null); if (c != null) c.dispose(); }; return Flux.merge(stream1, stream2) .doOnSubscribe(s -> cancelRef.set(inFlux.subscribe())) .doOnCancel(() -> { cancel.accept(cancelRef); close(); }) .doOnTerminate(() -> close()); } public ProducerRecord<Integer, Person> process1(Person p, boolean debug) {
/**
 * Tests that Producer send Exceptions do not cancel Record Publishers when stopOnError=false:
 * a first serialization failure triggers the error callback, yet the following record is
 * still delivered.
 */
@Test
public void sendDontStopOnSerializationError() throws Exception {
    ProducerRecord<Integer, String> failing = createProducerRecord(0, false);
    ProducerRecord<Integer, String> succeeding = createProducerRecord(1, true);

    // Sender configured to continue after errors, with a serializer that fails once.
    recreateSender(senderOptions.stopOnError(false)
            .producerProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                    FirstTimeFailingStringSerializer.class.getName()));

    Semaphore errorSignal = new Semaphore(0);
    EmitterProcessor<ProducerRecord<Integer, String>> emitter = EmitterProcessor.create();
    kafkaSender.send(emitter.map(record -> SenderRecord.create(record, null)))
               .doOnError(t -> errorSignal.release())
               .subscribe();

    // Push both records and complete the stream.
    FluxSink<ProducerRecord<Integer, String>> emission = emitter.sink();
    emission.next(failing);
    emission.next(succeeding);
    emission.complete();

    // The surviving record must arrive, and the error callback must have fired.
    waitForMessages(consumer, 1, true);
    assertTrue("Error callback not invoked",
            errorSignal.tryAcquire(requestTimeoutMillis, TimeUnit.MILLISECONDS));
}
/**
 * Opens an Axon Server subscription-query stream.
 * <p>
 * Incoming updates are pushed through an {@link EmitterProcessor} of capacity
 * {@code bufferSize} whose sink uses the configured backpressure overflow strategy. A
 * flow-controlling stream observer sends the initial permits and the subscribe request;
 * disposing the update sink completes the gRPC stream, and the returned registration
 * completes the sink. The initial query result is resolved lazily through
 * {@code initialResult}.
 * <p>
 * NOTE(review): {@code processor.replay()} without a size argument replays an unbounded
 * history to late subscribers — confirm this is intended versus {@code replay(bufferSize)}.
 */
public AxonServerSubscriptionQueryResult( SubscriptionQuery query, Function<StreamObserver<SubscriptionQueryResponse>, StreamObserver<SubscriptionQueryRequest>> openStreamFn, AxonServerConfiguration configuration, SubscriptionQueryBackpressure backPressure, int bufferSize, Runnable onDispose) { this.onDispose = onDispose; this.subscriptionQuery = query; EmitterProcessor<QueryUpdate> processor = EmitterProcessor.create(bufferSize); this.updateMessageFluxSink = processor.sink(backPressure.getOverflowStrategy()); StreamObserver<SubscriptionQueryRequest> subscription = openStreamFn.apply(this); Function<FlowControl, SubscriptionQueryRequest> requestMapping = flowControl -> newBuilder().setFlowControl(SubscriptionQuery.newBuilder(subscriptionQuery) .setNumberOfPermits(flowControl.getPermits())).build(); requestObserver = new FlowControllingStreamObserver<>(subscription, configuration, requestMapping, t -> false); requestObserver.sendInitialPermits(); requestObserver.onNext(newBuilder().setSubscribe(subscriptionQuery).build()); updateMessageFluxSink.onDispose(requestObserver::onCompleted); Registration registration = () -> { updateMessageFluxSink.complete(); return true; }; Mono<QueryResponse> mono = Mono.create(sink -> initialResult(sink, requestObserver::onNext)); this.result = new DefaultSubscriptionQueryResult<>(mono, processor.replay().autoConnect(), registration); }