@Test public void fluxValuesCanBeMapped() { // "A Flux"s values can be mapped" // given: "a source composable with a mapping function" EmitterProcessor<Integer> source = EmitterProcessor.create(); Flux<Integer> mapped = source.map(it -> it * 2); // when: "the source accepts a value" AtomicReference<Integer> value = new AtomicReference<>(); mapped.subscribe(value::set); source.onNext(1); // then: "the value is mapped" assertThat(value.get()).isEqualTo(2); }
@Test public void whenMappingFunctionThrowsMappedComposableAcceptsError() { // "When a mapping function throws an exception, the mapped composable accepts the error" // given: "a source composable with a mapping function that throws an error" EmitterProcessor<Integer> source = EmitterProcessor.create(); Flux<String> mapped = source.map(it -> { if (it == 1) { throw new RuntimeException(); } else { return "na"; } }); LongAdder errors = new LongAdder(); mapped.doOnError(e -> errors.increment()) .subscribe(); // when: "the source accepts a value" source.onNext(1); // then: "the error is passed on" assertThat(errors.intValue()).isEqualTo(1); }
@Test
public void subscribeWithSyncFusionUpstreamFirst() {
	EmitterProcessor<String> emitter = EmitterProcessor.create(16);

	// subscribe the processor to an upstream Mono first, then map its output
	Flux<String> lowered = Mono.just("DATA")
	                           .subscribeWith(emitter)
	                           .map(String::toLowerCase);

	StepVerifier.create(lowered)
	            .expectNext("data")
	            .expectComplete()
	            .verify(Duration.ofSeconds(1));

	// a late subscriber observes nothing once the processor has terminated
	assertThat(emitter.blockFirst()).as("later subscription").isNull();
}
@Test public void multipleStreamValuesCanBeMerged() { // "Multiple Stream"s values can be merged" // given: "source composables to merge, buffer and tap" EmitterProcessor<Integer> source1 = EmitterProcessor.create(); EmitterProcessor<Integer> source2 = EmitterProcessor.create(); source2.map(it -> it) .map(it -> it); EmitterProcessor<Integer> source3 = EmitterProcessor.create(); AtomicReference<List<Integer>> tap = new AtomicReference<>(); Flux.merge(source1, source2, source3).log().buffer(3) .log().subscribe(tap::set); // when: "the sources accept a value" source1.onNext(1); source2.onNext(2); source3.onNext(3); // then: "the values are all collected from source1 flux" assertThat(tap.get()).containsExactly(1, 2, 3); }
@Override
public Flux<SimpleResponse> streamOnFireAndForget(Empty message, ByteBuf metadata) {
	// Echo every previously received fire-and-forget request back as a response stream.
	return messages.map(request -> SimpleResponse.newBuilder()
	                                             .setResponseMessage("got fire and forget -> " + request.getRequestMessage())
	                                             .build());
}
// NOTE(review): fragment — this is the interior of a method whose start and end lie
// outside this view; left byte-identical. It appears to finish configuring sender
// options with a scheduler, rebuild the sender, then send a stream of SenderRecords
// (one per emitted integer i: key i, value "Message " + i, partition i % partitions,
// with i itself as the correlation metadata) and read that correlation id back in
// doOnNext. Presumably part of a reactor-kafka sender sample/test — confirm against
// the enclosing method.
.scheduler(scheduler); recreateSender(senderOptions); kafkaSender.send(emitter.map(i -> SenderRecord.<Integer, String, Integer>create(new ProducerRecord<Integer, String>(topic, i % partitions, i, "Message " + i), i))) .doOnNext(result -> { int messageIdentifier = result.correlationMetadata();
/**
 * Tests that Producer send Exceptions do not cancel Record Publishers when stopOnError=false
 */
@Test
public void sendDontStopOnSerializationError() throws Exception {
	ProducerRecord<Integer, String> failingRecord = createProducerRecord(0, false);
	ProducerRecord<Integer, String> succeedingRecord = createProducerRecord(1, true);
	// serializer fails on the first value only, so the first send errors and the second succeeds
	recreateSender(senderOptions
			.stopOnError(false)
			.producerProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
					FirstTimeFailingStringSerializer.class.getName()));

	Semaphore errorSignal = new Semaphore(0);
	EmitterProcessor<ProducerRecord<Integer, String>> emitter = EmitterProcessor.create();
	kafkaSender.send(emitter.map(record -> SenderRecord.create(record, null)))
	           .doOnError(t -> errorSignal.release())
	           .subscribe();

	FluxSink<ProducerRecord<Integer, String>> sink = emitter.sink();
	sink.next(failingRecord);
	sink.next(succeedingRecord);
	sink.complete();

	// the surviving record still reaches the broker despite the earlier failure
	waitForMessages(consumer, 1, true);
	assertTrue("Error callback not invoked",
			errorSignal.tryAcquire(requestTimeoutMillis, TimeUnit.MILLISECONDS));
}