// Exactly-once pipeline: each batch received from sourceTopic is forwarded inside a
// Kafka transaction via sendAndCommit; on error the in-flight transaction is aborted
// and the error is propagated; cancelling the subscription closes this component.
@Override public Flux<?> flux() {
    KafkaSender<Integer, Person> sender = sender(senderOptions());
    ReceiverOptions<Integer, Person> receiverOptions = receiverOptions(Collections.singleton(sourceTopic));
    KafkaReceiver<Integer, Person> receiver = KafkaReceiver.create(receiverOptions);
    return receiver.receiveExactlyOnce(sender.transactionManager())
            .concatMap(f -> sendAndCommit(f))
            .onErrorResume(e -> sender.transactionManager().abort().then(Mono.error(e)))
            .doOnCancel(() -> close());
}

// Sends one received batch to the destination and commits the transaction.
// NOTE(review): the body of this method continues beyond this chunk — not visible here.
private Flux<SenderResult<Integer>> sendAndCommit(Flux<ConsumerRecord<Integer, Person>> flux) {
/**
 * Receives records exactly-once from {@code receiver} and relays each polled batch to
 * {@code destTopic} within a Kafka transaction, committing after every batch and taking
 * at most {@code count} send results.
 *
 * <p>For failure testing, a {@link RuntimeException} is thrown while sending the record
 * at position {@code exceptionIndex} (1-based across the whole stream); a non-positive
 * index disables the fault injection.
 */
private Flux<SenderResult<Integer>> receiveAndSendTransactions(KafkaReceiver<Integer, String> receiver, KafkaSender<Integer, String> sender, String destTopic, int count, int exceptionIndex) {
    AtomicInteger processed = new AtomicInteger();
    TransactionManager txnManager = sender.transactionManager();
    return receiver.receiveExactlyOnce(txnManager)
                   .concatMap(batch -> sender
                           .send(batch.map(rec -> toSenderRecord(destTopic, rec, rec.key()))
                                      .doOnNext(rec -> {
                                          // Fault injection: fail on the configured record.
                                          if (processed.incrementAndGet() == exceptionIndex) {
                                              throw new RuntimeException("Test exception");
                                          }
                                      }))
                           // Commit the transaction once the whole batch has been sent.
                           .concatWith(txnManager.commit()))
                   .take(count);
}
}
/**
 * Verifies that a mid-stream failure aborts the in-flight transaction. Messages span
 * multiple partitions and each transaction includes its offset commit. The pipeline is
 * set to fail at record 15, so one transaction commits before the second is aborted.
 */
@Test
public void transactionAbort() throws Exception {
    int messageCount = 30;
    sendMessages(srcTopic, 0, messageCount);
    TransactionManager txnManager = sender.transactionManager();
    Flux<SenderResult<Integer>> pipeline = receiver.receiveExactlyOnce(txnManager)
            .concatMap(records -> sendAndCommit(destTopic, records, 15))
            .onErrorResume(error -> txnManager.abort().then(Mono.error(error)));
    StepVerifier.create(pipeline.then())
                .expectErrorMessage("Test exception")
                .verify(Duration.ofMillis(DEFAULT_TEST_TIMEOUT));
    verifyTransaction(messageCount, 10);
    // Two transactions were begun: the first committed, the second aborted on failure.
    assertEquals(2, producer.beginCount);
    assertEquals(1, producer.commitCount);
    assertEquals(1, producer.abortCount);
    assertEquals(1, producer.sendOffsetsCount);
}
/**
 * Good-path exactly-once receive/send: messages across multiple partitions are relayed
 * to the destination topic with the offset commit included in every transaction.
 * Asserts one begin/commit/sendOffsets cycle per polled batch and no aborts.
 */
@Test
public void transactionalReceiveAndSend() throws Exception {
    int messageCount = 600;
    sendMessages(srcTopic, 0, messageCount);
    // One transaction is expected per poll of maxPollRecords records.
    int expectedTransactions = messageCount / maxPollRecords;
    Flux<SenderResult<Integer>> pipeline = receiver.receiveExactlyOnce(sender.transactionManager())
            .concatMap(records -> sendAndCommit(destTopic, records, -1));
    Disposable subscription = pipeline.subscribe();
    waitForTransactions(expectedTransactions);
    subscription.dispose();
    verifyTransaction(messageCount, messageCount);
    assertEquals(expectedTransactions, producer.beginCount);
    assertEquals(expectedTransactions, producer.commitCount);
    assertEquals(0, producer.abortCount);
    assertEquals(expectedTransactions, producer.sendOffsetsCount);
}
/**
 * Tests failure and recovery of an exactly-once pipeline: the first receiver's stream
 * fails while processing record {@code failureKey}, its transaction is aborted, and
 * processing resumes on a second receiver so that all {@code count} messages are
 * eventually delivered in committed transactions.
 */
@Test
public void transactionFailure() throws Exception {
    int count = 30;
    int failureKey = 15;
    consumerFactory.addConsumer(new MockConsumer(cluster));
    sendMessages(srcTopic, 0, count);
    // Primary pipeline: injects a failure at failureKey, aborting the transaction on error.
    Flux<SenderResult<Integer>> flux = receiver.receiveExactlyOnce(sender.transactionManager())
            .concatMap(f -> sendAndCommit(destTopic, f, failureKey))
            .onErrorResume(e -> sender.transactionManager().abort().then(Mono.error(e)));
    KafkaReceiver<Integer, String> receiver2 = new DefaultKafkaReceiver<Integer, String>(consumerFactory, receiverOptions);
    // Recovery pipeline on a second receiver; -1 presumably disables the fault injection
    // (it is the value used by the good-path test).
    Flux<SenderResult<Integer>> errorResumeFlux = receiver2.receiveExactlyOnce(sender.transactionManager())
            .concatMap(f -> sendAndCommit(destTopic, f, -1));
    Disposable disposable = flux.onErrorResume(e -> errorResumeFlux).subscribe();
    waitForTransactions(3);
    disposable.dispose();
    verifyTransaction(count, count);
    // 4 begins = 3 committed transactions + 1 aborted one.
    assertEquals(4, producer.beginCount);
    assertEquals(3, producer.commitCount);
    assertEquals(1, producer.abortCount);
    assertEquals(3, producer.sendOffsetsCount);
}