public Flux<?> flux() {
    Scheduler scheduler = Schedulers.newElastic("sample", 60, true);
    return KafkaReceiver.create(receiverOptions(Collections.singleton(topic)).commitInterval(Duration.ZERO))
                        .receive()
                        .groupBy(m -> m.receiverOffset().topicPartition())
                        .flatMap(partitionFlux -> partitionFlux.publishOn(scheduler)
                                                               .map(r -> processRecord(partitionFlux.key(), r))
                                                               .sample(Duration.ofMillis(5000))
                                                               .concatMap(offset -> offset.commit()))
                        .doOnCancel(() -> close());
}

public ReceiverOffset processRecord(TopicPartition topicPartition, ReceiverRecord<Integer, Person> message) {
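    // The original snippet is truncated here. A minimal sketch of the body, assuming
    // the intent is to process the payload and return its offset so the sampled
    // stream above can commit it periodically; the real implementation may differ.
    System.out.println("Processing record " + message.value() + " from partition " + topicPartition);
    return message.receiverOffset();
}
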
public Flux<?> flux() {
    return KafkaReceiver.create(receiverOptions(Collections.singletonList(topic)).commitInterval(Duration.ZERO))
                        .receive()
                        .publishOn(scheduler)
                        .concatMap(m -> storeInDB(m.value())
                                .thenEmpty(m.receiverOffset().commit()))
                        .retry()
                        .doOnCancel(() -> close());
}

public Mono<Void> storeInDB(Person person) {
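    // Truncated in the original. A hedged sketch, assuming storeInDB persists the
    // record and completes empty so that the commit above runs only after a
    // successful store; the actual persistence logic is not shown in the source.
    System.out.println("Storing " + person + " in database");
    return Mono.empty();
}
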
/**
 * Tests manual commits for {@link KafkaReceiver#receive()} with synchronous commits
 * after message processing.
 */
@Test
public void manualCommitSync() throws Exception {
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveAndVerify(10, record -> {
        StepVerifier.create(record.receiverOffset().commit())
                    .expectComplete()
                    .verify(Duration.ofMillis(DEFAULT_TEST_TIMEOUT));
        return Mono.just(record);
    });
    verifyCommits(groupId, topic, 10);
}

/**
 * Tests manual commits for {@link KafkaReceiver#receive()} with asynchronous commits.
 * Verifies that commits are completed when the flux is closed gracefully.
 */
@Test
public void manualCommitAsync() throws Exception {
    int count = 10;
    CountDownLatch commitLatch = new CountDownLatch(count);
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveAndVerify(10, record -> Flux.merge(
                Mono.just(record),
                record.receiverOffset()
                      .commit()
                      .doOnSuccess(i -> commitLatch.countDown())
                      .then(Mono.empty()))
            .single());
    verifyCommits(groupId, topic, 10);
    assertTrue("Offsets not committed", commitLatch.await(1, TimeUnit.SECONDS));
}

@Test
public void manualAssignmentWithCommit() throws Exception {
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO)
                                     .commitBatchSize(0)
                                     .assignment(getTopicPartitions());
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive()
            .delayUntil(r -> r.receiverOffset().commit())
            .doOnSubscribe(s -> assignSemaphore.release());
    sendReceiveWithSendDelay(kafkaFlux, Duration.ofMillis(1000), 0, 10);
}

@Test
public void manualCommitBatch() throws Exception {
    int count = 20;
    int commitIntervalMessages = 4;
    CountDownLatch commitLatch = new CountDownLatch(count / commitIntervalMessages);
    long[] committedOffsets = new long[partitions];
    for (int i = 0; i < committedOffsets.length; i++)
        committedOffsets[i] = -1;
    List<ReceiverOffset> uncommitted = new ArrayList<>();
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0);
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive()
            .concatMap(record -> {
                ReceiverOffset offset = record.receiverOffset();
                offset.acknowledge();
                uncommitted.add(offset);
                if (uncommitted.size() == commitIntervalMessages) {
                    return offset.commit()
                                 .doOnSuccess(i -> onCommit(uncommitted, commitLatch, committedOffsets))
                                 .doOnError(e -> log.error("Commit exception", e))
                                 .then(Mono.just(record));
                }
                return Mono.just(record);
            })
            .doOnError(e -> log.error("KafkaFlux exception", e));
    sendAndWaitForMessages(kafkaFlux, count);
    checkCommitCallbacks(commitLatch, committedOffsets);
}

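/*
 * The tests in this section call onCommit(...) and checkCommitCallbacks(...) without
 * showing them. The following is a hypothetical reconstruction inferred from the call
 * sites only (the assertions in manualCommitSync below imply committedOffsets holds
 * the next expected offset per partition); the real helpers may differ.
 */
private void onCommit(ReceiverRecord<Integer, String> record, CountDownLatch commitLatch, long[] committedOffsets) {
    // Record the next offset to be consumed on this partition and signal the commit.
    committedOffsets[record.partition()] = record.offset() + 1;
    commitLatch.countDown();
}

private void onCommit(List<ReceiverOffset> offsets, CountDownLatch commitLatch, long[] committedOffsets) {
    // Batch variant used by manualCommitBatch: flush the uncommitted list.
    for (ReceiverOffset offset : offsets)
        committedOffsets[offset.topicPartition().partition()] = offset.offset() + 1;
    offsets.clear();
    commitLatch.countDown();
}

private void checkCommitCallbacks(CountDownLatch commitLatch, long[] committedOffsets) throws InterruptedException {
    // Wait for every expected commit callback to fire within the receive timeout;
    // the real helper presumably also checks committedOffsets against the consumer.
    assertTrue("Commit callbacks not invoked", commitLatch.await(receiveTimeoutMillis, TimeUnit.MILLISECONDS));
}
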
public Flux<ReceiverRecord<Integer, String>> receiveWithManualCommitFailures(boolean retriable, int failureCount,
        Semaphore receiveSemaphore, Semaphore successSemaphore, Semaphore failureSemaphore) {
    AtomicInteger retryCount = new AtomicInteger();
    if (retriable)
        injectCommitEventForRetriableException();
    return kafkaReceiver.receive()
            .doOnSubscribe(s -> {
                if (retriable)
                    injectCommitEventForRetriableException();
            })
            .doOnNext(record -> {
                try {
                    receiveSemaphore.release();
                    injectCommitError();
                    Predicate<Throwable> retryPredicate = e -> {
                        if (retryCount.incrementAndGet() == failureCount)
                            clearCommitError();
                        return retryCount.get() <= failureCount + 1;
                    };
                    record.receiverOffset().commit()
                          .doOnError(e -> failureSemaphore.release())
                          .doOnSuccess(i -> successSemaphore.release())
                          .retry(retryPredicate)
                          .subscribe();
                } catch (Exception e) {
                    fail("Unexpected exception: " + e);
                }
            })
            .doOnError(e -> log.error("KafkaFlux exception", e));
}

@Test
public void manualCommitRetry() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 2);
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .maxCommitAttempts(1)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveAndVerify(10, record -> record.receiverOffset().commit().retry().then(Mono.just(record)));
    verifyCommits(groupId, topic, 10);
}

/**
 * Tests that commits are retried if the failure is transient, and that the manual
 * commit Mono does not fail if the commit succeeds within the configured number
 * of attempts.
 */
@Test
public void manualCommitAttempts() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 2);
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .maxCommitAttempts(10)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveAndVerify(10, record -> record.receiverOffset().commit().then(Mono.just(record)));
    verifyCommits(groupId, topic, 10);
}

/**
 * Tests manual commits for {@link KafkaReceiver#receive()} with synchronous commits
 * using {@link Mono#block()} when there are no polls due to back-pressure.
 */
@Test
public void manualCommitBlockNoPoll() throws Exception {
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    Flux<ReceiverRecord<Integer, String>> inboundFlux =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions).receive();
    StepVerifier.create(inboundFlux.publishOn(Schedulers.elastic()), 1)
                .consumeNextWith(record -> {
                    receivedMessages.add(record);
                    record.receiverOffset().commit().block(Duration.ofMillis(DEFAULT_TEST_TIMEOUT));
                })
                .thenCancel()
                .verify(Duration.ofMillis(DEFAULT_TEST_TIMEOUT));
    verifyCommits(groupId, topic, 19);
}

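/*
 * verifyCommits(groupId, topic, remaining) is never shown in this excerpt. Judging from
 * its call sites (10 remaining after committing 10 of 20 in manualCommitSync, 19 remaining
 * after committing 1 of 20 here), a plausible sketch is to re-consume in the same group
 * and check that exactly the uncommitted records are redelivered, reusing the pattern
 * that manualCommitClose below applies inline. This is an assumption, not the source's
 * implementation.
 */
private void verifyCommits(String groupId, String topic, int remaining) throws Exception {
    // A fresh consumer in the same group should resume after the last committed offsets,
    // so only the uncommitted records are delivered again.
    consumerFactory.addConsumer(new MockConsumer(cluster));
    receiveAndVerify(remaining);
}
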
@Test
public void manualCommitFailure() throws Exception {
    int count = 1;
    AtomicBoolean commitSuccess = new AtomicBoolean();
    Semaphore commitErrorSemaphore = new Semaphore(0);
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0);
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive()
            .doOnNext(record -> {
                ReceiverOffset offset = record.receiverOffset();
                TestableReceiver.setNonExistentPartition(offset);
                record.receiverOffset().acknowledge();
                record.receiverOffset().commit()
                      .doOnError(e -> commitErrorSemaphore.release())
                      .doOnSuccess(i -> commitSuccess.set(true))
                      .subscribe();
            })
            .doOnError(e -> log.error("KafkaFlux exception", e));
    subscribe(kafkaFlux, new CountDownLatch(count));
    sendMessages(1, count);
    assertTrue("Commit error callback not invoked",
            commitErrorSemaphore.tryAcquire(receiveTimeoutMillis, TimeUnit.MILLISECONDS));
    assertFalse("Commit of non-existent partition succeeded", commitSuccess.get());
}

        .doOnNext(record -> {
            receivedMessages.add(record);
            record.receiverOffset().commit().doOnSuccess(v -> commitSemaphore.release()).subscribe();
        });
StepVerifier.create(inboundFlux, 1)

/**
 * Tests that the manual commit Mono fails if commits do not succeed within the
 * configured number of attempts after a transient error.
 */
@Test
public void manualCommitFailure() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 10);
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .maxCommitAttempts(2)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    receiveVerifyError(RetriableCommitFailedException.class, record ->
            record.receiverOffset().commit().retry(5).then(Mono.just(record)));
}

@Test
public void manualCommitRecordAsync() throws Exception {
    int count = 10;
    CountDownLatch commitLatch = new CountDownLatch(count);
    long[] committedOffsets = new long[partitions];
    receiverOptions = receiverOptions
            .commitInterval(Duration.ZERO)
            .commitBatchSize(0)
            .addAssignListener(this::seekToBeginning)
            .subscription(Collections.singletonList(topic));
    Flux<ReceiverRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive()
            .doOnNext(record -> record.receiverOffset()
                                      .commit()
                                      .doOnSuccess(i -> onCommit(record, commitLatch, committedOffsets))
                                      .doOnError(e -> log.error("Commit exception", e))
                                      .subscribe());
    subscribe(kafkaFlux, new CountDownLatch(count));
    sendMessages(0, count);
    checkCommitCallbacks(commitLatch, committedOffsets);
}

onReceive(record);
record.receiverOffset()
      .commit()
      .doOnError(e -> commitFailures.incrementAndGet())
      .doOnSuccess(v -> commitSemaphore.release())

/**
 * Tests that offsets that are not committed explicitly are not committed
 * on close and that uncommitted records are redelivered on the next receive.
 */
@Test
public void manualCommitClose() throws Exception {
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ZERO)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, 20);
    receiveAndVerify(20, r -> {
        if (r.receiverOffset().offset() < 5)
            return r.receiverOffset().commit().then(Mono.just(r));
        return Mono.just(r);
    });
    receivedMessages.removeIf(r -> r.offset() >= 5);
    consumerFactory.addConsumer(new MockConsumer(cluster));
    receiveAndVerify(10);
}

@Test
public void manualCommitSync() throws Exception {
    int count = 10;
    CountDownLatch commitLatch = new CountDownLatch(count);
    long[] committedOffsets = new long[partitions];
    for (int i = 0; i < committedOffsets.length; i++)
        committedOffsets[i] = 0;
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0);
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive()
            .delayUntil(record -> {
                assertEquals(committedOffsets[record.partition()], record.offset());
                return record.receiverOffset().commit()
                             .doOnSuccess(i -> onCommit(record, commitLatch, committedOffsets));
            })
            .doOnError(e -> log.error("KafkaFlux exception", e));
    sendAndWaitForMessages(kafkaFlux, count);
    checkCommitCallbacks(commitLatch, committedOffsets);
}

@Test
public void testThatDisposeOfResourceOnEventThreadCompleteSuccessful() {
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic));
    sendMessages(topic, 0, 10);
    DefaultKafkaReceiver<Integer, String> receiver =
            new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    Flux<ReceiverRecord<Integer, String>> inboundFlux = receiver.receive()
            .publishOn(Schedulers.newSingle("test"))
            .concatMap(record -> record.receiverOffset()
                                       .commit()
                                       .thenReturn(record));
    StepVerifier.create(inboundFlux.take(10))
                .expectNextCount(10)
                .expectComplete()
                .verify(Duration.ofMillis(DEFAULT_TEST_TIMEOUT));
}

@Test
public void manualCommitAsyncNoPoll() throws Exception {
    CountDownLatch commitLatch = new CountDownLatch(1);
    long[] committedOffsets = new long[partitions];
    for (int i = 0; i < committedOffsets.length; i++)
        committedOffsets[i] = 0;
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0)
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> inboundFlux = receiver.receive()
            .doOnNext(record -> onReceive(record));
    sendMessages(0, 10);
    StepVerifier.create(inboundFlux, 1)
                .consumeNextWith(record -> {
                    assertEquals(committedOffsets[record.partition()], record.offset());
                    record.receiverOffset().commit()
                          .doOnSuccess(i -> onCommit(record, commitLatch, committedOffsets))
                          .subscribe();
                })
                .thenCancel()
                .verify(Duration.ofMillis(receiveTimeoutMillis));
    checkCommitCallbacks(commitLatch, committedOffsets);
}

@Test
public void manualCommitSyncNoPoll() throws Exception {
    CountDownLatch commitLatch = new CountDownLatch(1);
    long[] committedOffsets = new long[partitions];
    for (int i = 0; i < committedOffsets.length; i++)
        committedOffsets[i] = 0;
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO).commitBatchSize(0)
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> inboundFlux = receiver.receive()
            .doOnNext(record -> onReceive(record));
    sendMessages(0, 10);
    StepVerifier.create(
            inboundFlux.take(1)
                       .concatMap(record -> {
                           assertEquals(committedOffsets[record.partition()], record.offset());
                           return record.receiverOffset()
                                        .commit()
                                        .doOnSuccess(i -> onCommit(record, commitLatch, committedOffsets))
                                        .then(Mono.just(record));
                       }), 1)
                .expectNextCount(1)
                .expectComplete()
                .verify(Duration.ofMillis(receiveTimeoutMillis));
    checkCommitCallbacks(commitLatch, committedOffsets);
}