/**
 * Returns the group id configured for the underlying Kafka consumer.
 *
 * @return the value of the {@code group.id} consumer property
 */
@Override
public String groupId() {
    Object configuredGroupId = consumerProperty(ConsumerConfig.GROUP_ID_CONFIG);
    return (String) configuredGroupId;
}
/**
 * Returns the Kafka consumer group id from the configured consumer properties.
 *
 * @return the {@code group.id} property value
 */
@Override
public String groupId() {
    return (String) consumerProperty(ConsumerConfig.GROUP_ID_CONFIG);
}
/**
 * Builds a receiver plus its inbound record flux and wraps both in a
 * {@link TestableReceiver} so tests can instrument the consumption pipeline.
 */
public TestableReceiver createTestFlux() {
    KafkaReceiver<Integer, String> receiver = createReceiver();
    return new TestableReceiver(receiver, receiver.receive());
}
@Test public void atleastOnceCommitBatchSize() throws Exception { receiverOptions.closeTimeout(Duration.ofMillis(1000)); receiverOptions.commitBatchSize(10); receiverOptions.commitInterval(Duration.ofMillis(60000)); KafkaReceiver<Integer, String> receiver = createReceiver(); Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive().doOnNext(record -> record.receiverOffset().acknowledge()); sendReceive(fluxWithAck, 0, 100, 0, 100); /// Atmost batchSize records may be redelivered restartAndCheck(receiver, 100, 100, receiverOptions.commitBatchSize()); }
/**
 * Verifies manual partition assignment combined with a synchronous per-record
 * commit: batching and interval commits are disabled so every record is
 * committed individually before the next one is emitted.
 */
@Test
public void manualAssignmentWithCommit() throws Exception {
    receiverOptions = receiverOptions.commitInterval(Duration.ZERO)
            .commitBatchSize(0)
            .assignment(getTopicPartitions());
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive()
            .delayUntil(record -> record.receiverOffset().commit())
            .doOnSubscribe(subscription -> assignSemaphore.release());
    sendReceiveWithSendDelay(kafkaFlux, Duration.ofMillis(1000), 0, 10);
}
/**
 * Returns a flux that receives records from the configured topic, stores each
 * value via {@link #storeInDB(Person)} and commits the offset only after the
 * store completes, giving at-least-once persistence semantics.
 */
public Flux<?> flux() {
    // commitInterval(ZERO) disables periodic commits; offsets are committed
    // only through the explicit per-record commit below.
    return KafkaReceiver.create(receiverOptions(Collections.singletonList(topic)).commitInterval(Duration.ZERO))
            .receive()
            .publishOn(scheduler) // move store work off the Kafka polling thread
            .concatMap(m -> storeInDB(m.value())
                    .thenEmpty(m.receiverOffset().commit())) // commit only after a successful store
            .retry() // resubscribe on error; uncommitted records are redelivered
            .doOnCancel(() -> close());
}

public Mono<Void> storeInDB(Person person) {
/**
 * Verifies that the assign listener can seek assigned partitions back to the
 * beginning, so records sent before subscription are consumed again.
 */
@Test
public void seekToBeginning() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
            .addAssignListener(this::seekToBeginning)
            .subscription(Collections.singletonList(topic));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive();
    sendReceive(kafkaFlux, count, count, 0, count * 2);
}
/**
 * Verifies that when auto-commit keeps failing beyond {@code maxCommitAttempts},
 * the failure is propagated; a fresh receiver then resumes and redelivers the
 * uncommitted records.
 */
@Test
public void autoCommitFailurePropagationAfterRetries() throws Exception {
    int count = 5;
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .maxCommitAttempts(2);
    testAutoCommitFailureScenarios(true, count, 2, 0, Integer.MAX_VALUE);

    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<? extends ConsumerRecord<Integer, String>> flux = receiver.receive();
    sendReceiveWithRedelivery(flux, count, count, 2, 5);
}
/**
 * Creates a receiver subscribed to the test topic, registering the
 * partition-assignment listener on the shared {@code receiverOptions} first.
 */
public KafkaReceiver<Integer, String> createReceiver() {
    receiverOptions = receiverOptions
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Collections.singletonList(topic));
    return KafkaReceiver.create(receiverOptions);
}
/**
 * Verifies that a receiver with manually assigned partitions (no group
 * subscription) receives records sent after the assignment completes.
 */
@Test
public void manualAssignment() throws Exception {
    receiverOptions = receiverOptions.assignment(getTopicPartitions());
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive()
            .doOnSubscribe(subscription -> assignSemaphore.release());
    sendReceiveWithSendDelay(kafkaFlux, Duration.ofMillis(1000), 0, 10);
}
/**
 * Abruptly terminates the given receiver (no graceful close) and verifies that
 * a new receiver resumes from the last committed offset: it must receive the
 * newly sent records plus at most {@code maxRedelivered} redelivered ones.
 *
 * @param receiver       the receiver to terminate abruptly
 * @param sendStartIndex index of the first message sent after the restart
 * @param sendCount      number of messages sent after the restart
 * @param maxRedelivered maximum number of previously-consumed records that may
 *                       be delivered again to the new receiver
 */
private void restartAndCheck(KafkaReceiver<Integer, String> receiver,
        int sendStartIndex, int sendCount, int maxRedelivered) throws Exception {
    Thread.sleep(500); // Give a little time for commits to complete before terminating abruptly
    new TestableReceiver(receiver).terminate();
    cancelSubscriptions(true);
    clearReceivedMessages();
    // Restart with at-most-once receive so redeliveries are bounded by what was
    // left uncommitted at termination.
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver().receiveAtmostOnce();
    sendReceiveWithRedelivery(kafkaFlux2, sendStartIndex, sendCount, 0, maxRedelivered);
    clearReceivedMessages();
    cancelSubscriptions(false);
}
@Test public void atmostOnce() throws Exception { receiverOptions.closeTimeout(Duration.ofMillis(1000)); KafkaReceiver<Integer, String> receiver = createReceiver(); Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receiveAtmostOnce(); sendReceive(kafkaFlux, 0, 10, 0, 10); // Second consumer should receive only new messages even though first one was not closed gracefully restartAndCheck(receiver, 10, 10, 0); }
/**
 * Verifies that a non-retriable commit exception does not stall consumption:
 * a fresh auto-ack receiver resumes and redelivers uncommitted records.
 */
@Test
public void autoCommitNonRetriableException() throws Exception {
    int count = 5;
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    testAutoCommitFailureScenarios(false, count, 2, 0, 10);

    Flux<? extends ConsumerRecord<Integer, String>> flux = createReceiver()
            .receiveAutoAck()
            .concatMap(batch -> batch);
    sendReceiveWithRedelivery(flux, count, count, 3, 5);
}
/**
 * Basic round-trip: records sent to the topic are received by a receiver
 * created with the default test options.
 */
@Test
public void sendReceive() throws Exception {
    KafkaReceiver<Integer, String> receiver = createReceiver();
    sendReceive(receiver.receive(), 0, 100, 0, 100);
}
/**
 * Verifies that transient commit failures are retried transparently: records
 * continue to flow and are received exactly once despite failed commit
 * attempts.
 */
@Test
public void autoCommitRetry() throws Exception {
    int count = 5;
    testAutoCommitFailureScenarios(true, count, 10, 0, 2);

    Flux<? extends ConsumerRecord<Integer, String>> flux = createReceiver()
            .receiveAutoAck()
            .concatMap(batch -> batch);
    sendReceive(flux, count, count, count, count);
}
@Test public void atleastOnceCommitRecord() throws Exception { receiverOptions.closeTimeout(Duration.ofMillis(1000)); receiverOptions.commitBatchSize(1); receiverOptions.commitInterval(Duration.ofMillis(60000)); KafkaReceiver<Integer, String> receiver = createReceiver(); Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive().doOnNext(record -> record.receiverOffset().acknowledge()); sendReceive(fluxWithAck, 0, 100, 0, 100); // Atmost one record may be redelivered restartAndCheck(receiver, 100, 100, 1); }
/**
 * Verifies subscription via a topic-name pattern: the receiver matches the
 * test topic through the wildcard and receives the sent records.
 */
@Test
public void wildcardSubscribe() throws Exception {
    receiverOptions = receiverOptions
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Pattern.compile("test.*"));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receive();
    sendReceive(kafkaFlux, 0, 10, 0, 10);
}
/**
 * Returns the group id configured for the underlying Kafka consumer.
 *
 * @return the value of the {@code group.id} consumer property
 */
@Override
public String groupId() {
    Object configuredGroupId = consumerProperty(ConsumerConfig.GROUP_ID_CONFIG);
    return (String) configuredGroupId;
}
/**
 * Returns the Kafka consumer group id from the configured consumer properties.
 *
 * @return the {@code group.id} property value
 */
@Override
public String groupId() {
    return (String) consumerProperty(ConsumerConfig.GROUP_ID_CONFIG);
}
/**
 * Verifies at-least-once delivery when commits are driven by the commit
 * interval only (batch size effectively disabled): after waiting longer than
 * the interval, all acknowledged offsets must be committed, so an abrupt
 * restart redelivers nothing.
 */
@Test
public void atleastOnceCommitInterval() throws Exception {
    // Reassign the fluent result: ReceiverOptions setters may return a new
    // instance, and discarding the result silently drops the configuration.
    receiverOptions = receiverOptions
            .closeTimeout(Duration.ofMillis(1000))
            .commitBatchSize(Integer.MAX_VALUE) // batch commits never trigger
            .commitInterval(Duration.ofMillis(1000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // Wait past the commit interval so all acknowledged offsets are committed
    Thread.sleep(1500);
    restartAndCheck(receiver, 100, 100, 0);
}