private void subscribeToDestTopic(String groupId, String topic, ReceiverOptions<Integer, Person> receiverOptions, List<Person> received) {
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .consumerProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId)
            .addAssignListener(partitions -> {
                log.debug("Group {} assigned {}", groupId, partitions);
                partitions.forEach(p -> log.trace("Group {} partition {} position {}", groupId, p, p.position()));
            })
            .addRevokeListener(p -> log.debug("Group {} revoked {}", groupId, p));
    Disposable c = KafkaReceiver.create(receiverOptions.subscription(Collections.singleton(topic)))
            .receive()
            .subscribe(m -> {
                Person p = m.value();
                received.add(p);
                log.debug("Thread {} Received from {}: {}", Thread.currentThread().getName(), m.topic(), p);
            });
    disposables.add(c);
}

private CommittableSource createTestSource(int count, List<Person> expected) {
ImmutableReceiverOptions(ReceiverOptions<K, V> options) {
    this(
        options.consumerProperties(),
        options.assignListeners(),
        options.revokeListeners(),
        options.keyDeserializer(),
        options.valueDeserializer(),
        options.pollTimeout(),
        options.closeTimeout(),
        options.commitInterval(),
        options.commitBatchSize(),
        options.atmostOnceCommitAheadSize(),
        options.maxCommitAttempts(),
        options.subscriptionTopics(),
        options.assignment(),
        options.subscriptionPattern(),
        options.schedulerSupplier()
    );
}
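// Sketch (not from the sources above): because the options are immutable, every
// mutator returns a new instance, so the result must be reassigned; calling a
// mutator and discarding the return value is a no-op. Assumes `consumerProps`
// holds the usual bootstrap and deserializer properties.
ReceiverOptions<Integer, String> base = ReceiverOptions.create(consumerProps);
ReceiverOptions<Integer, String> tuned = base
        .commitBatchSize(10)
        .commitInterval(Duration.ofMillis(60000));
// `base` still carries the defaults; only `tuned` has the commit overrides.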
public <K, V> Consumer<K, V> createConsumer(ReceiverOptions<K, V> config) {
    return new KafkaConsumer<>(config.consumerProperties(), config.keyDeserializer(), config.valueDeserializer());
}
}
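// Sketch (an assumption, not from the sources): tests can swap in a mock by
// overriding createConsumer, so that DefaultKafkaReceiver polls a MockConsumer
// instead of a live KafkaConsumer. Assumes ConsumerFactory is subclassable from
// the test's package; `mockConsumer` is a placeholder for a pre-built mock such
// as the MockConsumer used in the tests below.
ConsumerFactory testFactory = new ConsumerFactory() {
    @Override
    @SuppressWarnings("unchecked")
    public <K, V> Consumer<K, V> createConsumer(ReceiverOptions<K, V> config) {
        return (Consumer<K, V>) (Consumer<?, ?>) mockConsumer;
    }
};
KafkaReceiver<Integer, String> receiver = new DefaultKafkaReceiver<>(testFactory, receiverOptions);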
public ReceiverOptions<Integer, Person> receiverOptions(Collection<String> topics) {
    return receiverOptions()
            .addAssignListener(p -> log.info("Group {} partitions assigned {}", groupId, p))
            .addRevokeListener(p -> log.info("Group {} partitions revoked {}", groupId, p))
            .subscription(topics);
}
/**
 * Tests that commits are disabled completely if periodic commits by batch size
 * and periodic commits by interval are both disabled.
 */
@Test
public void autoCommitDisable() throws Exception {
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ZERO)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, 20);
    receiveAndVerify(20);
    receivedMessages.clear();
    // Since nothing was committed, a fresh consumer re-reads all 20 records.
    consumerFactory.addConsumer(new MockConsumer(cluster));
    receiveAndVerify(20);
}
@Test
public void manualCommitRecordAsync() throws Exception {
    int count = 10;
    CountDownLatch commitLatch = new CountDownLatch(count);
    long[] committedOffsets = new long[partitions];
    receiverOptions = receiverOptions
            .commitInterval(Duration.ZERO)
            .commitBatchSize(0)
            .addAssignListener(this::seekToBeginning)
            .subscription(Collections.singletonList(topic));
    // Commit each record individually; subscribing to the commit Mono inside
    // doOnNext makes the commit asynchronous (fire-and-forget).
    Flux<ReceiverRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(receiverOptions)
            .receive()
            .doOnNext(record -> record.receiverOffset()
                    .commit()
                    .doOnSuccess(i -> onCommit(record, commitLatch, committedOffsets))
                    .doOnError(e -> log.error("Commit exception", e))
                    .subscribe());
    subscribe(kafkaFlux, new CountDownLatch(count));
    sendMessages(0, count);
    checkCommitCallbacks(commitLatch, committedOffsets);
}
public KafkaReceiver<Integer, String> createReceiver() {
    receiverOptions = receiverOptions.addAssignListener(this::onPartitionsAssigned)
            .subscription(Collections.singletonList(topic));
    return KafkaReceiver.create(receiverOptions);
}
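// A usage sketch for the helper above (assumed; it mirrors the receive/ack
// pattern used elsewhere in these tests): consume records, acknowledge each
// one, and track the subscription for cleanup.
Disposable subscription = createReceiver()
        .receive()
        .subscribe(record -> {
            log.debug("Received {}", record.value());
            record.receiverOffset().acknowledge();
        });
disposables.add(subscription);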
@Test
public void manualCommitRetry() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 2);
    int count = 10;
    receiverOptions = receiverOptions
            .commitBatchSize(0)
            .commitInterval(Duration.ofMillis(Long.MAX_VALUE))
            .maxCommitAttempts(1)
            .subscription(Collections.singletonList(topic));
    sendMessages(topic, 0, count + 10);
    // maxCommitAttempts(1) disables built-in retries, so the retry() on the
    // commit Mono is what recovers from the two injected commit failures.
    receiveAndVerify(10, record -> record.receiverOffset().commit().retry().then(Mono.just(record)));
    verifyCommits(groupId, topic, 10);
}
private void testAutoCommitFailureScenarios(boolean retriable, int count, int maxAttempts,
        int errorInjectIndex, int errorClearIndex) throws Exception {
    AtomicBoolean failed = new AtomicBoolean();
    receiverOptions = receiverOptions.commitBatchSize(1)
            .commitInterval(Duration.ofMillis(1000))
            .maxCommitAttempts(maxAttempts)
            .closeTimeout(Duration.ofMillis(1000))
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    // ...
    if (!failed.get()) {
        onNextSemaphore.tryAcquire(requestTimeoutMillis, TimeUnit.MILLISECONDS);
        TestUtils.sleep(receiverOptions.pollTimeout().toMillis());
Map<TopicPartition, ReceiverOffset> assignedPartitions = new HashMap<>();
receiverOptions = receiverOptions
        .commitInterval(Duration.ZERO)
        .commitBatchSize(0)
        .assignment(topicPartitions)
        .addAssignListener(partitions -> {
            for (ReceiverPartition p : partitions)
                p.seekToBeginning();
        })
        .addRevokeListener(partitions -> {
            for (ReceiverPartition p : partitions) {
                // The map is keyed by TopicPartition, so look up by p.topicPartition()
                ReceiverOffset offset = assignedPartitions.remove(p.topicPartition());
.maxCommitAttempts(100)
.addAssignListener(this::seekToBeginning)
.addAssignListener(this::onPartitionsAssigned)
.assignment(Collections.singletonList(new TopicPartition(topic, 1)))

.commitInterval(Duration.ZERO)
.commitBatchSize(0)
.addAssignListener(this::seekToBeginning)
.assignment(Collections.singletonList(new TopicPartition(topic, 0)))
log.info("Start consumer {}", id); receiverOptions = receiverOptions .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") .addAssignListener(p -> { log.info("Assigned {} {} {}", Thread.currentThread().getName(), id, p); assigned.incrementAndGet(); }) .addRevokeListener(p -> log.info("Revoked {} {} {}", Thread.currentThread().getName(), id, p)) .subscription(Collections.singletonList(topic)); kafkaFlux[i] = KafkaReceiver.create(receiverOptions).receive() .publishOn(consumerScheduler) TestUtils.waitUntil("Assigment not complete for " + i, () -> assigned, a -> a.get() >= id + 1, assigned, Duration.ofSeconds(30)); assigned.set(0); receiverOptions.clearAssignListeners(); receiverOptions.clearRevokeListeners();
@Test
public void atleastOnceCommitBatchSize() throws Exception {
    receiverOptions = receiverOptions
            .closeTimeout(Duration.ofMillis(1000))
            .commitBatchSize(10)
            .commitInterval(Duration.ofMillis(60000));
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive()
            .doOnNext(record -> record.receiverOffset().acknowledge());
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // At most commitBatchSize records may be redelivered, since up to one
    // batch of acknowledged but uncommitted offsets can be lost on restart.
    restartAndCheck(receiver, 100, 100, receiverOptions.commitBatchSize());
}
/**
 * Tests that the inbound flux is terminated with an error if a transient commit
 * error persists beyond the maximum configured number of attempts.
 */
@Test
public void autoAckCommitTransientErrorMaxRetries() throws Exception {
    consumer.addCommitException(new RetriableCommitFailedException("coordinator failed"), 5);
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic))
            .maxCommitAttempts(5)
            .commitBatchSize(2);
    int count = 100;
    sendMessages(topic, 0, count);
    DefaultKafkaReceiver<Integer, String> receiver = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    Semaphore errorSemaphore = new Semaphore(0);
    receiver.receiveAutoAck()
            .concatMap(r -> r)
            .doOnNext(r -> receivedMessages.add(r))
            .doOnError(e -> errorSemaphore.release())
            .subscribe();
    assertTrue("Flux did not fail", errorSemaphore.tryAcquire(1, TimeUnit.SECONDS));
    assertTrue("Commit failure did not fail flux", receivedMessages.size() < count);
}
/**
 * Consumes from the first available offset of each partition by seeking to the
 * beginning of all assigned partitions in the assign listener.
 */
@Test
public void seekToBeginning() throws Exception {
    sendMessages(topic, 0, 10);
    Semaphore assignSemaphore = new Semaphore(0);
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    p.seekToBeginning();
                assignSemaphore.release();
            })
            .subscription(Collections.singleton(topic));
    DefaultKafkaReceiver<Integer, String> receiver = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions);
    receiveWithOneOffAction(receiver, 10, 10, () -> sendMessages(topic, 10, 20));
    assertTrue("Assign callback not invoked", assignSemaphore.tryAcquire(1, TimeUnit.SECONDS));
}
@Test
public void publishFromEventScheduler() throws Exception {
    receiverOptions = receiverOptions
            .schedulerSupplier(Schedulers::immediate)
            .addAssignListener(this::onPartitionsAssigned)
            .subscription(Collections.singletonList(topic));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(receiverOptions);
    AtomicReference<String> publishingThreadName = new AtomicReference<>();
    CountDownLatch receiveLatch = new CountDownLatch(1);
    Disposable disposable = receiver.receive()
            .doOnNext(record -> {
                publishingThreadName.set(Thread.currentThread().getName());
                record.receiverOffset().acknowledge();
                receiveLatch.countDown();
            })
            .subscribe();
    subscribeDisposables.add(disposable);
    waitFoPartitionAssignment();
    sendMessages(0, 1);
    waitForMessages(receiveLatch);
    assertNotNull(publishingThreadName.get());
    assertTrue(publishingThreadName.get().startsWith("reactive-kafka-"));
}
ReactiveEndToEndLatency(Map<String, Object> consumerPropsOverride, Map<String, Object> producerPropsOverride,
        String bootstrapServers, String topic) {
    super(consumerPropsOverride, producerPropsOverride, bootstrapServers, topic);
    sender = KafkaSender.create(SenderOptions.create(producerProps));
    ReceiverOptions<byte[], byte[]> receiverOptions = ReceiverOptions.<byte[], byte[]>create(consumerProps)
            .addAssignListener(partitions -> {
                if (assignSemaphore.availablePermits() == 0) {
                    partitions.forEach(p -> p.seekToEnd());
                    assignSemaphore.release();
                }
            })
            .subscription(Collections.singleton(topic));
    flux = KafkaReceiver.create(receiverOptions)
            .receive();
    receiveQueue = new LinkedBlockingQueue<>();
    System.out.println("Running latency test using Reactive API, class=" + this.getClass().getName());
}

public void initialize() {
@Test
public void atleastOnceClose() throws Exception {
    receiverOptions = receiverOptions.closeTimeout(Duration.ofMillis(1000))
            .commitBatchSize(10)
            .commitInterval(Duration.ofMillis(60000))
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KafkaReceiver<Integer, String> receiver = createReceiver();
    Flux<ReceiverRecord<Integer, String>> fluxWithAck = receiver.receive().doOnNext(record -> {
        if (receivedMessages.get(record.partition()).size() < 10)
            record.receiverOffset().acknowledge();
    });
    sendReceive(fluxWithAck, 0, 100, 0, 100);

    // Check that close commits ack'ed records, does not commit un-ack'ed records
    cancelSubscriptions(true);
    clearReceivedMessages();
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux2 = createReceiver().receiveAutoAck().concatMap(r -> r);
    sendReceive(kafkaFlux2, 100, 100, 10 * partitions, 200 - (10 * partitions));
}
@Test
public void backPressureReceiveAtmostOnce() throws Exception {
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1")
            .subscription(Collections.singleton(topic));
    Flux<?> flux = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions).receiveAtmostOnce();
    testBackPressure(flux);
}
@Test
public void autoAckPollWithIntervalWillNotFailOnOverflow() throws Exception {
    ReceiverOptions<Integer, String> options = receiverOptions.addAssignListener(this::onPartitionsAssigned)
            .commitInterval(Duration.ofMillis(10))
            .subscription(Collections.singletonList(topic));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(options);
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receiveAutoAck().concatMap(r -> r);
    CountDownLatch latch = new CountDownLatch(100);
    subscribe(kafkaFlux, latch);
    embeddedKafka.shutdownBroker(brokerId);
    Thread.sleep(3000);
    embeddedKafka.startBroker(brokerId);
    sendMessagesSync(0, 100);
    waitForMessages(latch);
    checkConsumedMessages(0, 100);
    waitForCommits(receiver, 100);
}