private void sendReceiveWithSendDelay(Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux,
        Duration sendDelay,
        int startIndex,
        int count) throws Exception {
    CountDownLatch latch = new CountDownLatch(count);
    subscribe(kafkaFlux, latch);
    Thread.sleep(sendDelay.toMillis());
    sendMessages(startIndex, count);
    waitForMessages(latch);
    checkConsumedMessages(startIndex, count);
}
private void sendReceive(Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux,
        int sendStartIndex,
        int sendCount,
        int receiveStartIndex,
        int receiveCount) throws Exception {
    CountDownLatch latch = new CountDownLatch(receiveCount);
    subscribe(kafkaFlux, latch);
    if (sendCount > 0)
        sendMessages(sendStartIndex, sendCount);
    waitForMessages(latch);
    checkConsumedMessages(receiveStartIndex, receiveCount);
}
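// The subscribe(...) and waitForMessages(...) helpers used throughout these tests
// are not shown in this section. A minimal sketch, assuming onReceive(record)
// records each message for the checkConsumedMessages assertions (names and
// shapes here are assumptions, not the actual implementation):
private Disposable subscribe(Flux<? extends ConsumerRecord<Integer, String>> flux, CountDownLatch... latches) {
    return flux
        .doOnNext(record -> {
            onReceive(record);
            for (CountDownLatch latch : latches)
                latch.countDown();
        })
        .subscribe();
}

private void waitForMessages(CountDownLatch latch) throws InterruptedException {
    if (!latch.await(receiveTimeoutMillis, TimeUnit.MILLISECONDS))
        fail(latch.getCount() + " messages not received within timeout");
}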
private void sendReceiveWithRedelivery(Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux,
        int sendStartIndex,
        int sendCount,
        int minRedelivered,
        int maxRedelivered) throws Exception {
    int maybeRedelivered = maxRedelivered - minRedelivered;
    CountDownLatch latch = new CountDownLatch(sendCount + maxRedelivered);
    subscribe(kafkaFlux, latch);
    sendMessages(sendStartIndex, sendCount);

    // Count down the latch manually for messages that may or may not be redelivered on each partition
    for (int i = 0; i < partitions; i++) {
        TestUtils.waitUntil("Messages not received on partition " + i, null,
            list -> list.size() > 0, receivedMessages.get(i), Duration.ofMillis(receiveTimeoutMillis));
    }
    int minReceiveIndex = sendStartIndex - minRedelivered;
    for (int i = minReceiveIndex - maybeRedelivered; i < minReceiveIndex; i++) {
        int partition = i % partitions;
        if (receivedMessages.get(partition).get(0) > i)
            latch.countDown();
    }

    // Wait for messages, redelivered as well as those sent here
    waitForMessages(latch);

    // Within the range including redelivered, check that all messages were delivered
    for (int i = 0; i < partitions; i++) {
        List<Integer> received = receivedMessages.get(i);
        int receiveStartIndex = received.get(0);
        int receiveEndIndex = received.get(received.size() - 1);
        checkConsumedMessages(i, receiveStartIndex, receiveEndIndex);
    }
}
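// checkConsumedMessages is used with two shapes in these tests. A hypothetical
// sketch, assuming (startIndex, count) semantics as in sendReceive above, that
// receivedMessages holds the received message indexes per partition, and that
// messages are partitioned by key % partitions:
private void checkConsumedMessages(int receiveStartIndex, int receiveCount) {
    for (int i = receiveStartIndex; i < receiveStartIndex + receiveCount; i++)
        assertTrue("Message " + i + " not received",
            receivedMessages.get(i % partitions).contains(i));
}

private void checkConsumedMessages(int partition, int receiveStartIndex, int receiveEndIndex) {
    for (int i = receiveStartIndex; i <= receiveEndIndex; i++) {
        if (i % partitions == partition)
            assertTrue("Message " + i + " not received on partition " + partition,
                receivedMessages.get(partition).contains(i));
    }
}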
        waitForMessages(latch);
        assertEquals("Concurrent executions on partition", 0, concurrentPartitionExecutions.get());
        checkConsumedMessages(0, count);
        assertNotEquals("No concurrent executions across partitions", 0, concurrentExecutions.get());
    } finally {
@Test
public void sendTransactionalReadCommitted() throws Exception {
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    int count = 100;
    CountDownLatch latch = new CountDownLatch(count);
    subscribe(createReceiver().receive(), latch);

    KafkaSender<Integer, String> txSender = createTransactionalSender();
    txSender.sendTransactionally(Flux.just(createSenderRecords(0, count, true)))
            // The read_committed consumer should not have received anything yet
            .doOnNext(result -> assertEquals(count, latch.getCount()))
            .blockLast(Duration.ofMillis(receiveTimeoutMillis));

    waitForMessages(latch);
    checkConsumedMessages(0, count);
}
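// createTransactionalSender() is assumed to configure a transactional.id on top
// of the shared senderOptions; a minimal sketch (the transactional.id value is
// a placeholder):
private KafkaSender<Integer, String> createTransactionalSender() {
    SenderOptions<Integer, String> txSenderOptions = senderOptions
        .producerProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "test-tx-" + System.currentTimeMillis())
        .stopOnError(true);
    return KafkaSender.create(txSenderOptions);
}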
@Test
public void sendTransactionalReadUncommitted() throws Exception {
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
    int count = 100;
    CountDownLatch latch1 = new CountDownLatch(count);
    CountDownLatch latch2 = new CountDownLatch(count * 2);
    CountDownLatch latch3 = new CountDownLatch(count * 3);
    subscribe(createReceiver().receive(), latch1, latch2, latch3);

    sendMessages(0, count);
    waitForMessages(latch1); // non-transactional messages received

    KafkaSender<Integer, String> txSender = createTransactionalSender();
    txSender.sendTransactionally(Flux.just(createSenderRecords(count, count, true)))
            .then()
            .block(Duration.ofMillis(receiveTimeoutMillis));
    waitForMessages(latch2); // transactional messages received before commit

    sendMessages(count * 2, count);
    waitForMessages(latch3);
    checkConsumedMessages(0, count * 3);
}
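// createSenderRecords(startIndex, count, expectSuccess) is assumed to build
// keyed SenderRecords spread across the topic's partitions; a hypothetical
// sketch (the onSend bookkeeping call is an assumption):
private Flux<SenderRecord<Integer, String, Integer>> createSenderRecords(int startIndex, int count, boolean expectSuccess) {
    return Flux.range(startIndex, count)
        .map(i -> SenderRecord.create(topic, i % partitions, null, i, "Message " + i, i))
        .doOnNext(record -> {
            if (expectSuccess)
                onSend(record); // hypothetical bookkeeping for checkConsumedMessages
        });
}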
@Test
public void autoAckPollWithIntervalWillNotFailOnOverflow() throws Exception {
    ReceiverOptions<Integer, String> options = receiverOptions.addAssignListener(this::onPartitionsAssigned)
        .commitInterval(Duration.ofMillis(10))
        .subscription(Collections.singletonList(topic));
    KafkaReceiver<Integer, String> receiver = KafkaReceiver.create(options);
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = receiver.receiveAutoAck().concatMap(r -> r);
    CountDownLatch latch = new CountDownLatch(100);
    subscribe(kafkaFlux, latch);

    embeddedKafka.shutdownBroker(brokerId);
    Thread.sleep(3000);
    embeddedKafka.startBroker(brokerId);

    sendMessagesSync(0, 100);
    waitForMessages(latch);
    checkConsumedMessages(0, 100);
    waitForCommits(receiver, 100);
}
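// waitForCommits(...) is not shown in this section. One hedged way to implement
// it is to poll the committed offsets through KafkaReceiver.doOnConsumer until
// they add up to 'count'; the shape below is an assumption:
private void waitForCommits(KafkaReceiver<Integer, String> receiver, int count) {
    long endMillis = System.currentTimeMillis() + receiveTimeoutMillis;
    while (System.currentTimeMillis() < endMillis) {
        Long committed = receiver.doOnConsumer(consumer ->
                consumer.assignment().stream()
                    .map(consumer::committed)
                    .filter(Objects::nonNull)
                    .mapToLong(OffsetAndMetadata::offset)
                    .sum())
            .block(Duration.ofMillis(receiveTimeoutMillis));
        if (committed != null && committed == count)
            return;
        TestUtils.sleep(100);
    }
    fail("Offsets not committed within timeout");
}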
@Test
public void sendNonTransactionalReadCommitted() throws Exception {
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    int count = 100;
    CountDownLatch latch1 = new CountDownLatch(count);
    CountDownLatch latch2 = new CountDownLatch(count * 3);
    subscribe(createReceiver().receive(), latch1, latch2);

    sendMessages(0, count);
    waitForMessages(latch1); // non-transactional messages received if no commits pending
    checkConsumedMessages(0, count);

    KafkaSender<Integer, String> txSender = createTransactionalSender();
    TransactionManager txn = txSender.transactionManager();
    txn.begin()
       .thenMany(txSender.send(createSenderRecords(count, count, true)))
       .blockLast(Duration.ofMillis(receiveTimeoutMillis));

    sendMessages(count * 2, count);
    Thread.sleep(1000);
    // Neither the transactional nor the later non-transactional messages are received while the commit is pending
    assertEquals(count * 2, latch2.getCount());

    txn.commit().subscribe();
    waitForMessages(latch2);
    checkConsumedMessages(0, count * 3);
}
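// Note on the assertion above: a read_committed consumer only reads up to the
// last stable offset (LSO), i.e. the offset of the first still-open transaction
// in the partition. Records written after that point stay invisible until the
// transaction commits or aborts, even if they are non-transactional, which is
// why the count * 2 later messages are withheld while the commit is pending.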
@Test
public void offsetResetLatest() throws Exception {
    int count = 10;
    sendMessages(0, count);
    receiverOptions = receiverOptions
        .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
        .addAssignListener(partitions -> assignSemaphore.release());
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver()
        .receive()
        .doOnNext(record -> onReceive(record));
    StepVerifier.create(kafkaFlux)
        .then(() -> assignSemaphore.acquireUninterruptibly())
        .expectNoEvent(Duration.ofMillis(100)) // messages sent before assignment are not replayed with "latest"
        .then(() -> sendMessages(count, count))
        .expectNextCount(count)
        .thenCancel()
        .verify(Duration.ofMillis(receiveTimeoutMillis));
    checkConsumedMessages(count, count);
}
@Test
public void brokerRestart() throws Exception {
    int sendBatchSize = 10;
    receiverOptions = receiverOptions.maxCommitAttempts(1000);
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver()
        .receive()
        .doOnError(e -> log.error("KafkaFlux exception", e));

    CountDownLatch receiveLatch = new CountDownLatch(sendBatchSize * 2);
    subscribe(kafkaFlux, receiveLatch);
    sendMessagesSync(0, sendBatchSize);

    shutdownKafkaBroker();
    TestUtils.sleep(5000);
    restartKafkaBroker();

    sendMessagesSync(sendBatchSize, sendBatchSize);
    waitForMessages(receiveLatch);
    checkConsumedMessages();
}
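// sendMessagesSync(...) is assumed to block until every record is acknowledged,
// retrying per record so that sends survive the broker bounce above. A hedged
// sketch (reusing createSenderRecords is an assumption):
private void sendMessagesSync(int startIndex, int count) throws Exception {
    CountDownLatch latch = new CountDownLatch(count);
    createSenderRecords(startIndex, count, true)
        .concatMap(record -> kafkaSender.send(Mono.just(record))
            .doOnNext(result -> latch.countDown())
            .retry(100)) // keep retrying this record while the broker is down
        .subscribe();
    assertTrue("Messages not sent within timeout",
        latch.await(receiveTimeoutMillis, TimeUnit.MILLISECONDS));
}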
@Test
public void abortTransaction() throws Exception {
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    Flux<? extends ConsumerRecord<Integer, String>> kafkaFlux = createReceiver().receive();
    int count = 100;
    CountDownLatch latch1 = new CountDownLatch(count);
    CountDownLatch latch2 = new CountDownLatch(count * 2);
    subscribe(kafkaFlux, latch1, latch2);

    // Aborted transactional messages must never reach the read_committed consumer
    KafkaSender<Integer, String> txSender = createTransactionalSender();
    txSender.transactionManager().begin()
            .thenMany(txSender.send(createSenderRecords(0, count, false)))
            .then(txSender.transactionManager().abort())
            .then()
            .block(Duration.ofMillis(receiveTimeoutMillis));

    sendMessages(count, count);
    waitForMessages(latch1); // non-transactional messages received if no commits pending
    checkConsumedMessages(count, count);

    txSender.sendTransactionally(Flux.just(createSenderRecords(count * 2, count, true)))
            .then()
            .subscribe();
    waitForMessages(latch2);
    checkConsumedMessages(count, count * 3);
}
@Test
public void transactionalOffsetCommit() throws Exception {
    String destTopic = "topic2";
    createNewTopic(destTopic, partitions);
    int count = 10;
    kafkaSender.createOutbound()
               .send(createProducerRecords(0, count, true))
               .then()
               .block(Duration.ofMillis(receiveTimeoutMillis));

    String sourceConsumerGroupId = "source_consumer";
    receiverOptions = receiverOptions.consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
        .consumerProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
        .consumerProperty(ConsumerConfig.GROUP_ID_CONFIG, sourceConsumerGroupId);
    KafkaSender<Integer, String> txSender = createTransactionalSender();
    KafkaReceiver<Integer, String> receiver = createReceiver();
    receiveAndSendTransactions(receiver, txSender, destTopic, count, 4)
        .onErrorResume(e -> txSender.transactionManager().abort()
            .thenMany(receiveAndSendTransactions(receiver, txSender, destTopic, count - 2, -1)))
        .blockLast(Duration.ofMillis(receiveTimeoutMillis));

    // Check that exactly 'count' messages are committed on destTopic, with one copy of each
    // message from the source topic
    receiverOptions = receiverOptions
        .consumerProperty(ConsumerConfig.GROUP_ID_CONFIG, "dest-consumer")
        .subscription(Collections.singletonList(destTopic))
        .clearAssignListeners()
        .addAssignListener(partitions -> assignSemaphore.release());
    CountDownLatch latch = new CountDownLatch(count);
    subscribe(createReceiver().receive(), latch);
    waitForMessages(latch);
    checkConsumedMessages(0, count);
}
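// receiveAndSendTransactions(...) is not shown in this section. A hedged sketch
// of the exactly-once receive-transform-send pattern it presumably implements,
// using receiveExactlyOnce with the sender's TransactionManager (the failure
// injection via exceptionIndex is an assumption):
private Flux<SenderResult<Integer>> receiveAndSendTransactions(KafkaReceiver<Integer, String> receiver,
        KafkaSender<Integer, String> sender, String destTopic, int count, int exceptionIndex) {
    TransactionManager transactionManager = sender.transactionManager();
    AtomicInteger transactionIndex = new AtomicInteger();
    return receiver.receiveExactlyOnce(transactionManager)
        .concatMap(batch -> sender.send(batch.map(record ->
                SenderRecord.create(destTopic, record.partition(), null, record.key(), record.value(), record.key())))
            .concatWith(transactionIndex.incrementAndGet() == exceptionIndex
                ? Flux.error(new RuntimeException("Test exception"))
                : transactionManager.commit()))
        .take(count);
}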