/**
 * Commit callback: for every committed offset, records the next expected offset
 * (committed offset + 1) in the per-partition array and releases one permit of
 * the commit latch, then empties the pending-offset list.
 */
private void onCommit(List<ReceiverOffset> offsets, CountDownLatch commitLatch, long[] committedOffsets) {
    offsets.forEach(committed -> {
        int partitionIndex = committed.topicPartition().partition();
        committedOffsets[partitionIndex] = committed.offset() + 1;
        commitLatch.countDown();
    });
    offsets.clear();
}
/**
 * Subscribes to {@code topic} and consumes records indefinitely until the returned
 * subscription is disposed. Each received record is printed, its offset acknowledged,
 * and {@code latch} counted down once.
 *
 * @param topic topic to subscribe to via group management
 * @param latch counted down once per received record
 * @return the active subscription; dispose to stop consuming
 */
public Disposable consumeMessages(String topic, CountDownLatch latch) {
    ReceiverOptions<Integer, String> options = receiverOptions
            .subscription(Collections.singleton(topic))
            .addAssignListener(assigned -> log.debug("onPartitionsAssigned {}", assigned))
            .addRevokeListener(revoked -> log.debug("onPartitionsRevoked {}", revoked));
    return KafkaReceiver.create(options)
            .receive()
            .subscribe(record -> {
                ReceiverOffset offset = record.receiverOffset();
                System.out.printf("Received message: topic-partition=%s offset=%d timestamp=%s key=%d value=%s\n",
                        offset.topicPartition(),
                        offset.offset(),
                        dateFormat.format(new Date(record.timestamp())),
                        record.key(),
                        record.value());
                // Manual ack: mark the record as processed so its offset becomes eligible for commit.
                offset.acknowledge();
                latch.countDown();
            });
}
/**
 * Builds a Flux that receives records from {@code topic}, processes each partition's
 * records in order on a shared scheduler, and commits offsets explicitly.
 * Automatic commits are disabled (commitInterval ZERO); instead the latest processed
 * offset is sampled every 5 seconds and committed sequentially.
 */
public Flux<?> flux() {
    Scheduler scheduler = Schedulers.newElastic("sample", 60, true);
    return KafkaReceiver.create(receiverOptions(Collections.singleton(topic)).commitInterval(Duration.ZERO))
            .receive()
            // Group by partition so records of one partition are processed sequentially.
            .groupBy(m -> m.receiverOffset().topicPartition())
            .flatMap(partitionFlux -> partitionFlux.publishOn(scheduler)
                    .map(r -> processRecord(partitionFlux.key(), r))
                    // Emit at most one offset per 5s window — throttles commit frequency.
                    .sample(Duration.ofMillis(5000))
                    // concatMap: commits execute one at a time, in order.
                    .concatMap(offset -> offset.commit()))
            .doOnCancel(() -> close());
}

// NOTE(review): processRecord's body continues beyond this chunk — only its header is visible here.
public ReceiverOffset processRecord(TopicPartition topicPartition, ReceiverRecord<Integer, Person> message) {
/**
 * Tests that assign callbacks are invoked before any records are delivered
 * when partitions are assigned using group management.
 */
@Test
public void assignCallback() {
    receiverOptions = receiverOptions.subscription(Collections.singleton(topic));
    sendMessages(topic, 0, 10);
    receiveAndVerify(10, record -> {
        // Every delivered record's partition must already be known to the assign listener.
        assertTrue("Assign callback not invoked",
                assignedPartitions.contains(record.receiverOffset().topicPartition()));
        return Mono.just(record);
    });
}
/**
 * Send and receive using manual assignment of partitions.
 */
@Test
public void manualAssignment() {
    receiverOptions = receiverOptions.assignment(cluster.partitions(topic));
    sendMessages(topic, 0, 10);
    receiveAndVerify(10, record -> {
        // Assign listeners fire for manually assigned partitions as well.
        assertTrue("Assign callback not invoked",
                assignedPartitions.contains(record.receiverOffset().topicPartition()));
        return Mono.just(record);
    });
}
// Per-partition consumption: group the record stream by topic-partition and give each
// group its own subscription on `scheduler` with prefetch 1 (one in-flight record per
// partition). Each inner subscription is tracked in groupDisposables for later cleanup.
// NOTE(review): the record-handling lambda continues beyond this chunk.
Disposable disposable = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions)
        .receive()
        .groupBy(m -> m.receiverOffset().topicPartition())
        .subscribe(partitionFlux -> groupDisposables.add(partitionFlux.publishOn(scheduler, 1).subscribe(record -> {
            int partition = record.partition();
/**
 * Tests that only acknowledged offsets are committed with manual-ack using
 * {@link KafkaReceiver#receive()}.
 */
@Test
public void manualAck() {
    receiverOptions = receiverOptions
            .subscription(Collections.singleton(topic))
            .commitBatchSize(1);
    // Tracks the highest acknowledged offset per partition; -1 means none acknowledged yet.
    Map<TopicPartition, Long> acknowledged = new ConcurrentHashMap<>();
    for (TopicPartition partition : cluster.partitions(topic)) {
        acknowledged.put(partition, -1L);
    }
    sendMessages(topic, 0, 20);
    receiveAndVerify(10, record -> {
        ReceiverOffset offset = record.receiverOffset();
        TopicPartition partition = offset.topicPartition();
        // Validity is checked against the commit state BEFORE this record is acknowledged:
        // nothing beyond the last acknowledged offset may have been committed.
        Long committed = cluster.committedOffset(groupId, partition);
        boolean valid = committed == null || acknowledged.get(partition) >= committed - 1;
        if (offset.offset() % 3 == 0) {
            // Acknowledge only every third offset so unacknowledged ones must stay uncommitted.
            offset.acknowledge();
            acknowledged.put(partition, offset.offset());
        }
        assertTrue("Unexpected commit state", valid);
        return Mono.just(record);
    });
    // After consumption, each partition's committed offset is exactly lastAcked + 1.
    acknowledged.forEach((partition, lastAcked) -> {
        Long committed = cluster.committedOffset(groupId, partition);
        assertEquals(lastAcked + 1, committed.longValue());
    });
}
/**
 * Verifies the commit state for the record's partition. If this record's offset is the
 * last one expected to be committed, waits briefly for the asynchronous commit to land,
 * then asserts that the committed offset equals {@code lastCommitted + 1}
 * (or that nothing is committed when {@code lastCommitted} is -1).
 *
 * @param r             the record whose partition's commit state is checked
 * @param lastCommitted offset expected to have been committed, or -1 for none
 */
private void verifyCommit(ReceiverRecord<Integer, String> r, long lastCommitted) {
    TopicPartition partition = r.receiverOffset().topicPartition();
    long offset = r.receiverOffset().offset();
    if (lastCommitted >= 0 && offset == lastCommitted) {
        // Compare boxed Longs with equals(): the original '==' was a reference comparison,
        // which only works for values in the Long cache range [-128, 127] and silently
        // fails for larger offsets.
        TestUtils.waitUntil("Offset not committed", null,
                p -> Long.valueOf(offset + 1).equals(cluster.committedOffset(groupId, p)),
                partition, Duration.ofSeconds(1));
    }
    Long committedOffset = cluster.committedOffset(groupId, partition);
    // assertEquals takes (expected, actual) — keep that order so failure messages are accurate.
    Long expected = lastCommitted == -1 ? null : Long.valueOf(lastCommitted + 1);
    assertEquals(expected, committedOffset);
}
// Per-partition processing: records are grouped by topic-partition and each group is
// published on `scheduler`; every inner subscription is kept in subscribeDisposables
// so it can be disposed later.
// NOTE(review): the record-handling lambda continues beyond this chunk.
kafkaFlux.groupBy(m -> m.receiverOffset().topicPartition())
        .subscribe(partitionFlux -> subscribeDisposables.add(partitionFlux.publishOn(scheduler).subscribe(record -> {
            int partition = record.partition();
// NOTE(review): fragment of a longer receiver pipeline — the chain starts and continues
// outside this chunk.
.receive()
.doOnNext(m -> {
    // Every delivered record must belong to a currently assigned partition.
    assertTrue(assignedPartitions.containsKey(m.receiverOffset().topicPartition()));
    // Remember the most recently seen offset for that partition.
    assignedPartitions.put(m.receiverOffset().topicPartition(), m.receiverOffset());
    receiveLatch.countDown();
})
// Per-partition-number consumption: groups by the partition index (int), takes a fixed
// count per partition, and processes each group on `scheduler` with prefetch 1. Each
// inner subscription is tracked in groupDisposables for cleanup.
// NOTE(review): the record-handling lambda continues beyond this chunk.
Disposable disposable = new DefaultKafkaReceiver<>(consumerFactory, receiverOptions)
        .receive()
        .groupBy(m -> m.receiverOffset().topicPartition().partition())
        .subscribe(partitionFlux -> groupDisposables.add(partitionFlux.take(countPerPartition).publishOn(scheduler, 1).subscribe(record -> {
            String thread = Thread.currentThread().getName();