public ReceiverOptions<Integer, Person> receiverOptions(Collection<String> topics) {
    return receiverOptions()
            .addAssignListener(p -> log.info("Group {} partitions assigned {}", groupId, p))
            .addRevokeListener(p -> log.info("Group {} partitions revoked {}", groupId, p))
            .subscription(topics);
}
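A minimal sketch of driving the method above, assuming receiverOptions() supplies base options (bootstrap servers, group id, Integer/Person deserializers) and that a topic named "people" exists; both are illustrative assumptions:

// Build subscription options via the helper above, then open a receive Flux.
ReceiverOptions<Integer, Person> options = receiverOptions(Collections.singleton("people"));
Flux<ReceiverRecord<Integer, Person>> inbound = KafkaReceiver.create(options).receive();
inbound.subscribe(r -> log.info("Received {}", r.value()));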
public Disposable consumeMessages(String topic, CountDownLatch latch) {
    ReceiverOptions<Integer, String> options = receiverOptions.subscription(Collections.singleton(topic))
            .addAssignListener(partitions -> log.debug("onPartitionsAssigned {}", partitions))
            .addRevokeListener(partitions -> log.debug("onPartitionsRevoked {}", partitions));
    Flux<ReceiverRecord<Integer, String>> kafkaFlux = KafkaReceiver.create(options).receive();
    return kafkaFlux.subscribe(record -> {
        ReceiverOffset offset = record.receiverOffset();
        System.out.printf("Received message: topic-partition=%s offset=%d timestamp=%s key=%d value=%s\n",
                offset.topicPartition(),
                offset.offset(),
                dateFormat.format(new Date(record.timestamp())),
                record.key(),
                record.value());
        offset.acknowledge();
        latch.countDown();
    });
}
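A sketch of how consumeMessages might be driven from a test; the topic name, message count, and timeout are illustrative assumptions (the caller must handle InterruptedException):

CountDownLatch latch = new CountDownLatch(10);           // expect 10 messages (assumed count)
Disposable disposable = consumeMessages("demo-topic", latch);
if (!latch.await(30, TimeUnit.SECONDS))                  // fail fast if the messages never arrive
    throw new IllegalStateException("Timed out waiting for messages");
disposable.dispose();                                    // stop the consumer and release resources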
private void subscribeToDestTopic(String groupId, String topic, ReceiverOptions<Integer, Person> receiverOptions, List<Person> received) {
    receiverOptions = receiverOptions
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .consumerProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId)
            .addAssignListener(partitions -> {
                log.debug("Group {} assigned {}", groupId, partitions);
                partitions.forEach(p -> log.trace("Group {} partition {} position {}", groupId, p, p.position()));
            })
            .addRevokeListener(p -> log.debug("Group {} revoked {}", groupId, p));
    Disposable c = KafkaReceiver.create(receiverOptions.subscription(Collections.singleton(topic)))
            .receive()
            .subscribe(m -> {
                Person p = m.value();
                received.add(p);
                log.debug("Thread {} Received from {}: {}", Thread.currentThread().getName(), m.topic(), p);
            });
    disposables.add(c);
}
@Before
public void setUp() {
    topics = new ConcurrentHashMap<>();
    for (int i : Arrays.asList(1, 2, 20, 200))
        topics.put(i, "topic" + i);
    topic = topics.get(2);
    cluster = new MockCluster(2, topics);
    receiverOptions = ReceiverOptions.<Integer, String>create()
            .consumerProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId)
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    assignedPartitions.add(p.topicPartition());
            })
            .addRevokeListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    assignedPartitions.remove(p.topicPartition());
            });
    consumer = new MockConsumer(cluster);
    consumerFactory = new MockConsumer.Pool(Arrays.asList(consumer));
    for (TopicPartition partition : cluster.partitions())
        receiveStartOffsets.put(partition, 0L);
}
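A sketch of how a test might exercise this fixture, constructing the receiver over the mock consumer factory the same way the transactional test below does; the assertion is an assumed example:

// Subscribe to the test topic; the assign/revoke listeners above maintain assignedPartitions.
KafkaReceiver<Integer, String> receiver = new DefaultKafkaReceiver<>(
        consumerFactory, receiverOptions.subscription(Collections.singleton(topic)));
Disposable d = receiver.receive()
        .subscribe(r -> r.receiverOffset().acknowledge());
// Once the mock consumer has polled, the assign listener has recorded the partitions, e.g.:
// assertTrue(assignedPartitions.contains(new TopicPartition(topic, 0)));
d.dispose();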
        assigned.incrementAndGet();
    })
    .addRevokeListener(p -> log.info("Revoked {} {} {}", Thread.currentThread().getName(), id, p))
    .subscription(Collections.singletonList(topic));
kafkaFlux[i] = KafkaReceiver.create(receiverOptions).receive()
.addRevokeListener(partitions -> {
    for (ReceiverPartition p : partitions) {
        ReceiverOffset offset = assignedPartitions.remove(p);
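The fragment above removes the tracked offset for each revoked partition; a self-contained sketch of the full pattern it implies, committing any outstanding offset before the rebalance completes (the map layout, key type, and timeout are assumptions):

Map<TopicPartition, ReceiverOffset> latestOffsets = new ConcurrentHashMap<>();
receiverOptions = receiverOptions
        .addRevokeListener(partitions -> {
            for (ReceiverPartition p : partitions) {
                ReceiverOffset offset = latestOffsets.remove(p.topicPartition());
                if (offset != null)
                    offset.commit().block(Duration.ofSeconds(10)); // flush before losing the partition
            }
        });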
.commitInterval(Duration.ZERO)
.commitBatchSize(0)
.addRevokeListener(partitions -> revoked.addAndGet(partitions.size()))
.addAssignListener(this::seekToBeginning)
.subscription(Collections.singletonList(topic));
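With commitInterval(Duration.ZERO) and commitBatchSize(0), reactor-kafka performs no periodic or batch commits, so offsets must be committed explicitly. A minimal sketch of the resulting per-record commit loop (processRecord is a hypothetical handler):

KafkaReceiver.create(receiverOptions)
        .receive()
        .concatMap(record -> {
            processRecord(record.value());            // hypothetical processing step
            return record.receiverOffset().commit();  // explicit commit, one record at a time
        })
        .subscribe();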
@Before
public void setUp() {
    cluster = new MockCluster(2, Collections.emptyMap());
    cluster.addTopic(srcTopic, partitions);
    cluster.addTopic(destTopic, partitions);
    receiverOptions = ReceiverOptions.<Integer, String>create()
            .consumerProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId)
            .consumerProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
            .consumerProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
            .consumerProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, String.valueOf(maxPollRecords))
            .addAssignListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    assignedPartitions.add(p.topicPartition());
            })
            .addRevokeListener(partitions -> {
                for (ReceiverPartition p : partitions)
                    assignedPartitions.remove(p.topicPartition());
            })
            .subscription(Collections.singleton(srcTopic));
    consumerFactory = new MockConsumer.Pool(Arrays.asList(new MockConsumer(cluster), new MockConsumer(cluster)));
    receiver = new DefaultKafkaReceiver<Integer, String>(consumerFactory, receiverOptions);
    for (TopicPartition partition : cluster.partitions())
        receiveStartOffsets.put(partition, 0L);
    SenderOptions<Integer, String> senderOptions = SenderOptions.<Integer, String>create()
            .producerProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "exactlyOnce");
    producer = new MockProducer(cluster);
    Pool producerFactory = new Pool(Arrays.asList(producer));
    sender = new DefaultKafkaSender<>(producerFactory, senderOptions);
}
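The fixture above (transactional producer, read_committed consumer) is set up for reactor-kafka's exactly-once receive-transform-send pattern. A sketch of that flow over the same receiver and sender fields, following the pattern from the reactor-kafka reference documentation; the pass-through key/value transform is an assumption:

// Each inner Flux is one batch consumed within a transaction; records are re-sent
// to destTopic and the transaction commits only after the sends succeed.
receiver.receiveExactlyOnce(sender.transactionManager())
        .concatMap(batch -> sender.send(batch.map(r -> SenderRecord.create(
                        new ProducerRecord<>(destTopic, r.key(), r.value()), r.key())))
                .concatWith(sender.transactionManager().commit()))
        .onErrorResume(e -> sender.transactionManager().abort().then(Mono.error(e)))
        .subscribe();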