@Test
public void testAssignOnEmptyTopicPartition() {
    // Assigning an empty partition list is a no-op for state: both the
    // subscription and the assignment must remain empty afterwards.
    try (KafkaConsumer<byte[], byte[]> consumer = newConsumer(groupId)) {
        consumer.assign(Collections.<TopicPartition>emptyList());
        assertTrue(consumer.subscription().isEmpty());
        assertTrue(consumer.assignment().isEmpty());
    }
}
/**
 * Verifies the interplay of subscribe/assign/unsubscribe:
 * subscribing sets subscription() but not assignment(), subscribing to an
 * empty list clears the subscription, manual assign() sets assignment()
 * only, and unsubscribe() clears both.
 */
@Test
public void testSubscription() {
    // Fix: use try-with-resources so the consumer is closed even when an
    // assertion fails mid-test (the original explicit close() was skipped
    // on failure, leaking the consumer). Also consistent with the other
    // tests in this file.
    try (KafkaConsumer<byte[], byte[]> consumer = newConsumer(groupId)) {
        consumer.subscribe(singletonList(topic));
        assertEquals(singleton(topic), consumer.subscription());
        assertTrue(consumer.assignment().isEmpty());

        // Subscribing to an empty topic list clears the subscription.
        consumer.subscribe(Collections.<String>emptyList());
        assertTrue(consumer.subscription().isEmpty());
        assertTrue(consumer.assignment().isEmpty());

        // Manual assignment populates assignment() but never subscription().
        consumer.assign(singletonList(tp0));
        assertTrue(consumer.subscription().isEmpty());
        assertEquals(singleton(tp0), consumer.assignment());

        // unsubscribe() resets both sets.
        consumer.unsubscribe();
        assertTrue(consumer.subscription().isEmpty());
        assertTrue(consumer.assignment().isEmpty());
    }
}
@Test
public void testChangingRegexSubscription() {
    // Two topics exist in metadata; the consumer switches its pattern
    // subscription from one to the other and must end up subscribed to
    // exactly the newly matched topic.
    PartitionAssignor assignor = new RoundRobinAssignor();
    String otherTopic = "other";
    TopicPartition otherPartition = new TopicPartition(otherTopic, 0);

    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);

    Map<String, Integer> topicPartitionCounts = new HashMap<>();
    topicPartitionCounts.put(topic, 1);
    topicPartitionCounts.put(otherTopic, 1);
    initMetadata(client, topicPartitionCounts);
    Node node = metadata.fetch().nodes().get(0);

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, false);

    // First pattern subscription resolves to `topic`.
    Node coordinator = prepareRebalance(client, node, singleton(topic), assignor, singletonList(tp0), null);
    consumer.subscribe(Pattern.compile(topic), getConsumerRebalanceListener(consumer));
    consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
    consumer.poll(Duration.ZERO);
    assertEquals(singleton(topic), consumer.subscription());

    // Re-subscribing with a different pattern triggers a metadata refresh
    // and a second rebalance against the same coordinator.
    consumer.subscribe(Pattern.compile(otherTopic), getConsumerRebalanceListener(consumer));
    client.prepareMetadataUpdate(TestUtils.metadataUpdateWith(1, topicPartitionCounts));
    prepareRebalance(client, node, singleton(otherTopic), assignor, singletonList(otherPartition), coordinator);
    consumer.poll(Duration.ZERO);
    assertEquals(singleton(otherTopic), consumer.subscription());

    consumer.close(Duration.ofMillis(0));
}
@Test
public void testRegexSubscription() {
    // Metadata contains a matching topic and a non-matching one; a pattern
    // subscription must pick up only the matching topic and its partition.
    String unmatchedTopic = "unmatched";
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);

    Map<String, Integer> topicPartitionCounts = new HashMap<>();
    topicPartitionCounts.put(topic, 1);
    topicPartitionCounts.put(unmatchedTopic, 1);
    initMetadata(client, topicPartitionCounts);
    Node node = metadata.fetch().nodes().get(0);

    PartitionAssignor assignor = new RoundRobinAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true);

    prepareRebalance(client, node, singleton(topic), assignor, singletonList(tp0), null);
    consumer.subscribe(Pattern.compile(topic), getConsumerRebalanceListener(consumer));
    client.prepareMetadataUpdate(TestUtils.metadataUpdateWith(1, topicPartitionCounts));
    consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));

    assertEquals(singleton(topic), consumer.subscription());
    assertEquals(singleton(tp0), consumer.assignment());

    consumer.close(Duration.ofMillis(0));
}
// Fix: use assertEquals(expected, actual) instead of assertTrue(size() == n)
// so failures report expected vs. actual, and split compound `&&` assertions
// so a failure pinpoints exactly which element is missing. Pass/fail
// behavior is unchanged.
assertEquals(2, consumer.subscription().size());
assertTrue(consumer.subscription().contains(topic));
assertTrue(consumer.subscription().contains(topic2));
assertTrue(consumer.assignment().isEmpty());

assertEquals(2, consumer.subscription().size());
assertTrue(consumer.subscription().contains(topic));
assertTrue(consumer.subscription().contains(topic2));
assertEquals(2, consumer.assignment().size());
assertTrue(consumer.assignment().contains(tp0));
assertTrue(consumer.assignment().contains(t2p0));

assertEquals(2, consumer.subscription().size());
assertTrue(consumer.subscription().contains(topic));
assertTrue(consumer.subscription().contains(topic3));
assertEquals(2, consumer.assignment().size());
assertTrue(consumer.assignment().contains(tp0));
assertTrue(consumer.assignment().contains(t2p0));

assertEquals(2, consumer.subscription().size());
assertTrue(consumer.subscription().contains(topic));
assertTrue(consumer.subscription().contains(topic3));
assertEquals(2, consumer.assignment().size());
assertTrue(consumer.assignment().contains(tp0));
assertTrue(consumer.assignment().contains(t3p0));

assertTrue(consumer.subscription().isEmpty());
assertTrue(consumer.assignment().isEmpty());
// Phase 1: subscribed to `topic`, nothing assigned yet.
assertEquals(singleton(topic), consumer.subscription());
assertEquals(Collections.emptySet(), consumer.assignment());

// Phase 2: after rebalance, tp0 is assigned.
assertEquals(singleton(topic), consumer.subscription());
assertEquals(singleton(tp0), consumer.assignment());

// Phase 3: subscription changed to `topic2`; the old assignment is still
// in place until the next rebalance completes.
assertEquals(singleton(topic2), consumer.subscription());
assertEquals(singleton(tp0), consumer.assignment());

// Phase 4: unsubscribed — both sets empty.
assertEquals(Collections.emptySet(), consumer.subscription());
assertEquals(Collections.emptySet(), consumer.assignment());
/**
 * Returns, for each partition of {@code topic}, the offset of the last
 * record (end offset minus one), keyed by partition id.
 *
 * @param consumer the consumer used to query offsets; subscribed to
 *                 {@code topic} as a side effect if not already
 * @param topic    the topic whose end offsets are queried
 * @return map from partition id to last-record offset
 */
public Map<Integer, Long> getKafkaOffsetMap(KafkaConsumer<String, String> consumer, String topic) {
    Map<Integer, Long> offsets = new HashMap<>();
    if (!consumer.subscription().contains(topic)) {
        consumer.subscribe(Collections.singletonList(topic));
    }
    // NOTE(review): poll(0) is the deprecated long-timeout overload;
    // presumably it is kept here because it blocks for metadata/assignment
    // before seeking, which poll(Duration.ZERO) does not — confirm before
    // migrating.
    consumer.poll(0);
    List<TopicPartition> partitions = getTopicPartition(consumer, topic);
    consumer.seekToEnd(partitions);
    for (TopicPartition partition : partitions) {
        offsets.put(partition.partition(), consumer.position(partition) - 1);
    }
    return offsets;
}
}
/**
 * Fetches one batch of records from Kafka, converts them to raw byte
 * payloads, and hands them to the buffer, notifying waiting threads.
 * Any error is logged rather than propagated so the polling loop
 * keeps running.
 */
private void fetchDataFromKafka() {
    // Guard clause: nothing to do without an initialized, subscribed consumer.
    if (consumer == null || consumer.subscription().isEmpty()) {
        return;
    }
    try {
        List<byte[]> kafkaMsg = getMessagesBytes(consumer.poll(consumerTimeout));
        fillBufferAndNotifyWaits(kafkaMsg);
    } catch (Throwable t) {
        Logger.getLogger(KafkaConsumerThread.class.getName()).log(Level.SEVERE, null, t);
    }
}
@Override
public void onPartitionsAssigned(final Collection<TopicPartition> partitions) {
    // Handle partitions with no explicit offset. Explicit offset should be committed for every new partition-consumer group, otherwise messages will be lost if offset reset strategy is "latest".
    if (!isAtMostOnceOffsetCommitMode) {
        // Serialize access to the consumer while reading committed offsets —
        // presumably guarding against concurrent use from the polling thread;
        // TODO(review) confirm all other kafkaConsumer accesses take this lock.
        synchronized (kafkaConsumer) {
            for (final TopicPartition topicPartition : partitions) {
                // Seed an initial offset for any newly assigned partition that
                // has no committed offset yet (committed() returns the stored
                // OffsetAndMetadata, or null when none exists).
                final OffsetAndMetadata currentOffsetMetadata = kafkaConsumer.committed(topicPartition);
                initializeOffsetIfNeeded(topicPartition, currentOffsetMetadata);
            }
        }
        // Flush offsets consumed before this rebalance so they are not lost
        // when partitions move to another consumer.
        if (!consumedOffsets.isEmpty()) {
            commitOffsetsInternal();
        }
        // NOTE(review): currentSubscription is refreshed only when NOT in
        // at-most-once commit mode (it sits inside the if above) — confirm
        // that skipping the refresh in at-most-once mode is intentional.
        currentSubscription.clear();
        currentSubscription.addAll(kafkaConsumer.subscription());
    }
}