/**
 * Get metadata about the partitions for a given topic. This method will issue a remote call to the server if it
 * does not already have any metadata about the given topic.
 *
 * @param topic The topic to get partition metadata for
 *
 * @return The list of partitions
 * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
 *         function is called
 * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
 *         this function is called
 * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
 * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the specified topic. See the exception for more details
 * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors
 * @throws org.apache.kafka.common.errors.TimeoutException if the topic metadata could not be fetched before
 *         the amount of time allocated by {@code default.api.timeout.ms} expires
 */
@Override
public List<PartitionInfo> partitionsFor(String topic) {
    return partitionsFor(topic, Duration.ofMillis(defaultApiTimeoutMs));
}
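// A minimal, hypothetical caller-side sketch of the API documented above, assuming a
// placeholder broker at localhost:9092 and a placeholder topic "example-topic": it fetches
// the partition list within default.api.timeout.ms, handles the documented TimeoutException,
// and treats a null return as an absent topic (the behavior several snippets below rely on).
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.serialization.StringDeserializer;

public class PartitionsForExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            try {
                // Blocks for up to default.api.timeout.ms while fetching topic metadata.
                List<PartitionInfo> partitions = consumer.partitionsFor("example-topic"); // placeholder topic
                if (partitions == null) {
                    System.out.println("Topic not found");
                } else {
                    partitions.forEach(p -> System.out.println("partition " + p.partition()));
                }
            } catch (TimeoutException e) {
                // Metadata could not be fetched before the default API timeout expired.
                System.err.println("Metadata fetch timed out: " + e.getMessage());
            }
        }
    }
}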
@Override
public Set<Integer> getPartitionIds(String stream) {
    List<PartitionInfo> partitions = consumer.partitionsFor(stream);
    if (partitions == null) {
        throw new ISE("Topic [%s] is not found in KafkaConsumer's list of topics", stream);
    }
    return partitions.stream().map(PartitionInfo::partition).collect(Collectors.toSet());
}
private void initRetryQueuePartitionCountMap() {
    // get the partition count for each retry-queue topic
    retryQueuePartitionCount = new HashMap<>(4);
    for (String topicName : subscribedReconsumeKafkaTopics) {
        List<PartitionInfo> partitionsInfo = reconsumer.partitionsFor(topicName);
        retryQueuePartitionCount.put(topicName, partitionsInfo.size());
    }
}
private static List<TopicPartition> fetchTopicPartitions(String topic, KafkaConsumer<byte[], byte[]> consumer) {
    // This call blocks for up to REQUEST_TIMEOUT_MS_CONFIG ("request.timeout.ms") and throws
    // org.apache.kafka.common.errors.TimeoutException if the metadata cannot be fetched in time.
    // TODO: consider adding retry logic (see the sketch below).
    List<PartitionInfo> partitions = consumer.partitionsFor(topic);
    return partitions.stream().map(p -> new TopicPartition(topic, p.partition())).collect(Collectors.toList());
}
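// The TODO above hints at retrying; one possible sketch of such a wrapper follows. The
// fetchTopicPartitionsWithRetry name, maxAttempts, and backoffMs are illustrative choices,
// not part of the original code.
private static List<TopicPartition> fetchTopicPartitionsWithRetry(
        String topic, KafkaConsumer<byte[], byte[]> consumer, int maxAttempts, long backoffMs) {
    org.apache.kafka.common.errors.TimeoutException lastError = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
            return fetchTopicPartitions(topic, consumer);
        } catch (org.apache.kafka.common.errors.TimeoutException e) {
            lastError = e; // metadata fetch timed out; back off and try again
            try {
                Thread.sleep(backoffMs);
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                throw new RuntimeException("Interrupted while retrying metadata fetch", ie);
            }
        }
    }
    throw lastError; // every attempt timed out
}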
private Map<TopicPartition, OffsetAndMetadata> getKafkaOffsets(
        KafkaConsumer<String, byte[]> client) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    List<PartitionInfo> partitions = client.partitionsFor(topicStr);
    for (PartitionInfo partition : partitions) {
        TopicPartition key = new TopicPartition(topicStr, partition.partition());
        OffsetAndMetadata offsetAndMetadata = client.committed(key);
        if (offsetAndMetadata != null) {
            offsets.put(key, offsetAndMetadata);
        }
    }
    return offsets;
}
private Map<TopicPartition, OffsetAndMetadata> getKafkaOffsets(
        KafkaConsumer<String, byte[]> client, String topicStr) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    List<PartitionInfo> partitions = client.partitionsFor(topicStr);
    for (PartitionInfo partition : partitions) {
        TopicPartition key = new TopicPartition(topicStr, partition.partition());
        OffsetAndMetadata offsetAndMetadata = client.committed(key);
        if (offsetAndMetadata != null) {
            offsets.put(key, offsetAndMetadata);
        }
    }
    return offsets;
}
private void prepareConsumers() {
    int numConsumers = _consumers.size();
    List<List<TopicPartition>> assignments = new ArrayList<>();
    for (int i = 0; i < numConsumers; i++) {
        assignments.add(new ArrayList<>());
    }
    int j = 0;
    for (String topic : Arrays.asList(_partitionMetricSampleStoreTopic, _brokerMetricSampleStoreTopic)) {
        for (PartitionInfo partInfo : _consumers.get(0).partitionsFor(topic)) {
            assignments.get(j++ % numConsumers).add(new TopicPartition(partInfo.topic(), partInfo.partition()));
        }
    }
    for (int i = 0; i < numConsumers; i++) {
        _consumers.get(i).assign(assignments.get(i));
    }
}
@Override
protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) throws WakeupException {
    List<KafkaTopicPartition> partitions = new LinkedList<>();
    try {
        for (String topic : topics) {
            for (PartitionInfo partitionInfo : kafkaConsumer.partitionsFor(topic)) {
                partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition()));
            }
        }
    } catch (org.apache.kafka.common.errors.WakeupException e) {
        // rethrow our own wakeup exception
        throw new WakeupException();
    }
    return partitions;
}
@Override
protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics)
        throws AbstractPartitionDiscoverer.WakeupException {
    List<KafkaTopicPartition> partitions = new LinkedList<>();
    try {
        for (String topic : topics) {
            for (PartitionInfo partitionInfo : kafkaConsumer.partitionsFor(topic)) {
                partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition()));
            }
        }
    } catch (org.apache.kafka.common.errors.WakeupException e) {
        // rethrow our own wakeup exception
        throw new AbstractPartitionDiscoverer.WakeupException();
    }
    return partitions;
}
private Map<TopicPartition, OffsetAndMetadata> getZookeeperOffsets(
        KafkaZkClient zkClient, KafkaConsumer<String, byte[]> consumer) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    List<PartitionInfo> partitions = consumer.partitionsFor(topicStr);
    for (PartitionInfo partition : partitions) {
        TopicPartition topicPartition = new TopicPartition(topicStr, partition.partition());
        Option<Object> optionOffset = zkClient.getConsumerOffset(groupId, topicPartition);
        if (optionOffset.nonEmpty()) {
            Long offset = (Long) optionOffset.get();
            OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(offset);
            offsets.put(topicPartition, offsetAndMetadata);
        }
    }
    return offsets;
}
private Map<TopicPartition, OffsetAndMetadata> getZookeeperOffsets(
        KafkaZkClient zkClient, KafkaConsumer<String, byte[]> consumer, String topicStr) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    List<PartitionInfo> partitions = consumer.partitionsFor(topicStr);
    for (PartitionInfo partition : partitions) {
        TopicPartition topicPartition = new TopicPartition(topicStr, partition.partition());
        Option<Object> optionOffset = zkClient.getConsumerOffset(groupId, topicPartition);
        if (optionOffset.nonEmpty()) {
            Long offset = (Long) optionOffset.get();
            OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(offset);
            offsets.put(topicPartition, offsetAndMetadata);
        }
    }
    return offsets;
}
try (KafkaConsumer<String, String> consumer = KafkaClient.getKafkaConsumer(brokers, consumerGroup, kafkaProperties)) {
    final List<PartitionInfo> partitionInfos = consumer.partitionsFor(inputTopic);
    Preconditions.checkArgument(partitionInfos.size() == startOffsetMap.size(),
            "partition number mismatch with server side");
@Override
public String formatAsText() {
    try (KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(consumerProperties)) {
        List<PartitionInfo> partitionsInfo = consumer.partitionsFor(topic);
        List<TopicPartition> topicPartitions = partitionsInfo.stream()
                .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
                .collect(Collectors.toList());
        Map<TopicPartition, Long> endOffsets = consumer.endOffsets(topicPartitions);
        Map<TopicPartition, Long> startOffsets = consumer.beginningOffsets(topicPartitions);
        return partitionsInfo.stream()
                .map(partitionInfo -> String.format("%s [start offset = [%s], end offset = [%s]]",
                        partitionInfo.toString(),
                        startOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition())),
                        endOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))))
                .collect(Collectors.joining("\n"));
    } catch (Exception e) {
        return String.format("ERROR fetching metadata for Topic [%s], Connection String [%s], Error [%s]",
                topic, consumerProperties.getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG), e.getMessage());
    }
}
@Test
public void testFilterOnAbsentTopic() {
    String presentTopic = "present";
    String absentTopic = "absent";
    NamedTopicFilter filter = new NamedTopicFilter(presentTopic, absentTopic);
    when(consumerMock.partitionsFor(presentTopic)).thenReturn(Collections.singletonList(createPartitionInfo(presentTopic, 2)));
    when(consumerMock.partitionsFor(absentTopic)).thenReturn(null);
    Set<TopicPartition> presentPartitions = filter.getAllSubscribedPartitions(consumerMock);
    assertThat("Expected filter to pass only topics which are present", presentPartitions,
        contains(new TopicPartition(presentTopic, 2)));
}
@Test(expected = AuthenticationException.class)
public void testPartitionsForAuthenticationFailure() {
    final KafkaConsumer<String, String> consumer = consumerWithPendingAuthentication();
    consumer.partitionsFor("some other topic");
}
@Test
public void testFilter() {
    String matchingTopicOne = "test-1";
    String matchingTopicTwo = "test-11";
    String unmatchedTopic = "unmatched";
    NamedTopicFilter filter = new NamedTopicFilter(matchingTopicOne, matchingTopicTwo);
    when(consumerMock.partitionsFor(matchingTopicOne)).thenReturn(Collections.singletonList(createPartitionInfo(matchingTopicOne, 0)));
    List<PartitionInfo> partitionTwoPartitions = new ArrayList<>();
    partitionTwoPartitions.add(createPartitionInfo(matchingTopicTwo, 0));
    partitionTwoPartitions.add(createPartitionInfo(matchingTopicTwo, 1));
    when(consumerMock.partitionsFor(matchingTopicTwo)).thenReturn(partitionTwoPartitions);
    when(consumerMock.partitionsFor(unmatchedTopic)).thenReturn(Collections.singletonList(createPartitionInfo(unmatchedTopic, 0)));
    Set<TopicPartition> matchedPartitions = filter.getAllSubscribedPartitions(consumerMock);
    assertThat("Expected filter to pass only topics with exact name matches", matchedPartitions,
        containsInAnyOrder(new TopicPartition(matchingTopicOne, 0), new TopicPartition(matchingTopicTwo, 0),
            new TopicPartition(matchingTopicTwo, 1)));
}
public TopicStreamWriter(
    final SchemaRegistryClient schemaRegistryClient,
    final Map<String, Object> consumerProperties,
    final String topicName,
    final long interval,
    final Duration disconnectCheckInterval,
    final boolean fromBeginning
) {
    this.schemaRegistryClient = schemaRegistryClient;
    this.topicName = topicName;
    this.messagesWritten = 0;
    this.disconnectCheckInterval = Objects
        .requireNonNull(disconnectCheckInterval, "disconnectCheckInterval");
    this.topicConsumer = new KafkaConsumer<>(
        consumerProperties,
        new StringDeserializer(),
        new BytesDeserializer()
    );
    final List<TopicPartition> topicPartitions = topicConsumer.partitionsFor(topicName)
        .stream()
        .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
        .collect(Collectors.toList());
    topicConsumer.assign(topicPartitions);
    if (fromBeginning) {
        topicConsumer.seekToBeginning(topicPartitions);
    }
    this.interval = interval;
}
@Override
public void subscribe(final Flow.Subscriber<Collection<String>> subscriber) {
    final KafkaConsumer<String, Bytes> topicConsumer = new KafkaConsumer<>(
        consumerProperties,
        new StringDeserializer(),
        new BytesDeserializer()
    );
    log.info("Running consumer for topic {}", topicName);
    final List<TopicPartition> topicPartitions = topicConsumer.partitionsFor(topicName)
        .stream()
        .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
        .collect(Collectors.toList());
    topicConsumer.assign(topicPartitions);
    if (fromBeginning) {
        topicConsumer.seekToBeginning(topicPartitions);
    }
    subscriber.onSubscribe(new PrintSubscription(
        subscriber,
        topicConsumer,
        new RecordFormatter(schemaRegistryClient, topicName)
    ));
}
public static Map<Integer, Long> getLatestOffsets(final CubeInstance cubeInstance) {
    final KafkaConfig kafkaConfig = KafkaConfigManager.getInstance(KylinConfig.getInstanceFromEnv()).getKafkaConfig(cubeInstance.getRootFactTable());
    final String brokers = KafkaClient.getKafkaBrokers(kafkaConfig);
    final String topic = kafkaConfig.getTopic();
    Map<Integer, Long> latestOffsets = Maps.newHashMap();
    try (final KafkaConsumer consumer = KafkaClient.getKafkaConsumer(brokers, cubeInstance.getName(), null)) {
        final List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
        for (PartitionInfo partitionInfo : partitionInfos) {
            long latest = getLatestOffset(consumer, topic, partitionInfo.partition());
            latestOffsets.put(partitionInfo.partition(), latest);
        }
    }
    return latestOffsets;
}
public static Map<Integer, Long> getEarliestOffsets(final CubeInstance cubeInstance) {
    final KafkaConfig kafkaConfig = KafkaConfigManager.getInstance(KylinConfig.getInstanceFromEnv()).getKafkaConfig(cubeInstance.getRootFactTable());
    final String brokers = KafkaClient.getKafkaBrokers(kafkaConfig);
    final String topic = kafkaConfig.getTopic();
    Map<Integer, Long> startOffsets = Maps.newHashMap();
    try (final KafkaConsumer consumer = KafkaClient.getKafkaConsumer(brokers, cubeInstance.getName(), null)) {
        final List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
        for (PartitionInfo partitionInfo : partitionInfos) {
            long earliest = getEarliestOffset(consumer, topic, partitionInfo.partition());
            startOffsets.put(partitionInfo.partition(), earliest);
        }
    }
    return startOffsets;
}