/**
 * Get metadata about partitions for all topics that the user is authorized to view. This method will issue a
 * remote call to the server.
 *
 * @return The map of topics and their partitions
 *
 * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
 *             function is called
 * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
 *             this function is called
 * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors
 * @throws org.apache.kafka.common.errors.TimeoutException if the topic metadata could not be fetched before
 *             the amount of time allocated by {@code default.api.timeout.ms} expires
 */
@Override
public Map<String, List<PartitionInfo>> listTopics() {
    return listTopics(Duration.ofMillis(defaultApiTimeoutMs));
}
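A minimal caller-side sketch of this API, using the explicit Duration overload instead of the default timeout; the broker address and property values here are placeholders:

import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.StringDeserializer;

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
    // The Duration overload bounds the metadata fetch explicitly
    // instead of relying on default.api.timeout.ms.
    Map<String, List<PartitionInfo>> topics = consumer.listTopics(Duration.ofSeconds(10));
    topics.forEach((topic, partitions) ->
        System.out.printf("%s has %d partition(s)%n", topic, partitions.size()));
}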
@Override
protected List<String> getAllTopics() throws WakeupException {
    try {
        return new ArrayList<>(kafkaConsumer.listTopics().keySet());
    } catch (org.apache.kafka.common.errors.WakeupException e) {
        // rethrow our own wakeup exception
        throw new WakeupException();
    }
}
@Override
protected List<String> getAllTopics() throws AbstractPartitionDiscoverer.WakeupException {
    try {
        return new ArrayList<>(kafkaConsumer.listTopics().keySet());
    } catch (org.apache.kafka.common.errors.WakeupException e) {
        // rethrow our own wakeup exception
        throw new AbstractPartitionDiscoverer.WakeupException();
    }
}
@Override
public boolean exists() {
    boolean exists = false;
    try (KafkaConsumer<String, String> historyConsumer = new KafkaConsumer<>(consumerConfig.asProperties())) {
        // First, check if the topic exists in the list of all topics
        if (historyConsumer.listTopics().containsKey(topicName)) {
            // Then check if the topic is non-empty by comparing its beginning and end offsets
            Set<TopicPartition> historyTopic = Collections.singleton(new TopicPartition(topicName, PARTITION));
            Long beginOffset = historyConsumer.beginningOffsets(historyTopic).values().iterator().next();
            Long endOffset = historyConsumer.endOffsets(historyTopic).values().iterator().next();
            exists = endOffset > beginOffset;
        }
    }
    return exists;
}
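The check above assumes a single-partition topic (the PARTITION constant). A sketch that generalizes the same beginning/end offset comparison to every partition of a topic, assuming an already-configured consumer (names here are illustrative), might look like this:

// Sketch only: returns true if any partition of the topic holds at least one record.
static boolean topicHasRecords(KafkaConsumer<String, String> consumer, String topic) {
    List<PartitionInfo> infos = consumer.listTopics().get(topic);
    if (infos == null) {
        return false; // topic does not exist
    }
    Set<TopicPartition> partitions = new HashSet<>();
    for (PartitionInfo info : infos) {
        partitions.add(new TopicPartition(topic, info.partition()));
    }
    Map<TopicPartition, Long> begin = consumer.beginningOffsets(partitions);
    Map<TopicPartition, Long> end = consumer.endOffsets(partitions);
    // Non-empty if any partition's end offset is ahead of its beginning offset.
    return partitions.stream().anyMatch(tp -> end.get(tp) > begin.get(tp));
}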
ZkUtils zkUtils = KafkaCruiseControlUtils.createZkUtils(zkConnect);
try {
    Map<String, List<PartitionInfo>> topics = _consumers.get(0).listTopics();
    long partitionSampleWindowMs = Long.parseLong(
            (String) config.get(KafkaCruiseControlConfig.PARTITION_METRICS_WINDOW_MS_CONFIG));
    long brokerSampleWindowMs = Long.parseLong(
            (String) config.get(KafkaCruiseControlConfig.BROKER_METRICS_WINDOW_MS_CONFIG));
@Override
public Set<String> getTableNames() {
    if (tableNames == null) {
        try (KafkaConsumer<?, ?> kafkaConsumer = new KafkaConsumer<>(plugin.getConfig().getKafkaConsumerProps())) {
            tableNames = kafkaConsumer.listTopics().keySet();
        } catch (KafkaException e) {
            throw UserException.dataReadError(e)
                .message("Failed to get tables information")
                .addContext(e.getMessage())
                .build(logger);
        }
    }
    return tableNames;
}
@Test
public void testFilter() {
    Pattern pattern = Pattern.compile("test-\\d+");
    PatternTopicFilter filter = new PatternTopicFilter(pattern);

    String matchingTopicOne = "test-1";
    String matchingTopicTwo = "test-11";
    String unmatchedTopic = "unmatched";

    Map<String, List<PartitionInfo>> allTopics = new HashMap<>();
    allTopics.put(matchingTopicOne, Collections.singletonList(createPartitionInfo(matchingTopicOne, 0)));

    List<PartitionInfo> testTwoPartitions = new ArrayList<>();
    testTwoPartitions.add(createPartitionInfo(matchingTopicTwo, 0));
    testTwoPartitions.add(createPartitionInfo(matchingTopicTwo, 1));
    allTopics.put(matchingTopicTwo, testTwoPartitions);

    allTopics.put(unmatchedTopic, Collections.singletonList(createPartitionInfo(unmatchedTopic, 0)));

    when(consumerMock.listTopics()).thenReturn(allTopics);

    Set<TopicPartition> matchedPartitions = filter.getAllSubscribedPartitions(consumerMock);

    assertThat("Expected topic partitions matching the pattern to be passed by the filter",
        matchedPartitions,
        containsInAnyOrder(
            new TopicPartition(matchingTopicOne, 0),
            new TopicPartition(matchingTopicTwo, 0),
            new TopicPartition(matchingTopicTwo, 1)));
}
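The createPartitionInfo helper is referenced but not shown in the test. A minimal stand-in (an assumption, not the test's actual helper) only needs the topic and partition number, since the filter ignores leader and replica metadata:

// Hypothetical helper: builds a PartitionInfo with placeholder node metadata.
// Requires org.apache.kafka.common.Node and org.apache.kafka.common.PartitionInfo.
private PartitionInfo createPartitionInfo(String topic, int partition) {
    // Leader and replica nodes don't matter to the topic filter, so pass placeholders.
    return new PartitionInfo(topic, partition, null, new Node[0], new Node[0]);
}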
if (!kafkaConsumer.listTopics().containsKey(topicName)) {
    throw UserException.dataReadError()
        .message("Table '%s' does not exist", topicName)
try {
    @SuppressWarnings("unchecked")
    Map<String, List<PartitionInfo>> topics = consumer.listTopics();
/**
 * Gets the last offset for each partition for the given topic.
 */
@SuppressWarnings("unchecked")
public Map<Integer, Long> getEndingOffsets(String kafkaBrokers, String topic) {
    Map<Integer, Long> retval = new HashMap<>();
    KafkaConsumer consumer = buildConsumer(kafkaBrokers);
    try {
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();
        List<PartitionInfo> partitionInfos = topics.get(topic);
        if (partitionInfos == null) {
            logger.warn("Partition information was not found for topic {}", topic);
        } else {
            Collection<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo partitionInfo : partitionInfos) {
                partitions.add(new TopicPartition(topic, partitionInfo.partition()));
            }
            Map<TopicPartition, Long> endingOffsets = consumer.endOffsets(partitions);
            for (Map.Entry<TopicPartition, Long> entry : endingOffsets.entrySet()) {
                retval.put(entry.getKey().partition(), entry.getValue());
            }
        }
    } finally {
        consumer.close();
    }
    return retval;
}
/**
 * Gets the total message count for the topic.
 * <b>WARNING: Don't use with compacted topics</b>
 */
@SuppressWarnings("unchecked")
public long getCount(String kafkaBrokers, String topic) {
    KafkaConsumer consumer = buildConsumer(kafkaBrokers);
    try {
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();
        List<PartitionInfo> partitionInfos = topics.get(topic);
        if (partitionInfos == null) {
            logger.warn("Partition information was not found for topic {}", topic);
            return 0;
        } else {
            Collection<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo partitionInfo : partitionInfos) {
                partitions.add(new TopicPartition(topic, partitionInfo.partition()));
            }
            Map<TopicPartition, Long> endingOffsets = consumer.endOffsets(partitions);
            Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
            return diffOffsets(beginningOffsets, endingOffsets);
        }
    } finally {
        consumer.close();
    }
}
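The diffOffsets helper is not shown in the snippet. One plausible implementation, a sketch rather than the project's actual code, sums end-minus-beginning per partition, which is why the count is wrong for compacted topics (compaction removes records without moving the offsets together):

// Sketch only: totals the offset gap across all partitions.
private long diffOffsets(Map<TopicPartition, Long> begin, Map<TopicPartition, Long> end) {
    long total = 0;
    for (Map.Entry<TopicPartition, Long> entry : end.entrySet()) {
        Long startOffset = begin.get(entry.getKey());
        if (startOffset != null) {
            total += entry.getValue() - startOffset;
        }
    }
    return total;
}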
private Map<String, List<PartitionInfo>> getTopicPartitionInfoMap() {
    KafkaConsumer kafkaConsumer = KafkaUtils.getKafkaConsumer(zkUrl, securityProtocol, consumerConfigs);
    return kafkaConsumer.listTopics();
}
public JsonElement toJson() {
    // Return a JSON representation of a Kafka cluster.
    JsonObject json = new JsonObject();
    KafkaConsumer kafkaConsumer = KafkaUtils.getKafkaConsumer(zkUrl, securityProtocol, consumerConfigs);
    json.addProperty("zkUrl", zkUrl);
    json.add("bytesInLimit", gson.toJsonTree(bytesInLimit));
    json.add("bytesOutLimit", gson.toJsonTree(bytesOutLimit));
    json.add("underReplicatedPartitions", gson.toJsonTree(underReplicatedPartitions));
    json.add("topicPartitionAssignments", gson.toJsonTree(topicPartitionAssignments));
    json.add("kafkaCluster", gson.toJsonTree(kafkaCluster.toJson()));
    json.add("topics", gson.toJsonTree(kafkaConsumer.listTopics()));
    return json;
}
@Override
public List<TopicPartition> getFilteredTopicPartitions(KafkaConsumer<?, ?> consumer) {
    topics.clear();
    List<TopicPartition> allPartitions = new ArrayList<>();
    for (Map.Entry<String, List<PartitionInfo>> entry : consumer.listTopics().entrySet()) {
        if (pattern.matcher(entry.getKey()).matches()) {
            for (PartitionInfo partitionInfo : entry.getValue()) {
                allPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
                topics.add(partitionInfo.topic());
            }
        }
    }
    return allPartitions;
}
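A sketch of how such a filter might drive manual partition assignment, assuming the method above lives on a pattern-based filter class like the PatternTopicFilter from the test earlier (the constructor wiring and pattern are assumptions, not shown in the snippet):

Pattern pattern = Pattern.compile("metrics-.*");       // illustrative pattern
PatternTopicFilter filter = new PatternTopicFilter(pattern); // hypothetical constructor
List<TopicPartition> partitions = filter.getFilteredTopicPartitions(consumer);
consumer.assign(partitions);                           // manual assignment, bypasses group rebalancing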
@Override
public Set<String> listTopic() {
    return KafkaConnection.createConsumer(dataset.getDatastoreProperties()).listTopics().keySet();
}
@Override
public Iterable<ValidationResult> doHealthChecks(RuntimeContainer container) {
    String bootstraps = datastore.brokers.getValue();
    if (bootstraps == null || "".equals(bootstraps)) {
        return Arrays.asList(
            new ValidationResult(ValidationResult.Result.ERROR, "Bootstrap server URLs should not be empty"));
    }
    // Close the probe consumer so the health check doesn't leak a connection.
    try (Consumer<?, ?> consumer = KafkaConnection.createConsumer(datastore)) {
        // A successful metadata fetch is enough to prove the brokers are reachable.
        consumer.listTopics();
    } catch (Throwable exception) {
        return Arrays.asList(new ValidationResult(ValidationResult.Result.ERROR, exception.getMessage()));
    }
    return Arrays.asList(ValidationResult.OK);
}