private MetadataResponse newMetadataResponse(String topic, Errors error) {
    List<MetadataResponse.PartitionMetadata> partitionsMetadata = new ArrayList<>();
    if (error == Errors.NONE) {
        Optional<MetadataResponse.TopicMetadata> foundMetadata = initialUpdateResponse.topicMetadata()
                .stream()
                .filter(topicMetadata -> topicMetadata.topic().equals(topic))
                .findFirst();
        foundMetadata.ifPresent(topicMetadata -> partitionsMetadata.addAll(topicMetadata.partitionMetadata()));
    }

    MetadataResponse.TopicMetadata topicMetadata =
            new MetadataResponse.TopicMetadata(error, topic, false, partitionsMetadata);
    List<Node> brokers = new ArrayList<>(initialUpdateResponse.brokers());
    return new MetadataResponse(brokers, initialUpdateResponse.clusterId(),
            initialUpdateResponse.controller().id(), Collections.singletonList(topicMetadata));
}
/**
 * Transform a MetadataResponse into a new MetadataCache instance.
 */
private MetadataCache handleMetadataResponse(MetadataResponse metadataResponse, Predicate<String> topicsToRetain) {
    Set<String> internalTopics = new HashSet<>();
    List<MetadataCache.PartitionInfoAndEpoch> partitions = new ArrayList<>();
    for (MetadataResponse.TopicMetadata metadata : metadataResponse.topicMetadata()) {
        if (!topicsToRetain.test(metadata.topic()))
            continue;

        if (metadata.error() == Errors.NONE) {
            if (metadata.isInternal())
                internalTopics.add(metadata.topic());
            for (MetadataResponse.PartitionMetadata partitionMetadata : metadata.partitionMetadata()) {
                updatePartitionInfo(metadata.topic(), partitionMetadata, partitionInfo -> {
                    int epoch = partitionMetadata.leaderEpoch().orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH);
                    partitions.add(new MetadataCache.PartitionInfoAndEpoch(partitionInfo, epoch));
                });
            }
        }
    }

    return new MetadataCache(metadataResponse.clusterId(),
            new ArrayList<>(metadataResponse.brokers()),
            partitions,
            metadataResponse.topicsByError(Errors.TOPIC_AUTHORIZATION_FAILED),
            metadataResponse.topicsByError(Errors.INVALID_TOPIC_EXCEPTION),
            internalTopics,
            metadataResponse.controller());
}
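// Illustrative sketch (not from the source): two plausible shapes for the `topicsToRetain`
// predicate passed to the method above. `interestedTopics` is a hypothetical set of topic
// names; any Predicate<String> works.
Predicate<String> retainAll = topic -> true;
Set<String> interestedTopics = new HashSet<>(Arrays.asList("orders", "payments"));
Predicate<String> retainInterested = interestedTopics::contains;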
@Override
public void handleCompletedMetadataResponse(RequestHeader requestHeader, long now, MetadataResponse response) {
    this.metadataFetchInProgress = false;

    // If any partition has a leader with missing listeners, log a few of them for diagnosing broker
    // configuration issues. This could be a transient issue if listeners were added dynamically to brokers.
    List<TopicPartition> missingListenerPartitions = response.topicMetadata().stream().flatMap(topicMetadata ->
            topicMetadata.partitionMetadata().stream()
                    .filter(partitionMetadata -> partitionMetadata.error() == Errors.LISTENER_NOT_FOUND)
                    .map(partitionMetadata -> new TopicPartition(topicMetadata.topic(), partitionMetadata.partition())))
            .collect(Collectors.toList());
    if (!missingListenerPartitions.isEmpty()) {
        int count = missingListenerPartitions.size();
        log.warn("{} partitions have leader brokers without a matching listener, including {}",
                count, missingListenerPartitions.subList(0, Math.min(10, count)));
    }

    // Check whether any topic's metadata failed to get updated
    Map<String, Errors> errors = response.errors();
    if (!errors.isEmpty())
        log.warn("Error while fetching metadata with correlation id {} : {}", requestHeader.correlationId(), errors);

    // Don't update the cluster if there are no valid nodes... the topic we want may still be in the
    // process of being created, which means we will get errors and no nodes until it exists
    if (response.brokers().isEmpty()) {
        log.trace("Ignoring empty metadata response with correlation id {}.", requestHeader.correlationId());
        this.metadata.failedUpdate(now, null);
    } else {
        this.metadata.update(response, now);
    }
}
public static MetadataResponse parse(ByteBuffer buffer, short version) {
    return new MetadataResponse(ApiKeys.METADATA.parseResponse(version, buffer));
}
private void handleMetadataResponse(RequestHeader header, Struct body, long now) {
    this.metadataFetchInProgress = false;
    MetadataResponse response = new MetadataResponse(body);
    Cluster cluster = response.cluster();

    // Don't update the cluster if there are no valid nodes... the topic we want may still be in the
    // process of being created, which means we will get errors and no nodes until it exists
    if (!cluster.nodes().isEmpty()) {
        this.metadata.update(cluster, now);
    } else {
        log.trace("Ignoring empty metadata response with correlation id {}.", header.correlationId());
        this.metadata.failedUpdate(now);
    }
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
    MetadataResponse response = (MetadataResponse) abstractResponse;
    long now = time.milliseconds();
    metadataManager.update(response.cluster(), now);

    // Unassign all unsent requests after a metadata refresh to allow for a new
    // destination to be selected from the new metadata
    unassignUnsentCalls(node -> true);
}
private Set<String> topics() {
    return updateResponse.topicMetadata().stream()
            .map(MetadataResponse.TopicMetadata::topic)
            .collect(Collectors.toSet());
}
private Node controller(MetadataResponse response) {
    if (response.controller() == null || response.controller().id() == MetadataResponse.NO_CONTROLLER_ID)
        return null;

    return response.controller();
}
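// Hedged sketch (not from the source): a response built with NO_CONTROLLER_ID, much like
// emptyMetadataResponse() further below, should map to a null controller through the helper above.
MetadataResponse noControllerResponse = new MetadataResponse(
        Collections.emptyList(), null, MetadataResponse.NO_CONTROLLER_ID, Collections.emptyList());
assertNull(controller(noControllerResponse));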
List<Node> replicaNodes = convertToNodes(brokers, replicas);
List<Node> isrNodes = convertToNodes(brokers, isr);
List<Node> offlineNodes = convertToNodes(brokers, offlineReplicas);

this.controller = getControllerNode(controllerId, brokers.values());
this.topicMetadata = topicMetadata;
@Override
void handleResponse(AbstractResponse abstractResponse) {
    MetadataResponse metadataResponse = (MetadataResponse) abstractResponse;
    Collection<Node> nodes = metadataResponse.brokers();
    if (nodes.isEmpty())
        throw new StaleMetadataException("Metadata fetch failed due to missing broker list");
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
    List<MetadataResponse.TopicMetadata> topicMetadatas = new ArrayList<>();
    Errors error = Errors.forException(e);
    List<MetadataResponse.PartitionMetadata> partitions = Collections.emptyList();
    if (topics != null) {
        for (String topic : topics)
            topicMetadatas.add(new MetadataResponse.TopicMetadata(error, topic, false, partitions));
    }

    short versionId = version();
    switch (versionId) {
        case 0:
        case 1:
        case 2:
            return new MetadataResponse(Collections.emptyList(), null, MetadataResponse.NO_CONTROLLER_ID,
                    topicMetadatas);
        case 3:
        case 4:
        case 5:
        case 6:
        case 7:
            return new MetadataResponse(throttleTimeMs, Collections.emptyList(), null,
                    MetadataResponse.NO_CONTROLLER_ID, topicMetadatas);
        default:
            throw new IllegalArgumentException(String.format("Version %d is not valid. Valid versions for %s are 0 to %d",
                    versionId, this.getClass().getSimpleName(), ApiKeys.METADATA.latestVersion()));
    }
}
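// Hedged usage sketch: the topic name and builder arguments are hypothetical, and this assumes
// the MetadataRequest.Builder(List<String>, boolean) constructor of this codebase. For a v1
// request, the version switch above selects the constructor without a throttle-time field.
MetadataRequest request = new MetadataRequest.Builder(Arrays.asList("orders"), true).build((short) 1);
MetadataResponse errorResponse = (MetadataResponse) request.getErrorResponse(0, new TimeoutException());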
partitionNumByTopic.put(topicName2, 1);
MetadataResponse updateMetadataResponse = TestUtils.metadataUpdateWith(2, partitionNumByTopic);
Cluster updatedCluster = updateMetadataResponse.cluster();
@Override
void handleResponse(AbstractResponse abstractResponse) {
    MetadataResponse response = (MetadataResponse) abstractResponse;
    Map<String, TopicListing> topicListing = new HashMap<>();
    for (MetadataResponse.TopicMetadata topicMetadata : response.topicMetadata()) {
        String topicName = topicMetadata.topic();
        boolean isInternal = topicMetadata.isInternal();
        if (!isInternal || options.shouldListInternal())
            topicListing.put(topicName, new TopicListing(topicName, isInternal));
    }
    topicListingFuture.complete(topicListing);
}
@Test
public void testSendToInvalidTopic() throws Exception {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "15000");

    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = TestUtils.metadataUpdateWith(1, emptyMap());
    Metadata metadata = new Metadata(0, Long.MAX_VALUE, true);
    metadata.update(initialUpdateResponse, time.milliseconds());

    MockClient client = new MockClient(time, metadata);

    Producer<String, String> producer = new KafkaProducer<>(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time);

    String invalidTopicName = "topic abc"; // Invalid topic name due to space
    ProducerRecord<String, String> record = new ProducerRecord<>(invalidTopicName, "HelloKafka");

    List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();
    topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.INVALID_TOPIC_EXCEPTION,
            invalidTopicName, false, Collections.emptyList()));
    MetadataResponse updateResponse = new MetadataResponse(
            new ArrayList<>(initialUpdateResponse.brokers()),
            initialUpdateResponse.clusterId(),
            initialUpdateResponse.controller().id(),
            topicMetadata);
    client.prepareMetadataUpdate(updateResponse);

    Future<RecordMetadata> future = producer.send(record);

    assertEquals("Cluster has incorrect invalid topic list.", Collections.singleton(invalidTopicName),
            metadata.fetch().invalidTopics());
    TestUtils.assertFutureError(future, InvalidTopicException.class);

    producer.close(Duration.ofMillis(0));
}
private static MetadataResponse emptyMetadataResponse() {
    return new MetadataResponse(
            Collections.emptyList(),
            null,
            -1, // same value as MetadataResponse.NO_CONTROLLER_ID
            Collections.emptyList());
}
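// Hedged sketch: with no brokers, response.cluster() (as used in handleMetadataResponse
// above) yields a cluster with no nodes, which callers treat as a failed update.
MetadataResponse empty = emptyMetadataResponse();
assertTrue(empty.cluster().nodes().isEmpty());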
@Test
public void testGetAllTopics() {
    // Sending the response before the request, as getTopicMetadata is a blocking call
    client.prepareResponse(newMetadataResponse(topicName, Errors.NONE));
    Map<String, List<PartitionInfo>> allTopics = fetcher.getAllTopicMetadata(time.timer(5000L));

    assertEquals(initialUpdateResponse.topicMetadata().size(), allTopics.size());
}
for (MetadataResponse.TopicMetadata item : originalResponse.topicMetadata()) {
    List<MetadataResponse.PartitionMetadata> partitions = item.partitionMetadata();
    List<MetadataResponse.PartitionMetadata> altPartitions = new ArrayList<>();
    // ... construction of alteredTopic from altPartitions is elided in this excerpt ...
    altTopics.add(alteredTopic);
}

Node controller = originalResponse.controller();
MetadataResponse altered = new MetadataResponse(
        (List<Node>) originalResponse.brokers(),
        originalResponse.clusterId(),
        controller != null ? controller.id() : MetadataResponse.NO_CONTROLLER_ID,
        altTopics);