@Override
void handleResponse(AbstractResponse abstractResponse) {
    MetadataResponse response = (MetadataResponse) abstractResponse;
    // Complete the futures backing the describeCluster() result with data
    // from the metadata response.
    describeClusterFuture.complete(response.brokers());
    controllerFuture.complete(controller(response));
    clusterIdFuture.complete(response.clusterId());
}
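For context, a minimal sketch of how these futures surface through the public admin API. This assumes an already-configured AdminClient instance (the variable name adminClient is illustrative, not from the snippet above):

// Hedged sketch: the handler above completes the futures returned by these accessors.
DescribeClusterResult result = adminClient.describeCluster();
Collection<Node> nodes = result.nodes().get();   // completed with response.brokers()
Node controller = result.controller().get();     // completed with controller(response)
String clusterId = result.clusterId().get();     // completed with response.clusterId()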
/**
 * Transform a MetadataResponse into a new MetadataCache instance.
 */
private MetadataCache handleMetadataResponse(MetadataResponse metadataResponse, Predicate<String> topicsToRetain) {
    Set<String> internalTopics = new HashSet<>();
    List<MetadataCache.PartitionInfoAndEpoch> partitions = new ArrayList<>();
    for (MetadataResponse.TopicMetadata metadata : metadataResponse.topicMetadata()) {
        if (!topicsToRetain.test(metadata.topic()))
            continue;

        if (metadata.error() == Errors.NONE) {
            if (metadata.isInternal())
                internalTopics.add(metadata.topic());

            for (MetadataResponse.PartitionMetadata partitionMetadata : metadata.partitionMetadata()) {
                updatePartitionInfo(metadata.topic(), partitionMetadata, partitionInfo -> {
                    int epoch = partitionMetadata.leaderEpoch().orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH);
                    partitions.add(new MetadataCache.PartitionInfoAndEpoch(partitionInfo, epoch));
                });
            }
        }
    }

    return new MetadataCache(metadataResponse.clusterId(),
            new ArrayList<>(metadataResponse.brokers()),
            partitions,
            metadataResponse.topicsByError(Errors.TOPIC_AUTHORIZATION_FAILED),
            metadataResponse.topicsByError(Errors.INVALID_TOPIC_EXCEPTION),
            internalTopics,
            metadataResponse.controller());
}
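A minimal sketch of the retention predicate this method takes. Both predicates and the call site below are illustrative; response stands in for a freshly received MetadataResponse, and the allow-list topic names are made up:

// Hedged sketch: two ways a caller might build topicsToRetain.
Predicate<String> retainAll = topic -> true;

Set<String> allowed = new HashSet<>(Arrays.asList("orders", "payments"));
Predicate<String> retainAllowed = allowed::contains;

// Hypothetical call site.
MetadataCache cache = handleMetadataResponse(response, retainAll);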
private MetadataResponse newMetadataResponse(String topic, Errors error) {
    List<MetadataResponse.PartitionMetadata> partitionsMetadata = new ArrayList<>();
    if (error == Errors.NONE) {
        // Reuse the partition metadata from the initial update if the topic was present there.
        Optional<MetadataResponse.TopicMetadata> foundMetadata = initialUpdateResponse.topicMetadata()
                .stream()
                .filter(topicMetadata -> topicMetadata.topic().equals(topic))
                .findFirst();
        foundMetadata.ifPresent(topicMetadata -> partitionsMetadata.addAll(topicMetadata.partitionMetadata()));
    }

    MetadataResponse.TopicMetadata topicMetadata = new MetadataResponse.TopicMetadata(error, topic, false, partitionsMetadata);
    List<Node> brokers = new ArrayList<>(initialUpdateResponse.brokers());
    return new MetadataResponse(brokers, initialUpdateResponse.clusterId(),
            initialUpdateResponse.controller().id(), Collections.singletonList(topicMetadata));
}
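A sketch of how a test might drive this helper, following the same prepareMetadataUpdate pattern used in the test below. The topic name is illustrative:

// Hedged sketch: queue an error response for one topic on the mock client.
MetadataResponse errorResponse = newMetadataResponse("test-topic", Errors.INVALID_TOPIC_EXCEPTION);
client.prepareMetadataUpdate(errorResponse);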
@Test
public void testSendToInvalidTopic() throws Exception {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "15000");

    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = TestUtils.metadataUpdateWith(1, emptyMap());
    Metadata metadata = new Metadata(0, Long.MAX_VALUE, true);
    metadata.update(initialUpdateResponse, time.milliseconds());

    MockClient client = new MockClient(time, metadata);

    Producer<String, String> producer = new KafkaProducer<>(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time);

    String invalidTopicName = "topic abc"; // Invalid topic name due to space
    ProducerRecord<String, String> record = new ProducerRecord<>(invalidTopicName, "HelloKafka");

    List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();
    topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.INVALID_TOPIC_EXCEPTION,
            invalidTopicName, false, Collections.emptyList()));
    MetadataResponse updateResponse = new MetadataResponse(
            new ArrayList<>(initialUpdateResponse.brokers()),
            initialUpdateResponse.clusterId(),
            initialUpdateResponse.controller().id(),
            topicMetadata);
    client.prepareMetadataUpdate(updateResponse);

    Future<RecordMetadata> future = producer.send(record);

    // The send should record the topic as invalid in the client metadata...
    assertEquals("Cluster has incorrect invalid topic list.", Collections.singleton(invalidTopicName),
            metadata.fetch().invalidTopics());
    // ...and the returned future should fail with InvalidTopicException.
    TestUtils.assertFutureError(future, InvalidTopicException.class);

    producer.close(Duration.ofMillis(0));
}
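The space in "topic abc" is what makes the name invalid: Kafka topic names may only contain ASCII alphanumerics, '.', '_' and '-'. A sketch of checking a candidate name up front; note that org.apache.kafka.common.internals.Topic is an internal class, so treat this call as illustrative rather than a supported API:

import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.internals.Topic;

// Hedged sketch: client-side validation of a candidate topic name.
try {
    Topic.validate("topic abc"); // expected to throw: names are limited to [a-zA-Z0-9._-]
} catch (InvalidTopicException e) {
    System.err.println("Rejected topic name: " + e.getMessage());
}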
MetadataResponse altered = new MetadataResponse(
        new ArrayList<>(originalResponse.brokers()),  // defensive copy instead of an unchecked List cast
        originalResponse.clusterId(),
        controller != null ? controller.id() : MetadataResponse.NO_CONTROLLER_ID,
        altTopics);
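A sketch of how such an altered response might then be applied, mirroring the update call in the test above; metadata and time are assumed to be the surrounding test's Metadata and MockTime instances:

// Hedged sketch: apply the altered response as a metadata update.
metadata.update(altered, time.milliseconds());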