/**
 * Builds an error {@link MetadataResponse} for this request: the error derived from
 * {@code e} is attached to every requested topic (with no partition metadata).
 *
 * @param throttleTimeMs throttle time to embed, only carried by versions 3 and above
 * @param e              the failure to translate into a protocol {@link Errors} code
 * @return a version-appropriate metadata response reporting the error per topic
 * @throws IllegalArgumentException if this request's version is outside 0..7
 */
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
    Errors error = Errors.forException(e);
    List<MetadataResponse.PartitionMetadata> noPartitions = Collections.emptyList();
    List<MetadataResponse.TopicMetadata> topicMetadatas = new ArrayList<>();
    if (topics != null) {
        for (String requestedTopic : topics) {
            topicMetadatas.add(new MetadataResponse.TopicMetadata(error, requestedTopic, false, noPartitions));
        }
    }

    short versionId = version();
    if (versionId >= 0 && versionId <= 2) {
        // Versions 0-2 have no throttle-time field in the response.
        return new MetadataResponse(Collections.emptyList(), null, MetadataResponse.NO_CONTROLLER_ID, topicMetadatas);
    } else if (versionId >= 3 && versionId <= 7) {
        return new MetadataResponse(throttleTimeMs, Collections.emptyList(), null,
                MetadataResponse.NO_CONTROLLER_ID, topicMetadatas);
    } else {
        throw new IllegalArgumentException(String.format("Version %d is not valid. Valid versions for %s are 0 to %d",
                versionId, this.getClass().getSimpleName(), ApiKeys.METADATA.latestVersion()));
    }
}
/**
 * Builds a fixture {@link MetadataResponse} with a single broker and three topics that
 * cover the interesting cases: a healthy internal topic, a topic-level error, and a
 * topic whose only partition currently has no leader.
 */
private MetadataResponse createMetadataResponse() {
    // The single broker hosts every replica in this fixture.
    Node broker = new Node(1, "host1", 1001);
    List<Node> replicas = asList(broker);
    List<Node> inSync = asList(broker);
    List<Node> offline = asList();

    // Healthy internal topic with one fully available partition (leader epoch 5).
    MetadataResponse.TopicMetadata offsetsTopic = new MetadataResponse.TopicMetadata(
            Errors.NONE, "__consumer_offsets", true,
            asList(new MetadataResponse.PartitionMetadata(
                    Errors.NONE, 1, broker, Optional.of(5), replicas, inSync, offline)));

    // Topic-level error: no partition metadata is returned at all.
    MetadataResponse.TopicMetadata erroredTopic = new MetadataResponse.TopicMetadata(
            Errors.LEADER_NOT_AVAILABLE, "topic2", false, Collections.emptyList());

    // Topic exists, but its partition reports a leader-not-available error (null leader).
    MetadataResponse.TopicMetadata leaderlessTopic = new MetadataResponse.TopicMetadata(
            Errors.NONE, "topic3", false,
            asList(new MetadataResponse.PartitionMetadata(
                    Errors.LEADER_NOT_AVAILABLE, 0, null, Optional.empty(), replicas, inSync, offline)));

    List<MetadataResponse.TopicMetadata> allTopicMetadata = new ArrayList<>();
    allTopicMetadata.add(offsetsTopic);
    allTopicMetadata.add(erroredTopic);
    allTopicMetadata.add(leaderlessTopic);

    return new MetadataResponse(asList(broker), null, MetadataResponse.NO_CONTROLLER_ID, allTopicMetadata);
}
/**
 * Builds a synthetic {@link MetadataResponse} for tests.
 *
 * @param clusterId            cluster id to report
 * @param numNodes             number of fabricated brokers (ids 0..numNodes-1 on
 *                             consecutive localhost ports starting at 1969)
 * @param topicErrors          topics that should report a topic-level error with no partitions
 * @param topicPartitionCounts healthy topics mapped to their partition counts; leadership
 *                             is assigned round-robin across the brokers
 * @param partitionSupplier    factory used to create each partition's metadata
 * @return the assembled response, with node 0 as the controller
 */
public static MetadataResponse metadataUpdateWith(final String clusterId,
                                                  final int numNodes,
                                                  final Map<String, Errors> topicErrors,
                                                  final Map<String, Integer> topicPartitionCounts,
                                                  final PartitionMetadataSupplier partitionSupplier) {
    final List<Node> nodes = new ArrayList<>(numNodes);
    for (int id = 0; id < numNodes; id++) {
        nodes.add(new Node(id, "localhost", 1969 + id));
    }

    final List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();

    // Healthy topics: every partition is error-free, with a single replica that is
    // simultaneously the leader, in-sync, and (per the supplier's contract) offline arg.
    topicPartitionCounts.forEach((topic, numPartitions) -> {
        List<MetadataResponse.PartitionMetadata> partitions = new ArrayList<>(numPartitions);
        for (int p = 0; p < numPartitions; p++) {
            Node leader = nodes.get(p % nodes.size());
            List<Node> replicas = Collections.singletonList(leader);
            partitions.add(partitionSupplier.supply(
                    Errors.NONE, p, leader, Optional.empty(), replicas, replicas, replicas));
        }
        topicMetadata.add(new MetadataResponse.TopicMetadata(
                Errors.NONE, topic, Topic.isInternal(topic), partitions));
    });

    // Errored topics: the error is reported at the topic level, with no partition metadata.
    topicErrors.forEach((topic, error) ->
            topicMetadata.add(new MetadataResponse.TopicMetadata(
                    error, topic, Topic.isInternal(topic), Collections.emptyList())));

    return new MetadataResponse(nodes, clusterId, 0, topicMetadata);
}
singletonList(nodes.get(0)), singletonList(nodes.get(0)), Collections.emptyList())); t.add(new MetadataResponse.TopicMetadata(Errors.NONE, "my_topic", false, p));
topicMetadata.add(new TopicMetadata(topicError, topic, isInternal, partitionMetadata));
); MetadataResponse.TopicMetadata alteredTopic = new MetadataResponse.TopicMetadata( item.error(), item.topic(),
@Test public void testSendToInvalidTopic() throws Exception { Map<String, Object> configs = new HashMap<>(); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "15000"); Time time = new MockTime(); MetadataResponse initialUpdateResponse = TestUtils.metadataUpdateWith(1, emptyMap()); Metadata metadata = new Metadata(0, Long.MAX_VALUE, true); metadata.update(initialUpdateResponse, time.milliseconds()); MockClient client = new MockClient(time, metadata); Producer<String, String> producer = new KafkaProducer<>(configs, new StringSerializer(), new StringSerializer(), metadata, client, null, time); String invalidTopicName = "topic abc"; // Invalid topic name due to space ProducerRecord<String, String> record = new ProducerRecord<>(invalidTopicName, "HelloKafka"); List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>(); topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.INVALID_TOPIC_EXCEPTION, invalidTopicName, false, Collections.emptyList())); MetadataResponse updateResponse = new MetadataResponse( new ArrayList<>(initialUpdateResponse.brokers()), initialUpdateResponse.clusterId(), initialUpdateResponse.controller().id(), topicMetadata); client.prepareMetadataUpdate(updateResponse); Future<RecordMetadata> future = producer.send(record); assertEquals("Cluster has incorrect invalid topic list.", Collections.singleton(invalidTopicName), metadata.fetch().invalidTopics()); TestUtils.assertFutureError(future, InvalidTopicException.class); producer.close(Duration.ofMillis(0)); }
env.kafkaClient().prepareResponse(new MetadataResponse(initializedCluster.nodes(), initializedCluster.clusterResource().clusterId(), 1, singletonList(new MetadataResponse.TopicMetadata(Errors.NONE, topic, false, singletonList(partitionMetadata)))));
@Test(expected = InvalidTopicException.class) public void testSubscriptionOnInvalidTopic() { Time time = new MockTime(); Metadata metadata = createMetadata(); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); Cluster cluster = metadata.fetch(); PartitionAssignor assignor = new RoundRobinAssignor(); String invalidTopicName = "topic abc"; // Invalid topic name due to space List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>(); topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.INVALID_TOPIC_EXCEPTION, invalidTopicName, false, Collections.emptyList())); MetadataResponse updateResponse = new MetadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), cluster.controller().id(), topicMetadata); client.prepareMetadataUpdate(updateResponse); KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true); consumer.subscribe(singleton(invalidTopicName), getConsumerRebalanceListener(consumer)); consumer.poll(Duration.ZERO); } }
private MetadataResponse newMetadataResponse(String topic, Errors error) { List<MetadataResponse.PartitionMetadata> partitionsMetadata = new ArrayList<>(); if (error == Errors.NONE) { Optional<MetadataResponse.TopicMetadata> foundMetadata = initialUpdateResponse.topicMetadata() .stream() .filter(topicMetadata -> topicMetadata.topic().equals(topic)) .findFirst(); foundMetadata.ifPresent(topicMetadata -> { partitionsMetadata.addAll(topicMetadata.partitionMetadata()); }); } MetadataResponse.TopicMetadata topicMetadata = new MetadataResponse.TopicMetadata(error, topic, false, partitionsMetadata); List<Node> brokers = new ArrayList<>(initialUpdateResponse.brokers()); return new MetadataResponse(brokers, initialUpdateResponse.clusterId(), initialUpdateResponse.controller().id(), Collections.singletonList(topicMetadata)); }