public static MetadataResponse parse(ByteBuffer buffer, short version) {
    return new MetadataResponse(ApiKeys.METADATA.parseResponse(version, buffer));
}
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
    List<MetadataResponse.TopicMetadata> topicMetadatas = new ArrayList<>();
    Errors error = Errors.forException(e);
    List<MetadataResponse.PartitionMetadata> partitions = Collections.emptyList();
    if (topics != null) {
        // Map every requested topic to the error derived from the exception.
        for (String topic : topics)
            topicMetadatas.add(new MetadataResponse.TopicMetadata(error, topic, false, partitions));
    }
    short versionId = version();
    switch (versionId) {
        case 0:
        case 1:
        case 2:
            // Versions 0-2 have no throttle time field in the response.
            return new MetadataResponse(Collections.emptyList(), null, MetadataResponse.NO_CONTROLLER_ID, topicMetadatas);
        case 3:
        case 4:
        case 5:
        case 6:
        case 7:
            return new MetadataResponse(throttleTimeMs, Collections.emptyList(), null, MetadataResponse.NO_CONTROLLER_ID, topicMetadatas);
        default:
            throw new IllegalArgumentException(String.format("Version %d is not valid. Valid versions for %s are 0 to %d",
                    versionId, this.getClass().getSimpleName(), ApiKeys.METADATA.latestVersion()));
    }
}
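A minimal sketch of how this error path might be exercised, assuming the public MetadataRequest.Builder(topics, allowAutoTopicCreation) constructor and org.apache.kafka.common.errors.TimeoutException; the topic name and exception are illustrative only:

// Illustrative only: build a request for one topic and derive its error response.
MetadataRequest request = new MetadataRequest.Builder(
        Collections.singletonList("my-topic"), true).build();
// The requested topic is mapped to the error derived from the exception
// (a TimeoutException maps to Errors.REQUEST_TIMED_OUT), with an empty partition list.
AbstractResponse errorResponse = request.getErrorResponse(0, new TimeoutException());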
private static MetadataResponse emptyMetadataResponse() {
    return new MetadataResponse(
            Collections.emptyList(),
            null,
            -1,
            Collections.emptyList());
}
private MetadataResponse createMetadataResponse() {
    Node node = new Node(1, "host1", 1001);
    List<Node> replicas = asList(node);
    List<Node> isr = asList(node);
    List<Node> offlineReplicas = asList();

    List<MetadataResponse.TopicMetadata> allTopicMetadata = new ArrayList<>();
    allTopicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, "__consumer_offsets", true,
            asList(new MetadataResponse.PartitionMetadata(Errors.NONE, 1, node, Optional.of(5),
                    replicas, isr, offlineReplicas))));
    allTopicMetadata.add(new MetadataResponse.TopicMetadata(Errors.LEADER_NOT_AVAILABLE, "topic2", false,
            Collections.emptyList()));
    allTopicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, "topic3", false,
            asList(new MetadataResponse.PartitionMetadata(Errors.LEADER_NOT_AVAILABLE, 0, null,
                    Optional.empty(), replicas, isr, offlineReplicas))));

    return new MetadataResponse(asList(node), null, MetadataResponse.NO_CONTROLLER_ID, allTopicMetadata);
}
public static MetadataResponse metadataUpdateWith(final String clusterId,
                                                  final int numNodes,
                                                  final Map<String, Errors> topicErrors,
                                                  final Map<String, Integer> topicPartitionCounts,
                                                  final PartitionMetadataSupplier partitionSupplier) {
    final List<Node> nodes = new ArrayList<>(numNodes);
    for (int i = 0; i < numNodes; i++)
        nodes.add(new Node(i, "localhost", 1969 + i));

    List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();
    for (Map.Entry<String, Integer> topicPartitionCountEntry : topicPartitionCounts.entrySet()) {
        String topic = topicPartitionCountEntry.getKey();
        int numPartitions = topicPartitionCountEntry.getValue();

        // Spread partition leadership round-robin across the generated nodes.
        List<MetadataResponse.PartitionMetadata> partitionMetadata = new ArrayList<>(numPartitions);
        for (int i = 0; i < numPartitions; i++) {
            Node leader = nodes.get(i % nodes.size());
            List<Node> replicas = Collections.singletonList(leader);
            partitionMetadata.add(partitionSupplier.supply(
                    Errors.NONE, i, leader, Optional.empty(), replicas, replicas, replicas));
        }

        topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, topic,
                Topic.isInternal(topic), partitionMetadata));
    }

    for (Map.Entry<String, Errors> topicErrorEntry : topicErrors.entrySet()) {
        String topic = topicErrorEntry.getKey();
        topicMetadata.add(new MetadataResponse.TopicMetadata(topicErrorEntry.getValue(), topic,
                Topic.isInternal(topic), Collections.emptyList()));
    }

    return new MetadataResponse(nodes, clusterId, 0, topicMetadata);
}
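A hedged usage sketch for the helper above, assuming the seven-argument MetadataResponse.PartitionMetadata constructor (seen elsewhere in this section) satisfies PartitionMetadataSupplier; the cluster id and topic names are made up for illustration:

// Three brokers, one healthy topic with four partitions, one errored topic.
MetadataResponse response = metadataUpdateWith("kafka-cluster", 3,
        Collections.singletonMap("error-topic", Errors.UNKNOWN_TOPIC_OR_PARTITION),
        Collections.singletonMap("my-topic", 4),
        MetadataResponse.PartitionMetadata::new);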
private MetadataResponse newMetadataResponse(String topic, Errors error) {
    List<MetadataResponse.PartitionMetadata> partitionsMetadata = new ArrayList<>();
    if (error == Errors.NONE) {
        // Reuse the partition metadata from the initial update for this topic, if present.
        Optional<MetadataResponse.TopicMetadata> foundMetadata = initialUpdateResponse.topicMetadata()
                .stream()
                .filter(topicMetadata -> topicMetadata.topic().equals(topic))
                .findFirst();
        foundMetadata.ifPresent(topicMetadata ->
                partitionsMetadata.addAll(topicMetadata.partitionMetadata()));
    }

    MetadataResponse.TopicMetadata topicMetadata = new MetadataResponse.TopicMetadata(error, topic, false,
            partitionsMetadata);
    List<Node> brokers = new ArrayList<>(initialUpdateResponse.brokers());
    return new MetadataResponse(brokers, initialUpdateResponse.clusterId(),
            initialUpdateResponse.controller().id(), Collections.singletonList(topicMetadata));
}
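In the tests below, a response built this way is typically queued on a MockClient; a one-line sketch, assuming a client field like the ones those tests use:

client.prepareMetadataUpdate(newMetadataResponse("my-topic", Errors.UNKNOWN_TOPIC_OR_PARTITION));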
// `t` (the topic metadata list) and `m` (the per-partition delete-records results)
// are built earlier in the surrounding test.
env.kafkaClient().prepareResponse(new MetadataResponse(cluster.nodes(),
        cluster.clusterResource().clusterId(),
        cluster.controller().id(),
        t));
env.kafkaClient().prepareResponse(new DeleteRecordsResponse(0, m));
new MetadataResponse(
        Collections.emptyList(),
        env.cluster().clusterResource().clusterId(),
@Test
public void testSendToInvalidTopic() throws Exception {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "15000");

    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = TestUtils.metadataUpdateWith(1, emptyMap());
    Metadata metadata = new Metadata(0, Long.MAX_VALUE, true);
    metadata.update(initialUpdateResponse, time.milliseconds());

    MockClient client = new MockClient(time, metadata);

    Producer<String, String> producer = new KafkaProducer<>(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time);

    String invalidTopicName = "topic abc"; // Invalid topic name due to space
    ProducerRecord<String, String> record = new ProducerRecord<>(invalidTopicName, "HelloKafka");

    List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();
    topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.INVALID_TOPIC_EXCEPTION,
            invalidTopicName, false, Collections.emptyList()));
    MetadataResponse updateResponse = new MetadataResponse(
            new ArrayList<>(initialUpdateResponse.brokers()),
            initialUpdateResponse.clusterId(),
            initialUpdateResponse.controller().id(),
            topicMetadata);
    client.prepareMetadataUpdate(updateResponse);

    Future<RecordMetadata> future = producer.send(record);

    assertEquals("Cluster has incorrect invalid topic list.", Collections.singleton(invalidTopicName),
            metadata.fetch().invalidTopics());
    TestUtils.assertFutureError(future, InvalidTopicException.class);

    producer.close(Duration.ofMillis(0));
}
env.kafkaClient().prepareResponse(new MetadataResponse(initializedCluster.nodes(),
        initializedCluster.clusterResource().clusterId(),
        initializedCluster.controller().id(),
        singletonList(new MetadataResponse.TopicMetadata(Errors.NONE, topic, false,
                singletonList(new MetadataResponse.PartitionMetadata(Errors.NONE, 0, leader,
                        Optional.of(10), singletonList(leader), singletonList(leader),
                        singletonList(leader)))))));
env.kafkaClient().prepareResponse(new MetadataResponse(initializedCluster.nodes(),
        initializedCluster.clusterResource().clusterId(), 1,
        singletonList(new MetadataResponse.TopicMetadata(Errors.NONE, topic, false,
@Test(expected = InvalidTopicException.class)
public void testSubscriptionOnInvalidTopic() {
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Cluster cluster = metadata.fetch();

    PartitionAssignor assignor = new RoundRobinAssignor();

    String invalidTopicName = "topic abc"; // Invalid topic name due to space

    List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();
    topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.INVALID_TOPIC_EXCEPTION,
            invalidTopicName, false, Collections.emptyList()));
    MetadataResponse updateResponse = new MetadataResponse(cluster.nodes(),
            cluster.clusterResource().clusterId(),
            cluster.controller().id(),
            topicMetadata);
    client.prepareMetadataUpdate(updateResponse);

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true);
    consumer.subscribe(singleton(invalidTopicName), getConsumerRebalanceListener(consumer));

    consumer.poll(Duration.ZERO);
}
@Test
public void testUnreachableBootstrapServer() throws Exception {
    // This tests the scenario in which the bootstrap server is unreachable for a short while,
    // which prevents AdminClient from being able to send the initial metadata request
    Cluster cluster = Cluster.bootstrap(Collections.singletonList(new InetSocketAddress("localhost", 8121)));
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster)) {
        Cluster discoveredCluster = mockCluster(0);
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().setUnreachable(cluster.nodes().get(0), 200);
        env.kafkaClient().prepareResponse(body -> body instanceof MetadataRequest,
                new MetadataResponse(discoveredCluster.nodes(),
                        discoveredCluster.clusterResource().clusterId(),
                        1,
                        Collections.emptyList()));
        env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest,
                new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));

        KafkaFuture<Void> future = env.adminClient().createTopics(
                Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
                new CreateTopicsOptions().timeoutMs(10000)).all();
        future.get();
    }
}
MetadataResponse altered = new MetadataResponse(
        (List<Node>) originalResponse.brokers(),
        originalResponse.clusterId(),
@Test
public void testConnectionFailureOnMetadataUpdate() throws Exception {
    // This tests the scenario in which we successfully connect to the bootstrap server, but
    // the server disconnects before sending the full response
    Cluster cluster = mockBootstrapCluster();
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster)) {
        Cluster discoveredCluster = mockCluster(0);
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // The first metadata attempt is answered with a disconnect (the trailing `true` flag),
        // which forces a retry before the real metadata response below is delivered.
        env.kafkaClient().prepareResponse(request -> request instanceof MetadataRequest, null, true);
        env.kafkaClient().prepareResponse(request -> request instanceof MetadataRequest,
                new MetadataResponse(discoveredCluster.nodes(),
                        discoveredCluster.clusterResource().clusterId(),
                        1,
                        Collections.emptyList()));
        env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest,
                new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));

        KafkaFuture<Void> future = env.adminClient().createTopics(
                Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
                new CreateTopicsOptions().timeoutMs(10000)).all();
        future.get();
    }
}
@Test
public void testCreateTopicsHandleNotControllerException() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Node 0 rejects the request with NOT_CONTROLLER, so the client re-fetches metadata
        // (which now reports node 1 as controller) and retries against node 1.
        env.kafkaClient().prepareResponseFrom(new CreateTopicsResponse(
                Collections.singletonMap("myTopic", new ApiError(Errors.NOT_CONTROLLER, ""))),
                env.cluster().nodeById(0));
        env.kafkaClient().prepareResponse(new MetadataResponse(env.cluster().nodes(),
                env.cluster().clusterResource().clusterId(),
                1,
                Collections.<MetadataResponse.TopicMetadata>emptyList()));
        env.kafkaClient().prepareResponseFrom(new CreateTopicsResponse(
                Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))),
                env.cluster().nodeById(1));

        KafkaFuture<Void> future = env.adminClient().createTopics(
                Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
                new CreateTopicsOptions().timeoutMs(10000)).all();
        future.get();
    }
}
new MetadataResponse(
        Collections.emptyList(),
        env.cluster().clusterResource().clusterId(),

new MetadataResponse(
        env.cluster().nodes(),
        env.cluster().clusterResource().clusterId(),
        return new ListOffsetResponse(struct);
    case METADATA:
        return new MetadataResponse(struct);
    case OFFSET_COMMIT:
        return new OffsetCommitResponse(struct);
public static MetadataResponse parse(ByteBuffer buffer) {
    return new MetadataResponse((Struct) curSchema.read(buffer));
}
private void handleMetadataResponse(RequestHeader header, Struct body, long now) {
    this.metadataFetchInProgress = false;
    MetadataResponse response = new MetadataResponse(body);
    Cluster cluster = response.cluster();
    // Don't update the cluster if there are no valid nodes... the topic we want may still be in the
    // process of being created, which means we will get errors and no nodes until it exists.
    if (cluster.nodes().size() > 0) {
        this.metadata.update(cluster, now);
    } else {
        log.trace("Ignoring empty metadata response with correlation id {}.", header.correlationId());
        this.metadata.failedUpdate(now);
    }
}
private void handleMetadataRequest(ChannelHandlerContext ctx, Request request) {
    short requestVersion = request.getHeader().apiVersion();
    sendResponse(ctx, new Response(
            new ResponseHeader(request.getHeader().correlationId()),
            new MetadataResponse(
                    coordinator.getAllGroupCoordinators(),
                    "",
                    -1,
                    Lists.newArrayList(),
                    requestVersion
            )
    ));
}