int[] cursors = new int[racks.size()];
int rackCursor = 0;
for (MetadataResponse.PartitionMetadata pm : topicMetadata.partitionMetadata()) {
    if (pm.replicas().size() < replicationFactor) {
        List<Object> newAssignedReplica = new ArrayList<>();
/**
 * Transform a MetadataResponse into a new MetadataCache instance.
 */
private MetadataCache handleMetadataResponse(MetadataResponse metadataResponse, Predicate<String> topicsToRetain) {
    Set<String> internalTopics = new HashSet<>();
    List<MetadataCache.PartitionInfoAndEpoch> partitions = new ArrayList<>();
    for (MetadataResponse.TopicMetadata metadata : metadataResponse.topicMetadata()) {
        if (!topicsToRetain.test(metadata.topic()))
            continue;

        if (metadata.error() == Errors.NONE) {
            if (metadata.isInternal())
                internalTopics.add(metadata.topic());
            for (MetadataResponse.PartitionMetadata partitionMetadata : metadata.partitionMetadata()) {
                updatePartitionInfo(metadata.topic(), partitionMetadata, partitionInfo -> {
                    int epoch = partitionMetadata.leaderEpoch().orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH);
                    partitions.add(new MetadataCache.PartitionInfoAndEpoch(partitionInfo, epoch));
                });
            }
        }
    }
    return new MetadataCache(metadataResponse.clusterId(),
            new ArrayList<>(metadataResponse.brokers()),
            partitions,
            metadataResponse.topicsByError(Errors.TOPIC_AUTHORIZATION_FAILED),
            metadataResponse.topicsByError(Errors.INVALID_TOPIC_EXCEPTION),
            internalTopics,
            metadataResponse.controller());
}
        singletonList(nodes.get(0)), singletonList(nodes.get(0)), Collections.emptyList()));
t.add(new MetadataResponse.TopicMetadata(Errors.NONE, "my_topic", false, p));
Integer partitions = entry.getValue();
if (AdminUtils.topicExists(zkUtils, topic)) {
    int existingPartitions = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils).partitionMetadata().size();
    if (existingPartitions < partitions) {
public TopicMeta addPartition(@TopicExistConstraint String topic, AddPartition addPartition) {
    List<MetadataResponse.PartitionMetadata> partitionMetadata =
            AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils).partitionMetadata();
    int numPartitions = partitionMetadata.size();
    int numReplica = partitionMetadata.get(0).replicas().size();
@Override
public int getLeaderToShutDown(String topic) throws Exception {
    ZkUtils zkUtils = getZkUtils();
    try {
        MetadataResponse.PartitionMetadata firstPart = null;
        do {
            if (firstPart != null) {
                LOG.info("Unable to find leader. error code {}", firstPart.error().code());
                // not the first try. Sleep a bit
                Thread.sleep(150);
            }
            List<MetadataResponse.PartitionMetadata> partitionMetadata =
                    AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils).partitionMetadata();
            firstPart = partitionMetadata.get(0);
        } while (firstPart.error().code() != 0);
        return firstPart.leader().id();
    } finally {
        zkUtils.close();
    }
}
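The loop above always inspects partition 0. A minimal sketch of a variant that resolves the leader of an arbitrary partition index, using only the AdminUtils and PartitionMetadata calls already shown; the helper name and the exception it throws are illustrative, not part of the original code:

// Illustrative sketch (not from the original snippet): find the leader of a
// specific partition with the same fetchTopicMetadataFromZk() call.
private static int leaderOfPartition(String topic, int partition, ZkUtils zkUtils) {
    for (MetadataResponse.PartitionMetadata pm :
            AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils).partitionMetadata()) {
        if (pm.partition() == partition && pm.error().code() == 0) {
            return pm.leader().id();
        }
    }
    throw new IllegalStateException("No healthy leader for " + topic + "-" + partition);
}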
public short errorCodeFromTopicMetadata(String topic, ZkUtils zkUtils) {
    MetadataResponse.TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
    return topicMetadata.error().code();
}
public String createUniqueTopic(String prefix, int partitions, Properties topicConfig) throws InterruptedException {
    checkReady();
    String topic = (prefix + UUID.randomUUID().toString().substring(0, 5)).replaceAll("[^a-zA-Z0-9._-]", "_");
    AdminUtils.createTopic(kafkaServer.zkUtils(), topic, partitions, 1, topicConfig, AdminUtils.createTopic$default$6());
    waitForPassing(Duration.ofSeconds(5), () -> {
        assertTrue(AdminUtils.fetchTopicMetadataFromZk(topic, kafkaServer.zkUtils())
                .partitionMetadata().stream()
                .allMatch(pm -> !pm.leader().isEmpty()));
    });
    return topic;
}
@SuppressWarnings("unchecked") public int partitionSize(String topic, ZkUtils zkUtils) { MetadataResponse.TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils); return topicMetadata.partitionMetadata().size(); }
/**
 * Add new partitions to the Kafka topic.
 *
 * @param zkUtils ZkUtils class to use to add the partitions.
 * @param topic The topic to apply the change to.
 * @param topicMetadata Topic metadata stored in Zookeeper.
 * @param partitionCount The target partition count of the topic.
 */
private void maybeIncreaseTopicPartitionCount(ZkUtils zkUtils,
                                              String topic,
                                              MetadataResponse.TopicMetadata topicMetadata,
                                              int partitionCount) {
    if (partitionCount > topicMetadata.partitionMetadata().size()) {
        if (!ensureTopicNotUnderPartitionReassignment(zkUtils, topic)) {
            LOG.warn("There are ongoing partition reassignments for topic {}, skipping its partition count check.", topic);
            return;
        }
        AdminUtils.addPartitions(zkUtils, topic, partitionCount, "", true, RackAwareMode.Safe$.MODULE$);
        LOG.info("Kafka topic " + topic + " now has " + partitionCount + " partitions.");
    }
}
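A hedged usage sketch for the method above: it assumes the surrounding class exposes the same zkUtils instance used elsewhere in these snippets, and the topic name and target partition count are placeholders:

// Hypothetical caller: fetch current metadata and grow "my_topic" to 8 partitions if needed.
MetadataResponse.TopicMetadata currentMetadata = AdminUtils.fetchTopicMetadataFromZk("my_topic", zkUtils);
maybeIncreaseTopicPartitionCount(zkUtils, "my_topic", currentMetadata, 8);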
topicData.set(ERROR_CODE, metadata.error.code());
topicData.setIfExists(IS_INTERNAL, metadata.isInternal());
for (PartitionMetadata partitionMetadata : metadata.partitionMetadata()) {
    Struct partitionData = topicData.instance(PARTITION_METADATA);
    partitionData.set(ERROR_CODE, partitionMetadata.error.code());
topicMetadata.add(new TopicMetadata(topicError, topic, isInternal, partitionMetadata));
@Test
public void testSendToInvalidTopic() throws Exception {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "15000");

    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = TestUtils.metadataUpdateWith(1, emptyMap());
    Metadata metadata = new Metadata(0, Long.MAX_VALUE, true);
    metadata.update(initialUpdateResponse, time.milliseconds());

    MockClient client = new MockClient(time, metadata);

    Producer<String, String> producer = new KafkaProducer<>(configs, new StringSerializer(), new StringSerializer(),
            metadata, client, null, time);

    String invalidTopicName = "topic abc"; // Invalid topic name due to space
    ProducerRecord<String, String> record = new ProducerRecord<>(invalidTopicName, "HelloKafka");

    List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();
    topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.INVALID_TOPIC_EXCEPTION,
            invalidTopicName, false, Collections.emptyList()));
    MetadataResponse updateResponse = new MetadataResponse(
            new ArrayList<>(initialUpdateResponse.brokers()),
            initialUpdateResponse.clusterId(),
            initialUpdateResponse.controller().id(),
            topicMetadata);
    client.prepareMetadataUpdate(updateResponse);

    Future<RecordMetadata> future = producer.send(record);

    assertEquals("Cluster has incorrect invalid topic list.", Collections.singleton(invalidTopicName),
            metadata.fetch().invalidTopics());
    TestUtils.assertFutureError(future, InvalidTopicException.class);

    producer.close(Duration.ofMillis(0));
}
public static MetadataResponse metadataUpdateWith(final String clusterId,
                                                  final int numNodes,
                                                  final Map<String, Errors> topicErrors,
                                                  final Map<String, Integer> topicPartitionCounts,
                                                  final PartitionMetadataSupplier partitionSupplier) {
    final List<Node> nodes = new ArrayList<>(numNodes);
    for (int i = 0; i < numNodes; i++)
        nodes.add(new Node(i, "localhost", 1969 + i));

    List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();
    for (Map.Entry<String, Integer> topicPartitionCountEntry : topicPartitionCounts.entrySet()) {
        String topic = topicPartitionCountEntry.getKey();
        int numPartitions = topicPartitionCountEntry.getValue();

        List<MetadataResponse.PartitionMetadata> partitionMetadata = new ArrayList<>(numPartitions);
        for (int i = 0; i < numPartitions; i++) {
            Node leader = nodes.get(i % nodes.size());
            List<Node> replicas = Collections.singletonList(leader);
            partitionMetadata.add(partitionSupplier.supply(
                    Errors.NONE, i, leader, Optional.empty(), replicas, replicas, replicas));
        }

        topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, topic, Topic.isInternal(topic),
                partitionMetadata));
    }

    for (Map.Entry<String, Errors> topicErrorEntry : topicErrors.entrySet()) {
        String topic = topicErrorEntry.getKey();
        topicMetadata.add(new MetadataResponse.TopicMetadata(topicErrorEntry.getValue(), topic,
                Topic.isInternal(topic), Collections.emptyList()));
    }

    return new MetadataResponse(nodes, clusterId, 0, topicMetadata);
}
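A hedged usage sketch for the supplier-based overload above, assuming a MetadataResponse.PartitionMetadata constructor whose seven arguments match the supply() call in the loop; the cluster id, topic name, and counts are placeholders:

// Hypothetical usage: one topic with four partitions spread over three brokers,
// every partition reported without error.
MetadataResponse response = metadataUpdateWith("test-cluster", 3,
        Collections.emptyMap(),
        Collections.singletonMap("my_topic", 4),
        (error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) ->
                new MetadataResponse.PartitionMetadata(error, partition, leader, leaderEpoch,
                        replicas, isr, offlineReplicas));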
env.kafkaClient().prepareResponse(new MetadataResponse(initializedCluster.nodes(),
        initializedCluster.clusterResource().clusterId(), 1,
        singletonList(new MetadataResponse.TopicMetadata(Errors.NONE, topic, false,
                singletonList(partitionMetadata)))));