/**
 * Creates a single-partition topic with replication factor 1, blocking for up
 * to 30 seconds until the broker acknowledges the creation.
 *
 * @param topicName the name of the topic to create
 * @throws Exception if creation fails, is interrupted, or times out
 */
public void createTopic(String topicName) throws Exception {
    final NewTopic singlePartitionTopic = new NewTopic(topicName, 1, (short) 1);
    kafkaAdminClient
            .createTopics(Collections.singleton(singlePartitionTopic))
            .all()
            .get(30, TimeUnit.SECONDS);
}
/**
 * Create a Kafka topic with the given parameters, blocking until the broker
 * acknowledges the creation.
 *
 * @param topic The name of the topic.
 * @param partitions The number of partitions for this topic.
 * @param replication The replication factor for (partitions of) this topic.
 * @param topicConfig Additional topic-level configuration settings.
 * @throws RuntimeException if the topic cannot be created
 */
void createTopic(final String topic,
                 final int partitions,
                 final int replication,
                 final Map<String, String> topicConfig) {
    log.debug("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }",
        topic, partitions, replication, topicConfig);

    final ImmutableMap<String, Object> props = ImmutableMap.of(
        AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList(),
        AdminClientConfig.RETRIES_CONFIG, 5);

    // AdminClient is AutoCloseable; try-with-resources guarantees it is released.
    try (AdminClient adminClient = AdminClient.create(props)) {
        final NewTopic newTopic = new NewTopic(topic, partitions, (short) replication);
        newTopic.configs(topicConfig);
        try {
            // Block until the creation completes on the broker.
            adminClient.createTopics(ImmutableList.of(newTopic)).all().get();
        } catch (final InterruptedException e) {
            // Restore the interrupt flag so callers can still observe the interruption.
            Thread.currentThread().interrupt();
            throw new RuntimeException("Failed to create topic: " + topic, e);
        } catch (final Exception e) {
            throw new RuntimeException("Failed to create topic: " + topic, e);
        }
    }
}
// NOTE(review): fragment of a mock createTopics loop body — in this excerpt the
// braces are unbalanced and `topicName` is declared twice, so it would not
// compile as-is; the full method presumably closes these scopes elsewhere.
String topicName = newTopic.name();
KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
// NOTE(review): duplicate declaration of topicName (see two lines above).
String topicName = newTopic.name();
if (allTopics.containsKey(topicName)) {
    // Report duplicates through the per-topic future rather than throwing,
    // matching the real AdminClient's error-reporting style.
    future.completeExceptionally(new TopicExistsException(
            String.format("Topic %s exists already.", topicName)));
    continue;
// Build the replica list sized by the requested replication factor.
int replicationFactor = newTopic.replicationFactor();
List<Node> replicas = new ArrayList<>(replicationFactor);
for (int i = 0; i < replicationFactor; ++i) {
// One TopicPartitionInfo per requested partition; brokers.get(0) is used as leader.
int numberOfPartitions = newTopic.numPartitions();
List<TopicPartitionInfo> partitions = new ArrayList<>(numberOfPartitions);
for (int p = 0; p < numberOfPartitions; ++p) {
    partitions.add(new TopicPartitionInfo(p, brokers.get(0), replicas, Collections.emptyList()));
// Record the new topic (false = not internal) and complete its future successfully.
allTopics.put(topicName, new TopicMetadata(false, partitions, newTopic.configs()));
future.complete(null);
createTopicResult.put(topicName, future);
// NOTE(review): fragment of KafkaAdminClient.createTopics — the for-loop and the
// final else-branch are not closed in this excerpt.
final Map<String, CreateTopicsRequest.TopicDetails> topicsMap = new HashMap<>(newTopics.size());
for (NewTopic newTopic : newTopics) {
    if (topicNameIsUnrepresentable(newTopic.name())) {
        // Fail this topic locally without sending it to the broker.
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        future.completeExceptionally(new InvalidTopicException("The given topic name '" +
                newTopic.name() + "' cannot be represented in a request."));
        topicFutures.put(newTopic.name(), future);
    } else if (!topicFutures.containsKey(newTopic.name())) {
        // First occurrence of this name: register a pending future and include the
        // topic in the outgoing request; later duplicates are silently skipped.
        topicFutures.put(newTopic.name(), new KafkaFutureImpl<>());
        topicsMap.put(newTopic.name(), newTopic.convertToTopicDetails());
/**
 * Create any topics that do not yet exist and increase the partition count of
 * those whose current count is lower than requested.
 *
 * @param adminClient the AdminClient to operate with.
 * @param topics the topics this application expects to exist.
 */
private void addTopicsIfNeeded(AdminClient adminClient, Collection<NewTopic> topics) {
    if (topics.isEmpty()) {
        return;
    }
    Map<String, NewTopic> topicNameToTopic = new HashMap<>();
    // Original used compute() with a no-op assignment lambda; a plain put() has
    // identical "last definition wins" semantics and states the intent directly.
    topics.forEach(t -> topicNameToTopic.put(t.name(), t));
    DescribeTopicsResult topicInfo = adminClient
            .describeTopics(topics.stream()
                    .map(NewTopic::name)
                    .collect(Collectors.toList()));
    List<NewTopic> topicsToAdd = new ArrayList<>();
    // checkPartitions() fills topicsToAdd with missing topics and returns the
    // ones that exist but need their partition count raised.
    Map<String, NewPartitions> topicsToModify = checkPartitions(topicNameToTopic, topicInfo, topicsToAdd);
    if (!topicsToAdd.isEmpty()) {
        addTopics(adminClient, topicsToAdd);
    }
    if (!topicsToModify.isEmpty()) {
        modifyTopics(adminClient, topicsToModify);
    }
}
/**
 * Add topics to the existing broker(s).
 * The broker(s) must be running.
 * @param topics the topics.
 * @since 2.2
 */
public void addTopics(NewTopic... topics) {
    Assert.notNull(this.zookeeper, "Broker must be started before this method can be called");
    for (NewTopic requested : topics) {
        // Set.add() returning false means the name was already registered.
        Assert.isTrue(this.topics.add(requested.name()), () -> "topic already exists: " + requested);
        // The embedded cluster has only this.count brokers, so neither the
        // replication factor nor any explicit assignment may exceed it.
        boolean replicationSupported = requested.replicationFactor() <= this.count
                && (requested.replicasAssignments() == null
                        || requested.replicasAssignments().size() <= this.count);
        Assert.isTrue(replicationSupported,
                () -> "Embedded kafka does not support the requested replication factor: " + requested);
    }
    doWithAdmin(admin -> createTopics(admin, Arrays.asList(topics)));
}
@Test
public void alreadyExists() throws Exception {
    // Reflectively locate KafkaAdmin's non-public addTopics/modifyTopics methods.
    AtomicReference<Method> addTopicsRef = new AtomicReference<>();
    AtomicReference<Method> modifyTopicsRef = new AtomicReference<>();
    ReflectionUtils.doWithMethods(KafkaAdmin.class,
            m -> {
                m.setAccessible(true);
                if (m.getName().equals("addTopics")) {
                    addTopicsRef.set(m);
                }
                else if (m.getName().equals("modifyTopics")) {
                    modifyTopicsRef.set(m);
                }
            },
            m -> m.getName().endsWith("Topics"));
    try (AdminClient adminClient = AdminClient.create(this.admin.getConfig())) {
        // Both operations target topic1, which already exists; neither should throw.
        addTopicsRef.get().invoke(this.admin, adminClient, Collections.singletonList(this.topic1));
        modifyTopicsRef.get().invoke(this.admin, adminClient, Collections.singletonMap(
                this.topic1.name(), NewPartitions.increaseTo(this.topic1.numPartitions())));
    }
}
// NOTE(review): fragment of a partition-reconciliation loop — the if/else-if
// branches are not closed in this excerpt.
try {
    // Block up to operationTimeout seconds waiting for the topic description.
    TopicDescription topicDescription = f.get(this.operationTimeout, TimeUnit.SECONDS);
    if (topic.numPartitions() < topicDescription.partitions().size()) {
        // Fewer partitions requested than exist: partitions cannot shrink, so only log.
        if (logger.isInfoEnabled()) {
            logger.info(String.format(
                    "Topic '%s' exists but has a different partition count: %d not %d",
                    n, topicDescription.partitions().size(), topic.numPartitions()));
    else if (topic.numPartitions() > topicDescription.partitions().size()) {
        // More partitions requested: record the increase so it can be applied later.
        if (logger.isInfoEnabled()) {
            logger.info(String.format(
                    "Topic '%s' exists but has a different partition count: %d not %d, increasing " +
                    "if the broker supports it",
                    n, topicDescription.partitions().size(), topic.numPartitions()));
        topicsToModify.put(n, NewPartitions.increaseTo(topic.numPartitions()));
/**
 * Attempt to create the topic described by the given definition, returning true if the topic was created or false
 * if the topic already existed.
 *
 * @param topic the specification of the topic
 * @return true if the topic was created or false if the topic already existed.
 * @throws ConnectException if an error occurs, the operation takes too long, or the thread is interrupted while
 *         attempting to perform this operation
 * @throws UnsupportedVersionException if the broker does not support the necessary APIs to perform this request
 */
public boolean createTopic(NewTopic topic) {
    // A null specification is treated as "nothing to create".
    if (topic == null) {
        return false;
    }
    // createTopics() returns only the names it actually created, so membership
    // distinguishes "created" from "already existed".
    return createTopics(topic).contains(topic.name());
}
/**
 * Add topics to the existing broker(s).
 * The broker(s) must be running.
 * @param topics the topics.
 * @since 2.2
 */
public void addTopics(NewTopic... topics) {
    Assert.notNull(this.zookeeper, "Broker must be started before this method can be called");
    for (NewTopic newTopic : topics) {
        // Reject names already registered with this embedded broker.
        Assert.isTrue(this.topics.add(newTopic.name()),
                () -> "topic already exists: " + newTopic);
        // Replication (implicit or via explicit assignments) cannot exceed the
        // number of embedded brokers.
        Assert.isTrue(newTopic.replicationFactor() <= this.count
                        && (newTopic.replicasAssignments() == null
                                || newTopic.replicasAssignments().size() <= this.count),
                () -> "Embedded kafka does not support the requested replication factor: " + newTopic);
    }
    doWithAdmin(admin -> createTopics(admin, Arrays.asList(topics)));
}
/**
 * Creates the given test topic, failing the current test if the creation does
 * not succeed.
 *
 * @param topic the topic name
 * @param numberOfPartitions number of partitions for the topic
 * @param replicationFactor replication factor for the topic
 * @param properties additional properties (not used by this implementation)
 */
@Override
public void createTestTopic(String topic, int numberOfPartitions, int replicationFactor, Properties properties) {
    LOG.info("Creating topic {}", topic);
    try (AdminClient adminClient = AdminClient.create(getStandardProperties())) {
        NewTopic topicObj = new NewTopic(topic, numberOfPartitions, (short) replicationFactor);
        adminClient.createTopics(Collections.singleton(topicObj)).all().get();
    } catch (InterruptedException e) {
        // Restore the interrupt flag before failing the test.
        Thread.currentThread().interrupt();
        fail("Create test topic : " + topic + " failed, " + e.getMessage());
    } catch (Exception e) {
        // Log the full stack trace through the logger instead of printStackTrace().
        LOG.error("Create test topic {} failed", topic, e);
        fail("Create test topic : " + topic + " failed, " + e.getMessage());
    }
}
@Override public void initializeStorage() { super.initializeStorage(); try (AdminClient admin = AdminClient.create(this.producerConfig.asProperties())) { // Find default replication factor Config brokerConfig = getKafkaBrokerConfig(admin); final short replicationFactor = Short.parseShort(brokerConfig.get(DEFAULT_TOPIC_REPLICATION_FACTOR_PROP_NAME).value()); // Create topic final NewTopic topic = new NewTopic(topicName, (short)1, replicationFactor); topic.configs(Collect.hashMapOf("cleanup.policy", "delete", "retention.ms", Long.toString(Long.MAX_VALUE), "retention.bytes", "-1")); admin.createTopics(Collections.singleton(topic)); logger.info("Database history topic '{}' created", topic); } catch (Exception e) { throw new ConnectException("Creation of database history topic failed, please create the topic manually", e); } }
/**
 * Creates the Cruise Control metrics topic; an already-existing topic is
 * treated as success, any other failure is logged.
 */
private void createCruiseControlMetricsTopic() {
    try {
        final CreateTopicsResult createTopicsResult = _adminClient.createTopics(Collections.singletonList(_newTopic));
        // Block until the broker reports the outcome for this specific topic.
        createTopicsResult.values().get(_newTopic.name()).get();
        LOG.info("Cruise Control metrics topic created: {}", _cruiseControlMetricsTopic);
    } catch (InterruptedException e) {
        // Restore the interrupt flag — the original swallowed the interruption.
        Thread.currentThread().interrupt();
        if (!(e.getCause() instanceof TopicExistsException)) {
            LOG.error("Unable to create Cruise Control topic", e);
        }
    } catch (ExecutionException e) {
        // TopicExistsException means the topic is already there — that is fine.
        if (!(e.getCause() instanceof TopicExistsException)) {
            LOG.error("Unable to create Cruise Control topic", e);
        }
    }
}
/**
 * Creates the given topics, each with {@code numPartitions} partitions and a
 * replication factor of 1, then blocks until they are describable so they are
 * usable when this method returns.
 *
 * @param topicNames names of the topics to create
 * @param numPartitions partition count applied to every topic
 * @throws RuntimeException if the topics are not describable within 10 seconds
 */
public void createTopics(List<String> topicNames, int numPartitions) {
    List<NewTopic> newTopics = new ArrayList<>();
    for (String topicName: topicNames) {
        NewTopic newTopic = new NewTopic(topicName, numPartitions, (short) 1);
        newTopics.add(newTopic);
    }
    // NOTE(review): the CreateTopicsResult is deliberately not awaited here;
    // readiness is checked via describeTopics below instead.
    getAdminClient().createTopics(newTopics);
    //the following lines are a bit of black magic to ensure the topic is ready when we return
    DescribeTopicsResult dtr = getAdminClient().describeTopics(topicNames);
    try {
        dtr.all().get(10, TimeUnit.SECONDS);
    } catch (Exception e) {
        throw new RuntimeException("Error getting topic info", e);
    }
}

// NOTE(review): the body of deleteTopic is outside this excerpt.
public void deleteTopic(String topicName) {
/**
 * Create a new topic via the Kafka AdminClient API, calling the given handler
 * (in a different thread) with the result.
 */
@Override
public void createTopic(Topic topic, Handler<AsyncResult<Void>> handler) {
    NewTopic kafkaTopic = TopicSerialization.toNewTopic(topic, null);
    LOGGER.debug("Creating topic {}", kafkaTopic);
    // Take the per-topic future for this single topic from the creation result.
    KafkaFuture<Void> creationFuture = adminClient
            .createTopics(Collections.singleton(kafkaTopic))
            .values()
            .get(kafkaTopic.name());
    // The handler is invoked asynchronously once the future completes.
    queueWork(new UniWork<>("createTopic", creationFuture, handler));
}
@Test
public void testCreateTopics() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Stub the broker to answer any CreateTopicsRequest with a success (NONE) code.
        env.kafkaClient().prepareResponse(
                body -> body instanceof CreateTopicsRequest,
                new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));
        NewTopic topicWithAssignments = new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)));
        KafkaFuture<Void> future = env.adminClient()
                .createTopics(Collections.singleton(topicWithAssignments),
                        new CreateTopicsOptions().timeoutMs(10000))
                .all();
        // Must complete without throwing.
        future.get();
    }
}
// NOTE(review): fragment — the closing braces for the loop and the outer if are
// not visible in this excerpt.
if (topics != null) {
    for (NewTopic topic : topics) {
        // Skip null entries; index the rest by name (last one wins on duplicate names).
        if (topic != null)
            topicsByName.put(topic.name(), topic);