kafka.zk

How to use kafka.zk

Best Java code snippets using kafka.zk (Showing top 20 results out of 315)
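
Before the per-project snippets, here is a minimal end-to-end sketch of the two central classes, KafkaZkClient and AdminZkClient: connect to ZooKeeper, create a topic, and close the client. This is an illustrative sketch rather than code from any project below; it assumes a Kafka 2.x broker library on the classpath and a ZooKeeper ensemble at localhost:2181.

import java.util.Properties;
import kafka.admin.RackAwareMode;
import kafka.zk.AdminZkClient;
import kafka.zk.KafkaZkClient;
import org.apache.kafka.common.security.JaasUtils;
import org.apache.kafka.common.utils.Time;

public class KafkaZkExample {
 public static void main(String[] args) {
  // Session/connection timeouts (ms) and the metric group/type are illustrative values.
  KafkaZkClient zkClient = KafkaZkClient.apply("localhost:2181",
    JaasUtils.isZkSecurityEnabled(), 30000, 30000, Integer.MAX_VALUE,
    Time.SYSTEM, "kafka.zk.example", "SessionExpireListener");
  AdminZkClient adminZkClient = new AdminZkClient(zkClient);
  try {
   // One partition, replication factor 1, default topic config, no rack awareness.
   adminZkClient.createTopic("example-topic", 1, 1, new Properties(),
     RackAwareMode.Disabled$.MODULE$);
  } finally {
   zkClient.close();
  }
 }
}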

origin: apache/hive

 void deleteTopic(@SuppressWarnings("SameParameterValue") String topic) {
  adminZkClient.deleteTopic(topic);
 }
origin: apache/incubator-gobblin

public int getZookeeperPort() {
  return _zkServer.port();
}
origin: linkedin/kafka-monitor

void maybeAddPartitions(int minPartitionNum) {
 KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT_MS,
   ZK_CONNECTION_TIMEOUT_MS, Integer.MAX_VALUE, Time.SYSTEM, METRIC_GROUP_NAME, "SessionExpireListener");
 AdminZkClient adminZkClient = new AdminZkClient(zkClient);
 try {
  scala.collection.Map<Object, scala.collection.Seq<Object>> existingAssignment = getPartitionAssignment(zkClient, _topic);
  int partitionNum = existingAssignment.size();
  if (partitionNum < minPartitionNum) {
   LOG.info("MultiClusterTopicManagementService will increase partition of the topic {} "
     + "in cluster {} from {} to {}.", _topic, _zkConnect, partitionNum, minPartitionNum);
   scala.Option<scala.collection.Map<java.lang.Object, scala.collection.Seq<java.lang.Object>>> replicaAssignment = scala.Option.apply(null);
   scala.Option<Seq<Object>> brokerList = scala.Option.apply(null);
   adminZkClient.addPartitions(_topic, existingAssignment, adminZkClient.getBrokerMetadatas(RackAwareMode.Disabled$.MODULE$, brokerList), minPartitionNum, replicaAssignment, false);
  }
 } finally {
  zkClient.close();
 }
}
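
A note on the Scala interop above: scala.Option.apply(null) is the Java-side spelling of Scala's None. scala.Option.empty() is equivalent and arguably clearer; a one-line alternative sketch:

 // Equivalent to scala.Option.apply(null): an empty Option (None).
 scala.Option<Seq<Object>> brokerList = scala.Option.empty();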
origin: apache/hive

/**
 * Override to set up your specific external resource.
 *
 * @throws Throwable if setup fails (which will disable {@code after})
 */
@Override protected void before() throws Throwable {
 // Start the ZK and the Broker
 LOG.info("init embedded Zookeeper");
 zkServer = new EmbeddedZookeeper();
 tmpLogDir = Files.createTempDirectory("kafka-log-dir-").toAbsolutePath();
 String zkConnect = "127.0.0.1:" + zkServer.port();
 LOG.info("init kafka broker");
 Properties brokerProps = new Properties();
 brokerProps.setProperty("zookeeper.connect", zkConnect);
 brokerProps.setProperty("broker.id", "0");
 brokerProps.setProperty("log.dir", tmpLogDir.toString());
 brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKER_IP_PORT);
 brokerProps.setProperty("offsets.topic.replication.factor", "1");
 brokerProps.setProperty("transaction.state.log.replication.factor", "1");
 brokerProps.setProperty("transaction.state.log.min.isr", "1");
 KafkaConfig config = new KafkaConfig(brokerProps);
 kafkaServer = TestUtils.createServer(config, Time.SYSTEM);
 // TestUtils.createServer already starts the broker; startup() returns immediately here.
 kafkaServer.startup();
 adminZkClient = new AdminZkClient(kafkaServer.zkClient());
 LOG.info("Creating kafka TOPIC [{}]", TOPIC);
 adminZkClient.createTopic(TOPIC, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
}
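
To assert that the topic actually registered in ZooKeeper before tests run, KafkaZkClient exposes topicExists in Kafka 2.x. A hypothetical sanity check, not part of the original hive code:

 // Hypothetical check appended to before():
 if (!kafkaServer.zkClient().topicExists(TOPIC)) {
  throw new IllegalStateException("Topic " + TOPIC + " was not created");
 }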
origin: apache/incubator-gobblin

public void startCluster() {
  // Start Zookeeper.
  _zkServer = new EmbeddedZookeeper();
  _zkConnectString = "127.0.0.1:" + _zkServer.port();
  _zkClient = new ZkClient(_zkConnectString, 30000, 30000, ZKStringSerializer$.MODULE$);
  // Start the Kafka cluster.
  for (int i = 0; i < clusterCount; i++) {
    KafkaServer kafkaServer = createKafkaServer(i, _zkConnectString);
    kafkaBrokerList.add(kafkaServer);
  }
}
origin: apache/incubator-gobblin

public static void closeServer() {
 if (serverStarted && !serverClosed) {
  serverClosed = true;
  kafkaServer.shutdown();
  zkClient.close();
  zkServer.shutdown();
 }
}
origin: apache/incubator-gobblin

public static void startServer() throws RuntimeException {
 if (serverStarted && serverClosed) {
  throw new RuntimeException("Kafka test server has already been closed. Cannot generate Kafka server twice.");
 }
 if (!serverStarted) {
  serverStarted = true;
  zkConnect = TestZKUtils.zookeeperConnect();
  zkServer = new EmbeddedZookeeper(zkConnect);
  zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
  kafkaPort = TestUtils.choosePort();
  Properties props = TestUtils.createBrokerConfig(brokerId, kafkaPort, true);
  KafkaConfig config = new KafkaConfig(props);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);
 }
}
origin: apache/incubator-gobblin

void start()
 throws RuntimeException {
 if (_numStarted.incrementAndGet() == 1) {
  log.warn("Starting up Kafka server suite. Zk at " + _zkConnectString + "; Kafka server at " + _kafkaServerPort);
  _zkServer = new EmbeddedZookeeper(_zkConnectString);
  _zkClient = new ZkClient(_zkConnectString, 30000, 30000, ZKStringSerializer$.MODULE$);
  Properties props = kafka.utils.TestUtils.createBrokerConfig(_brokerId, _kafkaServerPort, true);
  props.setProperty("zookeeper.connect", _zkConnectString);
  KafkaConfig config = new KafkaConfig(props);
  Time mock = new MockTime();
  _kafkaServer = kafka.utils.TestUtils.createServer(config, mock);
 } else {
  log.info("Kafka server suite already started... continuing");
 }
}
origin: apache/flume

/**
 * Generates the Kafka bootstrap connection string from the metadata stored in Zookeeper.
 * Allows for backwards compatibility of the zookeeperConnect configuration.
 */
private String lookupBootstrap(String zookeeperConnect, SecurityProtocol securityProtocol) {
 try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
     JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
     Time.SYSTEM, "kafka.server", "SessionExpireListener")) {
  List<Broker> brokerList =
      JavaConverters.seqAsJavaListConverter(zkClient.getAllBrokersInCluster()).asJava();
  List<BrokerEndPoint> endPoints = brokerList.stream()
      .map(broker -> broker.brokerEndPoint(
        ListenerName.forSecurityProtocol(securityProtocol))
      )
      .collect(Collectors.toList());
  List<String> connections = new ArrayList<>();
  for (BrokerEndPoint endPoint : endPoints) {
   connections.add(endPoint.connectionString());
  }
  return StringUtils.join(connections, ',');
 }
}
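
The returned comma-separated host:port list drops straight into the standard bootstrap.servers client property. A usage sketch with illustrative property values:

 // Hypothetical wiring of the result into consumer properties:
 Properties consumerProps = new Properties();
 consumerProps.put("bootstrap.servers",
   lookupBootstrap("localhost:2181", SecurityProtocol.PLAINTEXT));
 consumerProps.put("key.deserializer",
   "org.apache.kafka.common.serialization.StringDeserializer");
 consumerProps.put("value.deserializer",
   "org.apache.kafka.common.serialization.ByteArrayDeserializer");
 consumerProps.put("group.id", "example-group");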
origin: apache/flume

private void migrateOffsets() {
 try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
     JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
     Time.SYSTEM, "kafka.server", "SessionExpireListener");
    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps)) {
  Map<TopicPartition, OffsetAndMetadata> kafkaOffsets = getKafkaOffsets(consumer);
  if (!kafkaOffsets.isEmpty()) {
   logger.info("Found Kafka offsets for topic {}. Will not migrate from zookeeper", topicStr);
   logger.debug("Offsets found: {}", kafkaOffsets);
   return;
  }
  logger.info("No Kafka offsets found. Migrating zookeeper offsets");
  Map<TopicPartition, OffsetAndMetadata> zookeeperOffsets =
      getZookeeperOffsets(zkClient, consumer);
  if (zookeeperOffsets.isEmpty()) {
   logger.warn("No offsets to migrate found in Zookeeper");
   return;
  }
  logger.info("Committing Zookeeper offsets to Kafka");
  logger.debug("Offsets to commit: {}", zookeeperOffsets);
  consumer.commitSync(zookeeperOffsets);
  // Read the offsets to verify they were committed
  Map<TopicPartition, OffsetAndMetadata> newKafkaOffsets = getKafkaOffsets(consumer);
  logger.debug("Offsets committed: {}", newKafkaOffsets);
  if (!newKafkaOffsets.keySet().containsAll(zookeeperOffsets.keySet())) {
   throw new FlumeException("Offsets could not be committed");
  }
 }
}
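
The getKafkaOffsets helper is not shown in this index; a plausible reconstruction (an assumption, not flume's actual code) using the consumer's committed() API:

 // Hypothetical sketch of getKafkaOffsets: read the committed offset for each partition.
 private Map<TopicPartition, OffsetAndMetadata> getKafkaOffsets(
     KafkaConsumer<String, byte[]> consumer) {
  Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
  for (PartitionInfo partition : consumer.partitionsFor(topicStr)) {
   TopicPartition tp = new TopicPartition(topicStr, partition.partition());
   OffsetAndMetadata committed = consumer.committed(tp);
   if (committed != null) {
    offsets.put(tp, committed);
   }
  }
  return offsets;
 }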
origin: debezium/debezium

/**
 * Create the specified topic.
 * 
 * @param topic the name of the topic to create
 * @param numPartitions the number of partitions for the topic
 * @param replicationFactor the replication factor for the topic
 */
public void createTopic( String topic, int numPartitions, int replicationFactor ) {
  // AdminZkClient accepts a null RackAwareMode here; rack-aware placement
  // enforcement is then skipped. RackAwareMode.Disabled$.MODULE$ (used in the
  // other snippets on this page) states the intent explicitly.
  RackAwareMode rackAwareMode = null;
  getAdminZkClient().createTopic(topic, numPartitions, replicationFactor, new Properties(), rackAwareMode);
}
origin: linkedin/kafka-monitor

private static List<PartitionInfo> getPartitionInfo(KafkaZkClient zkClient, String topic) {
 scala.collection.immutable.Set<String> topicList = new scala.collection.immutable.Set.Set1<>(topic);
 scala.collection.Map<Object, scala.collection.Seq<Object>> partitionAssignments =
   zkClient.getPartitionAssignmentForTopics(topicList).apply(topic);
 List<PartitionInfo> partitionInfoList = new ArrayList<>();
 scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it = partitionAssignments.iterator();
 while (it.hasNext()) {
  scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
  Integer partition = (Integer) scalaTuple._1();
  scala.Option<Object> leaderOption = zkClient.getLeaderForPartition(new TopicPartition(topic, partition));
  Node leader = leaderOption.isEmpty() ? null : new Node((Integer) leaderOption.get(), "", -1);
  Node[] replicas = new Node[scalaTuple._2().size()];
  for (int i = 0; i < replicas.length; i++) {
   Integer brokerId = (Integer) scalaTuple._2().apply(i);
   replicas[i] = new Node(brokerId, "", -1);
  }
  partitionInfoList.add(new PartitionInfo(topic, partition, leader, replicas, null));
 }
 return partitionInfoList;
}
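
The manual Scala iterator walk above can be shortened with scala.collection.JavaConverters, which the flume lookupBootstrap snippet already uses. A hedged alternative sketch:

 // Convert the Scala map and each Seq to Java collections up front:
 java.util.Map<Object, scala.collection.Seq<Object>> assignments =
   scala.collection.JavaConverters.mapAsJavaMapConverter(
     zkClient.getPartitionAssignmentForTopics(topicList).apply(topic)).asJava();
 for (java.util.Map.Entry<Object, scala.collection.Seq<Object>> entry : assignments.entrySet()) {
  int partition = (Integer) entry.getKey();
  java.util.List<Object> replicaIds =
    scala.collection.JavaConverters.seqAsJavaListConverter(entry.getValue()).asJava();
  // ... build the Node[] replicas and PartitionInfo exactly as in the original loop
 }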
origin: apache/flume

private Map<TopicPartition, OffsetAndMetadata> getZookeeperOffsets(
    KafkaZkClient zkClient, KafkaConsumer<String, byte[]> consumer) {
 Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
 List<PartitionInfo> partitions = consumer.partitionsFor(topicStr);
 for (PartitionInfo partition : partitions) {
  TopicPartition topicPartition = new TopicPartition(topicStr, partition.partition());
  Option<Object> optionOffset = zkClient.getConsumerOffset(groupId, topicPartition);
  if (optionOffset.nonEmpty()) {
   Long offset = (Long) optionOffset.get();
   OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(offset);
   offsets.put(topicPartition, offsetAndMetadata);
  }
 }
 return offsets;
}
origin: linkedin/kafka-monitor

private static scala.collection.Map<Object, scala.collection.Seq<Object>> getPartitionAssignment(KafkaZkClient zkClient, String topic) {
 scala.collection.immutable.Set<String> topicList = new scala.collection.immutable.Set.Set1<>(topic);
 return zkClient.getPartitionAssignmentForTopics(topicList).apply(topic);
}
origin: apache/incubator-gobblin

if (_numStarted.incrementAndGet() == 1) {
 log.warn("Starting up Kafka server suite. Zk at " + _zkConnectString + "; Kafka server at " + _kafkaServerPort);
 _zkServer = new EmbeddedZookeeper();
 _zkConnectString = "127.0.0.1:" + _zkServer.port();
 _zkClient = new ZkClient(_zkConnectString, 30000, 30000, ZKStringSerializer$.MODULE$);
 // ... broker startup continues as in the start() snippet above
}
origin: apache/incubator-gobblin

void shutdown() {
 if (_numStarted.decrementAndGet() == 0) {
  log.info("Shutting down Kafka server suite");
  _kafkaServer.shutdown();
  _zkClient.close();
  _zkServer.shutdown();
 }
 else {
  log.info("Kafka server suite still in use ... not shutting down yet");
 }
}
origin: apache/flume

private void migrateOffsets(String topicStr) {
 try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
     JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
     Time.SYSTEM, "kafka.server", "SessionExpireListener");
    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps)) {
  // ... body mirrors the single-topic migrateOffsets() snippet above, applied per topicStr
 }
}
origin: apache/flume

 private Map<TopicPartition, OffsetAndMetadata> getZookeeperOffsets(
     KafkaZkClient zkClient, KafkaConsumer<String, byte[]> consumer, String topicStr) {

  Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
  List<PartitionInfo> partitions = consumer.partitionsFor(topicStr);
  for (PartitionInfo partition : partitions) {
   TopicPartition topicPartition = new TopicPartition(topicStr, partition.partition());
   Option<Object> optionOffset = zkClient.getConsumerOffset(groupId, topicPartition);
   if (optionOffset.nonEmpty()) {
    Long offset = (Long) optionOffset.get();
    OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(offset);
    offsets.put(topicPartition, offsetAndMetadata);
   }
  }
  return offsets;
 }
}
origin: apache/hive

/**
 * Override to tear down your specific external resource.
 */
@Override protected void after() {
 super.after();
 try {
  FileUtils.deleteDirectory(tmpLogDir.toFile());
 } catch (IOException e) {
  LOG.error("Error cleaning " + tmpLogDir, e);
 }
 if (kafkaServer != null) {
  kafkaServer.shutdown();
  kafkaServer.awaitShutdown();
 }
 zkServer.shutdown();
}
kafka.zk

Most used classes

  • EmbeddedZookeeper
  • KafkaZkClient
  • AdminZkClient