Tabnine Logo
KafkaZkClient.apply
Code IndexAdd Tabnine to your IDE (free)

How to use
apply
method
in
kafka.zk.KafkaZkClient

Best Java code snippets using kafka.zk.KafkaZkClient.apply (Showing top 11 results out of 315)

origin: apache/flume

/**
 * One-time migration of consumer offsets from ZooKeeper to Kafka.
 * Skips the migration when Kafka already holds offsets for the topic,
 * and verifies after committing that every migrated partition was persisted.
 */
private void migrateOffsets() {
  // Both clients are AutoCloseable; try-with-resources closes them even on failure.
  try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
      JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
      Time.SYSTEM, "kafka.server", "SessionExpireListener");
      KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps)) {
    Map<TopicPartition, OffsetAndMetadata> existingOffsets = getKafkaOffsets(consumer);
    if (!existingOffsets.isEmpty()) {
      // Kafka is already the source of truth for this topic; nothing to migrate.
      logger.info("Found Kafka offsets for topic {}. Will not migrate from zookeeper", topicStr);
      logger.debug("Offsets found: {}", existingOffsets);
      return;
    }
    logger.info("No Kafka offsets found. Migrating zookeeper offsets");
    Map<TopicPartition, OffsetAndMetadata> zkOffsets = getZookeeperOffsets(zkClient, consumer);
    if (zkOffsets.isEmpty()) {
      logger.warn("No offsets to migrate found in Zookeeper");
      return;
    }
    logger.info("Committing Zookeeper offsets to Kafka");
    logger.debug("Offsets to commit: {}", zkOffsets);
    consumer.commitSync(zkOffsets);
    // Read the offsets back to confirm the commit actually landed for each partition.
    Map<TopicPartition, OffsetAndMetadata> committedOffsets = getKafkaOffsets(consumer);
    logger.debug("Offsets committed: {}", committedOffsets);
    if (!committedOffsets.keySet().containsAll(zkOffsets.keySet())) {
      throw new FlumeException("Offsets could not be committed");
    }
  }
}
origin: apache/flume

private void migrateOffsets(String topicStr) {
 try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
     JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
     Time.SYSTEM, "kafka.server", "SessionExpireListener");
origin: apache/flume

/**
 * Generates the Kafka bootstrap connection string from the metadata stored in Zookeeper.
 * Allows for backwards compatibility of the zookeeperConnect configuration.
 */
private String lookupBootstrap(String zookeeperConnect, SecurityProtocol securityProtocol) {
  try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
      JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
      Time.SYSTEM, "kafka.server", "SessionExpireListener")) {
    // Resolve the listener once; every broker endpoint is looked up against it.
    ListenerName listener = ListenerName.forSecurityProtocol(securityProtocol);
    List<Broker> brokers =
        JavaConverters.seqAsJavaListConverter(zkClient.getAllBrokersInCluster()).asJava();
    // host:port per live broker, comma-joined into a bootstrap.servers value.
    List<String> connections = brokers.stream()
        .map(broker -> broker.brokerEndPoint(listener).connectionString())
        .collect(Collectors.toList());
    return StringUtils.join(connections, ',');
  }
}
origin: linkedin/kafka-monitor

void maybeReassignPartitionAndElectLeader() throws Exception {
 KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT_MS,
   ZK_CONNECTION_TIMEOUT_MS, Integer.MAX_VALUE, Time.SYSTEM, METRIC_GROUP_NAME, "SessionExpireListener");
origin: linkedin/kafka-monitor

/**
 * Grows the monitored topic to at least {@code minPartitionNum} partitions.
 * No-op when the topic already has enough partitions.
 *
 * @param minPartitionNum minimum number of partitions the topic should have
 */
void maybeAddPartitions(int minPartitionNum) {
  KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect, JaasUtils.isZkSecurityEnabled(),
      ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, Integer.MAX_VALUE, Time.SYSTEM,
      METRIC_GROUP_NAME, "SessionExpireListener");
  AdminZkClient adminZkClient = new AdminZkClient(zkClient);
  try {
    scala.collection.Map<Object, scala.collection.Seq<Object>> currentAssignment =
        getPartitionAssignment(zkClient, _topic);
    int currentPartitionNum = currentAssignment.size();
    if (currentPartitionNum >= minPartitionNum) {
      return; // already at or above the requested partition count
    }
    LOG.info("MultiClusterTopicManagementService will increase partition of the topic {} "
        + "in cluster {} from {} to {}.", _topic, _zkConnect, currentPartitionNum, minPartitionNum);
    // scala.Option.apply(null) is Scala's None: let Kafka pick replica placement and brokers.
    scala.Option<scala.collection.Map<java.lang.Object, scala.collection.Seq<java.lang.Object>>> noReplicaAssignment =
        scala.Option.apply(null);
    scala.Option<Seq<Object>> allBrokers = scala.Option.apply(null);
    adminZkClient.addPartitions(_topic, currentAssignment,
        adminZkClient.getBrokerMetadatas(RackAwareMode.Disabled$.MODULE$, allBrokers),
        minPartitionNum, noReplicaAssignment, false);
  } finally {
    zkClient.close();
  }
}
origin: org.apache.flume.flume-ng-channels/flume-kafka-channel

/**
 * Migrates consumer offsets from ZooKeeper into Kafka, unless Kafka already
 * has offsets for the topic. Throws FlumeException if the committed offsets
 * do not cover every partition found in ZooKeeper.
 */
private void migrateOffsets() {
  // try-with-resources guarantees both the ZK client and the consumer are closed.
  try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
      JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
      Time.SYSTEM, "kafka.server", "SessionExpireListener");
      KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps)) {
    Map<TopicPartition, OffsetAndMetadata> offsetsInKafka = getKafkaOffsets(consumer);
    if (!offsetsInKafka.isEmpty()) {
      // Offsets already present in Kafka take precedence; skip migration entirely.
      logger.info("Found Kafka offsets for topic {}. Will not migrate from zookeeper", topicStr);
      logger.debug("Offsets found: {}", offsetsInKafka);
      return;
    }
    logger.info("No Kafka offsets found. Migrating zookeeper offsets");
    Map<TopicPartition, OffsetAndMetadata> offsetsInZk = getZookeeperOffsets(zkClient, consumer);
    if (offsetsInZk.isEmpty()) {
      logger.warn("No offsets to migrate found in Zookeeper");
      return;
    }
    logger.info("Committing Zookeeper offsets to Kafka");
    logger.debug("Offsets to commit: {}", offsetsInZk);
    consumer.commitSync(offsetsInZk);
    // Verify the commit: every migrated partition must now be visible in Kafka.
    Map<TopicPartition, OffsetAndMetadata> verified = getKafkaOffsets(consumer);
    logger.debug("Offsets committed: {}", verified);
    if (!verified.keySet().containsAll(offsetsInZk.keySet())) {
      throw new FlumeException("Offsets could not be committed");
    }
  }
}
origin: org.apache.flume.flume-ng-sources/flume-kafka-source

private void migrateOffsets(String topicStr) {
 try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
     JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
     Time.SYSTEM, "kafka.server", "SessionExpireListener");
origin: org.apache.flume.flume-ng-sources/flume-kafka-source

/**
 * Generates the Kafka bootstrap connection string from the metadata stored in Zookeeper.
 * Allows for backwards compatibility of the zookeeperConnect configuration.
 */
private String lookupBootstrap(String zookeeperConnect, SecurityProtocol securityProtocol) {
  try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
      JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
      Time.SYSTEM, "kafka.server", "SessionExpireListener")) {
    ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol);
    // Brokers registered in ZK, converted from the Scala Seq returned by the client.
    List<Broker> liveBrokers =
        JavaConverters.seqAsJavaListConverter(zkClient.getAllBrokersInCluster()).asJava();
    List<String> hostPorts = new ArrayList<>();
    for (Broker broker : liveBrokers) {
      hostPorts.add(broker.brokerEndPoint(listenerName).connectionString());
    }
    // Comma-separated host:port list, i.e. a bootstrap.servers value.
    return StringUtils.join(hostPorts, ',');
  }
}
origin: uber/uReplicator

 /**
  * Creates a single-partition, replication-factor-1 topic via ZooKeeper.
  * Silently succeeds when the topic already exists.
  *
  * @param kafkaTopic name of the topic to create
  * @param zkStr      ZooKeeper connect string of the target cluster
  */
 public static void createTopic(String kafkaTopic, String zkStr) {
  // TopicCommand.main() will call System.exit() finally, which will break maven-surefire-plugin
  String[] args = new String[]{"--create", "--zookeeper", zkStr, "--replication-factor", "1",
    "--partitions", "1", "--topic", kafkaTopic};
  // try-with-resources fixes a leak: the original never closed zkClient, leaking the ZK session.
  try (KafkaZkClient zkClient = KafkaZkClient.apply(zkStr, false, 30000, 30000, Integer.MAX_VALUE,
    Time.SYSTEM, "kafka.server", "SessionExpireListener")) {
   TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(args);
   TopicCommand.createTopic(zkClient, opts);
  } catch (TopicExistsException e) {
   // Catch TopicExistsException otherwise it will break maven-surefire-plugin
   System.out.println("Topic already existed");
  }
 }
}
origin: uber/uReplicator

 /**
  * Creates a single-partition, replication-factor-1 topic via ZooKeeper.
  * Silently succeeds when the topic already exists.
  *
  * @param kafkaTopic name of the topic to create
  * @param zkStr      ZooKeeper connect string of the target cluster
  */
 public static void createTopic(String kafkaTopic, String zkStr) {
  // TopicCommand.main() will call System.exit() finally, which will break maven-surefire-plugin
  String[] args = new String[]{"--create", "--zookeeper", zkStr, "--replication-factor", "1",
    "--partitions", "1", "--topic", kafkaTopic};
  // try-with-resources fixes a leak: the original never closed zkClient, leaking the ZK session.
  try (KafkaZkClient zkClient = KafkaZkClient.apply(zkStr, false, 30000, 30000, Integer.MAX_VALUE,
    Time.SYSTEM, "kafka.server", "SessionExpireListener")) {
   TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(args);
   TopicCommand.createTopic(zkClient, opts);
  } catch (TopicExistsException e) {
   // Catch TopicExistsException otherwise it will break maven-surefire-plugin
   System.out.println("Topic already existed");
  }
 }
}
origin: uber/uReplicator

 /**
  * Creates a single-partition, replication-factor-1 topic via ZooKeeper.
  * Silently succeeds when the topic already exists.
  *
  * @param kafkaTopic name of the topic to create
  * @param zkStr      ZooKeeper connect string of the target cluster
  */
 public static void createTopic(String kafkaTopic, String zkStr) {
  // TopicCommand.main() will call System.exit() finally, which will break maven-surefire-plugin
  String[] args = new String[]{"--create", "--zookeeper", zkStr, "--replication-factor", "1",
    "--partitions", "1", "--topic", kafkaTopic};
  // try-with-resources fixes a leak: the original never closed zkClient, leaking the ZK session.
  try (KafkaZkClient zkClient = KafkaZkClient.apply(zkStr, false, 30000, 30000, Integer.MAX_VALUE,
    Time.SYSTEM, "kafka.server", "SessionExpireListener")) {
   TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(args);
   TopicCommand.createTopic(zkClient, opts);
  } catch (TopicExistsException e) {
   // Catch TopicExistsException otherwise it will break maven-surefire-plugin
   System.out.println("Topic already existed");
  }
 }
}
kafka.zk.KafkaZkClient.apply

Popular methods of KafkaZkClient

  • <init>
  • topicExists
  • getAllBrokersInCluster
  • getConsumerOffset
  • getLeaderForPartition
  • isTopicMarkedForDeletion
  • close
  • createPartitionReassignment
  • getBroker
  • getEntityConfigs
  • getPartitionAssignmentForTopics
  • reassignPartitionsInProgress
  • getPartitionAssignmentForTopics,
  • reassignPartitionsInProgress,
  • setOrCreateEntityConfigs

Popular in Java

  • Creating JSON documents from java classes using gson
  • startActivity (Activity)
  • getApplicationContext (Context)
  • getSystemService (Context)
  • Graphics2D (java.awt)
    This Graphics2D class extends the Graphics class to provide more sophisticated control over graphics
  • PrintStream (java.io)
    Fake signature of an existing Java class.
  • GregorianCalendar (java.util)
    GregorianCalendar is a concrete subclass of Calendar and provides the standard calendar used by most of the world.
  • Locale (java.util)
    Locale represents a language/country/variant combination. Locales are used to alter the presentation of information.
  • NoSuchElementException (java.util)
    Thrown when trying to retrieve an element past the end of an Enumeration or Iterator.
  • IsNull (org.hamcrest.core)
    Is the value null?
  • Top plugins for Android Studio
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now