KafkaZkClient

How to use KafkaZkClient in kafka.zk

Best Java code snippets using kafka.zk.KafkaZkClient (top results out of 315)

origin: linkedin/kafka-monitor

void maybeReassignPartitionAndElectLeader() throws Exception {
 KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT_MS,
   ZK_CONNECTION_TIMEOUT_MS, Integer.MAX_VALUE, Time.SYSTEM, METRIC_GROUP_NAME, "SessionExpireListener");
 try {
  Collection<Broker> brokers = scala.collection.JavaConversions.asJavaCollection(zkClient.getAllBrokersInCluster());
  // ... (elided in the indexed snippet: partitionInfoList and the current/expected replication
  // factors are computed here from _replicationFactor, currentReplicationFactor, _topic, _zkConnect)
  if (expectedReplicationFactor > currentReplicationFactor && !zkClient.reassignPartitionsInProgress()) {
   LOG.info("MultiClusterTopicManagementService will increase the replication factor of the topic {} in cluster {} "
     + "from {} to {}", _topic, _zkConnect, currentReplicationFactor, expectedReplicationFactor);
   // (elided: the replication-factor increase itself)
  }
  Properties currentProperties = zkClient.getEntityConfigs(ConfigType.Topic(), _topic);
  Properties expectedProperties = new Properties();
  for (Object key : currentProperties.keySet())
   expectedProperties.put(key, currentProperties.get(key));
  // ... (elided: the service's own topic properties are merged into expectedProperties)
  if (!expectedProperties.equals(currentProperties)) { // reconstructed guard
   LOG.info("MultiClusterTopicManagementService will overwrite properties of the topic {} "
     + "in cluster {} from {} to {}.", _topic, _zkConnect, currentProperties, expectedProperties);
   zkClient.setOrCreateEntityConfigs(ConfigType.Topic(), _topic, expectedProperties);
  }
  if (/* elided condition */ !zkClient.reassignPartitionsInProgress()) {
   LOG.info("MultiClusterTopicManagementService will reassign partitions of the topic {} in cluster {}", _topic, _zkConnect);
   reassignPartitions(zkClient, brokers, _topic, partitionInfoList.size(), expectedReplicationFactor);
  }
 } finally {
  zkClient.close();
 }
}
origin: apache/flume

private Map<TopicPartition, OffsetAndMetadata> getZookeeperOffsets(
    KafkaZkClient zkClient, KafkaConsumer<String, byte[]> consumer) {
 Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
 List<PartitionInfo> partitions = consumer.partitionsFor(topicStr);
 for (PartitionInfo partition : partitions) {
  TopicPartition topicPartition = new TopicPartition(topicStr, partition.partition());
  Option<Object> optionOffset = zkClient.getConsumerOffset(groupId, topicPartition);
  if (optionOffset.nonEmpty()) {
   Long offset = (Long) optionOffset.get();
   OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(offset);
   offsets.put(topicPartition, offsetAndMetadata);
  }
 }
 return offsets;
}
origin: linkedin/kafka-monitor

private static List<PartitionInfo> getPartitionInfo(KafkaZkClient zkClient, String topic) {
 scala.collection.immutable.Set<String> topicList = new scala.collection.immutable.Set.Set1<>(topic);
 scala.collection.Map<Object, scala.collection.Seq<Object>> partitionAssignments =
   zkClient.getPartitionAssignmentForTopics(topicList).apply(topic);
 List<PartitionInfo> partitionInfoList = new ArrayList<>();
 scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it = partitionAssignments.iterator();
 while (it.hasNext()) {
  scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
  Integer partition = (Integer) scalaTuple._1();
  scala.Option<Object> leaderOption = zkClient.getLeaderForPartition(new TopicPartition(topic, partition));
  Node leader = leaderOption.isEmpty() ?  null : new Node((Integer) leaderOption.get(), "", -1);
  Node[] replicas = new Node[scalaTuple._2().size()];
  for (int i = 0; i < replicas.length; i++) {
   Integer brokerId = (Integer) scalaTuple._2().apply(i);
   replicas[i] = new Node(brokerId, "", -1);
  }
  partitionInfoList.add(new PartitionInfo(topic, partition, leader, replicas, null));
 }
 return partitionInfoList;
}
origin: apache/flume

/**
 * Generates the Kafka bootstrap connection string from the metadata stored in Zookeeper.
 * Allows for backwards compatibility of the zookeeperConnect configuration.
 */
private String lookupBootstrap(String zookeeperConnect, SecurityProtocol securityProtocol) {
 try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
     JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
     Time.SYSTEM, "kafka.server", "SessionExpireListener")) {
  List<Broker> brokerList =
      JavaConverters.seqAsJavaListConverter(zkClient.getAllBrokersInCluster()).asJava();
  List<BrokerEndPoint> endPoints = brokerList.stream()
      .map(broker -> broker.brokerEndPoint(
        ListenerName.forSecurityProtocol(securityProtocol))
      )
      .collect(Collectors.toList());
  List<String> connections = new ArrayList<>();
  for (BrokerEndPoint endPoint : endPoints) {
   connections.add(endPoint.connectionString());
  }
  return StringUtils.join(connections, ',');
 }
}
origin: linkedin/kafka-monitor

void maybeAddPartitions(int minPartitionNum) {
 KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT_MS,
   ZK_CONNECTION_TIMEOUT_MS, Integer.MAX_VALUE, Time.SYSTEM, METRIC_GROUP_NAME, "SessionExpireListener");
 AdminZkClient adminZkClient = new AdminZkClient(zkClient);
 try {
  scala.collection.Map<Object, scala.collection.Seq<Object>> existingAssignment = getPartitionAssignment(zkClient, _topic);
  int partitionNum = existingAssignment.size();
  if (partitionNum < minPartitionNum) {
   LOG.info("MultiClusterTopicManagementService will increase partition of the topic {} "
     + "in cluster {} from {} to {}.", _topic, _zkConnect, partitionNum, minPartitionNum);
   scala.Option<scala.collection.Map<java.lang.Object, scala.collection.Seq<java.lang.Object>>> replicaAssignment = scala.Option.apply(null);
   scala.Option<Seq<Object>> brokerList = scala.Option.apply(null);
   adminZkClient.addPartitions(_topic, existingAssignment, adminZkClient.getBrokerMetadatas(RackAwareMode.Disabled$.MODULE$, brokerList), minPartitionNum, replicaAssignment, false);
  }
 } finally {
  zkClient.close();
 }
}
origin: apache/flume

private void migrateOffsets() {
 try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
     JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
     Time.SYSTEM, "kafka.server", "SessionExpireListener");
    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps)) {
  Map<TopicPartition, OffsetAndMetadata> kafkaOffsets = getKafkaOffsets(consumer);
  if (!kafkaOffsets.isEmpty()) {
   logger.info("Found Kafka offsets for topic {}. Will not migrate from zookeeper", topicStr);
   logger.debug("Offsets found: {}", kafkaOffsets);
   return;
  }
  logger.info("No Kafka offsets found. Migrating zookeeper offsets");
  Map<TopicPartition, OffsetAndMetadata> zookeeperOffsets =
      getZookeeperOffsets(zkClient, consumer);
  if (zookeeperOffsets.isEmpty()) {
   logger.warn("No offsets to migrate found in Zookeeper");
   return;
  }
  logger.info("Committing Zookeeper offsets to Kafka");
  logger.debug("Offsets to commit: {}", zookeeperOffsets);
  consumer.commitSync(zookeeperOffsets);
  // Read the offsets to verify they were committed
  Map<TopicPartition, OffsetAndMetadata> newKafkaOffsets = getKafkaOffsets(consumer);
  logger.debug("Offsets committed: {}", newKafkaOffsets);
  if (!newKafkaOffsets.keySet().containsAll(zookeeperOffsets.keySet())) {
   throw new FlumeException("Offsets could not be committed");
  }
 }
}
origin: allegro/hermes

private boolean isMigrationToNewKafkaTopic(KafkaTopics kafkaTopics) {
  return kafkaTopics.getSecondary().isPresent() &&
      !kafkaZkClient.topicExists(kafkaTopics.getPrimary().name().asString());
}
origin: allegro/hermes

private KafkaZkClient kafkaZkClient(ZooKeeperClient zooKeeperClient) {
  return new KafkaZkClient(zooKeeperClient, false, Time.SYSTEM);
}
origin: allegro/hermes

  private boolean isMarkedForDeletion(String kafkaClusterName, KafkaTopic kafkaTopic) {
    return zkClients.get(kafkaClusterName).isTopicMarkedForDeletion(kafkaTopic.name().asString());
  }
origin: allegro/hermes

@Override
@SuppressWarnings("unchecked")
public int readLeaderForPartition(TopicAndPartition topicAndPartition) {
  try {
    TopicPartition topicPartition = new TopicPartition(topicAndPartition.topic(), topicAndPartition.partition());
    return (int)kafkaZkClient.getLeaderForPartition(topicPartition).get();
  } catch (Exception exception) {
    throw new BrokerNotFoundForPartitionException(topicAndPartition.topic(), topicAndPartition.partition(), exception);
  }
}
origin: apache/flume

private void migrateOffsets(String topicStr) {
 try (KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperConnect,
     JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, 10,
     Time.SYSTEM, "kafka.server", "SessionExpireListener");
    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps)) {
  // ... (snippet truncated; the body matches the per-topic variant of the migrateOffsets() example above)
 }
}
origin: allegro/hermes

@Override
public boolean topicExists(Topic topic) {
  return kafkaNamesMapper.toKafkaTopics(topic)
      .allMatch(kafkaTopic -> kafkaZkClient.topicExists(kafkaTopic.name().asString()));
}
origin: allegro/hermes

private BrokerOperations(Map<String, String> kafkaZkConnection, int sessionTimeout, int connectionTimeout,
             int maxInflightRequests, String namespace) {
  zkClients = kafkaZkConnection.entrySet().stream()
      .collect(toMap(Map.Entry::getKey,
              e -> {
                ZooKeeperClient zooKeeperClient = new ZooKeeperClient(
                    e.getValue(), connectionTimeout, sessionTimeout, maxInflightRequests,
                    Time.SYSTEM, ZOOKEEPER_METRIC_GROUP, ZOOKEEPER_METRIC_TYPE);
                return new KafkaZkClient(zooKeeperClient, false, Time.SYSTEM);
              }));
  kafkaNamesMapper = new JsonToAvroMigrationKafkaNamesMapper(namespace);
}
origin: allegro/hermes

public boolean topicExists(String topicName, String kafkaClusterName) {
  Topic topic = topic(topicName).build();
  return kafkaNamesMapper.toKafkaTopics(topic)
      .allMatch(kafkaTopic -> zkClients.get(kafkaClusterName).topicExists(kafkaTopic.name().asString()) &&
          !isMarkedForDeletion(kafkaClusterName, kafkaTopic));
}
origin: apache/flume

 private Map<TopicPartition, OffsetAndMetadata> getZookeeperOffsets(
     KafkaZkClient zkClient, KafkaConsumer<String, byte[]> consumer, String topicStr) {

  Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
  List<PartitionInfo> partitions = consumer.partitionsFor(topicStr);
  for (PartitionInfo partition : partitions) {
   TopicPartition topicPartition = new TopicPartition(topicStr, partition.partition());
   Option<Object> optionOffset = zkClient.getConsumerOffset(groupId, topicPartition);
   if (optionOffset.nonEmpty()) {
    Long offset = (Long) optionOffset.get();
    OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(offset);
    offsets.put(topicPartition, offsetAndMetadata);
   }
  }
  return offsets;
 }

Most used methods (a combined usage sketch follows this list)

  • apply
  • <init>
  • topicExists
  • getAllBrokersInCluster
  • getConsumerOffset
  • getLeaderForPartition
  • isTopicMarkedForDeletion
  • close
  • createPartitionReassignment
  • getBroker
  • getEntityConfigs
  • getPartitionAssignmentForTopics
  • reassignPartitionsInProgress
  • setOrCreateEntityConfigs
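
Taken together, the typical call sequence is: construct the client with apply(), query ZooKeeper, then close(). The sketch below is not one of the indexed results; the connect string, timeouts, metric group/type names and topic name are placeholder assumptions.

KafkaZkClient zkClient = KafkaZkClient.apply("localhost:2181", // hypothetical connect string
  JaasUtils.isZkSecurityEnabled(), 30000 /* session timeout ms */, 30000 /* connection timeout ms */,
  10 /* max in-flight requests */, Time.SYSTEM, "example.metrics", "SessionExpireListener"); // placeholder metric group/type
try {
 boolean exists = zkClient.topicExists("my-topic"); // hypothetical topic name
 int brokerCount = zkClient.getAllBrokersInCluster().size(); // returns a Scala Seq of Broker
 System.out.printf("topic exists: %s, brokers registered: %d%n", exists, brokerCount);
} finally {
 zkClient.close(); // always release the underlying ZooKeeper session
}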
