@Override
public FetchResponse fetch(FetchRequest request) {
  scala.collection.Traversable<Tuple2<TopicAndPartition, PartitionFetchInfo>> requestInfo =
      request.requestInfo();
  java.util.Map<TopicAndPartition, Short> errorMap = new HashMap<>();
  while (requestInfo.headOption().isDefined()) {
    // jfim: IntelliJ erroneously thinks the following line is an incompatible type error, but that's only
    // because it doesn't understand Scala covariance when called from Java (i.e. it thinks head() is of
    // type A even though it's really of type Tuple2[TopicAndPartition, PartitionFetchInfo])
    Tuple2<TopicAndPartition, PartitionFetchInfo> t2 = requestInfo.head();
    TopicAndPartition topicAndPartition = t2._1();
    PartitionFetchInfo partitionFetchInfo = t2._2();
    if (!topicAndPartition.topic().equals(topicName)) {
      errorMap.put(topicAndPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
    } else if (partitionLeaderIndices.length <= topicAndPartition.partition()) {
      // <= rather than < here: a partition index equal to the array length would otherwise fall
      // through to the lookup below and throw ArrayIndexOutOfBoundsException
      errorMap.put(topicAndPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
    } else if (partitionLeaderIndices[topicAndPartition.partition()] != index) {
      errorMap.put(topicAndPartition, Errors.NOT_LEADER_FOR_PARTITION.code());
    } else {
      // Do nothing, we'll generate a fake message
    }
    requestInfo = requestInfo.tail();
  }
  return new MockFetchResponse(errorMap);
}
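// The head()/tail() loop above is correct but verbose. A minimal alternative sketch, assuming
// Scala 2.11/2.12's JavaConverters is on the classpath (it is wherever the old Kafka Scala API
// lives): convert the Traversable once, then iterate with a plain for-each.
Iterable<Tuple2<TopicAndPartition, PartitionFetchInfo>> entries =
    scala.collection.JavaConverters.asJavaIterableConverter(request.requestInfo().toIterable()).asJava();
for (Tuple2<TopicAndPartition, PartitionFetchInfo> t2 : entries) {
  TopicAndPartition topicAndPartition = t2._1();
  PartitionFetchInfo partitionFetchInfo = t2._2();
  // ... same per-partition error classification as above ...
}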
private void injectMetrics(final TopicAndPartition topicAndPartition) {
  if (!partitionInjected.contains(topicAndPartition)) {
    Metrics.getRegistry().register(
        String.format(OFFSET_LAG_NAME_FORMAT, topicAndPartition.topic(), topicAndPartition.partition()),
        new Gauge<Long>() {
          @Override
          public Long getValue() {
            if (partitionLag.containsKey(topicAndPartition)) {
              return partitionLag.get(topicAndPartition);
            } else {
              return -1L;
            }
          }
        });
    partitionInjected.add(topicAndPartition);
  }
}
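// OFFSET_LAG_NAME_FORMAT is not shown in this snippet. A plausible definition (an assumption,
// not the project's actual constant) consistent with the gauge registration above:
private static final String OFFSET_LAG_NAME_FORMAT = "OffsetMonitorLag.%s.%d";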
private static long getLatestOffset(SimpleConsumer consumer, TopicAndPartition topicAndPartition) {
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
  requestInfo.put(topicAndPartition,
      new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
  kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo,
      kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if (response.hasError()) {
    logger.warn("Failed to fetch offset for {} due to {}", topicAndPartition,
        response.errorCode(topicAndPartition.topic(), topicAndPartition.partition()));
    return -1;
  }
  long[] offsets = response.offsets(topicAndPartition.topic(), topicAndPartition.partition());
  return offsets[0];
}
long latestOffset =
    latestOffsetResponse.offsets(topicAndPartition.topic(), topicAndPartition.partition())[0];
long earliestOffset =
    earliestOffsetResponse.offsets(topicAndPartition.topic(), topicAndPartition.partition())[0];
// Fragment: the call these arguments belong to is truncated in the source; it ends with the
// partition and the leader URI:
//     ... topicAndPartition.partition(), leader.getUri());
etlRequest.setLatestOffset(latestOffset);
etlRequest.setEarliestOffset(earliestOffset);
private static String getOffsetLagName(TopicAndPartition tp) {
  return "OffsetMonitorLag." + tp.topic().replace('.', '_') + "." + tp.partition();
}
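// Dots in the topic name are escaped so that the only unescaped dots in the metric name are the
// hierarchy separators. A hypothetical example:
String name = getOffsetLagName(new TopicAndPartition("billing.events", 3));
// name == "OffsetMonitorLag.billing_events.3"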
@Override
@SuppressWarnings("unchecked")
public int readLeaderForPartition(TopicAndPartition topicAndPartition) {
  try {
    TopicPartition topicPartition =
        new TopicPartition(topicAndPartition.topic(), topicAndPartition.partition());
    // getLeaderForPartition returns a Scala Option; get() throws when no leader is registered,
    // which the catch below turns into a BrokerNotFoundForPartitionException
    return (int) kafkaZkClient.getLeaderForPartition(topicPartition).get();
  } catch (Exception exception) {
    throw new BrokerNotFoundForPartitionException(topicAndPartition.topic(),
        topicAndPartition.partition(), exception);
  }
}
protected void updateOffset() {
  logger.debug("OffsetMonitor updates offset with leaders=" + partitionLeader);
  offsetMonitorFailureCount.set(0);
  for (Map.Entry<TopicAndPartition, BrokerEndPoint> entry : partitionLeader.entrySet()) {
    String leaderBroker = getHostPort(entry.getValue());
    TopicAndPartition tp = entry.getKey();
    if (StringUtils.isEmpty(leaderBroker)) {
      logger.warn("{} does not have a leader", tp);
    } else {
      try {
        cronExecutor.submit(updateOffsetTask(leaderBroker, tp));
      } catch (RejectedExecutionException re) {
        offsetMonitorFailureCount.getAndAdd(1);
        logger.warn(String.format("cronExecutor is full! Dropping task for topic: %s, partition: %d",
            tp.topic(), tp.partition()), re);
        throw re;
      } catch (Throwable t) {
        offsetMonitorFailureCount.getAndAdd(1);
        logger.error(String.format("cronExecutor got a throwable! Dropping task for topic: %s, partition: %d",
            tp.topic(), tp.partition()), t);
        throw t;
      }
    }
  }
}
/**
 * Get stuck topic partitions via the offset monitor.
 *
 * @return the topic partitions that have made no progress for at least _movePartitionAfterStuckMillis
 */
private Set<TopicPartition> getStuckTopicPartitions() {
  Set<TopicPartition> partitions = new HashSet<>();
  if (_movePartitionAfterStuckMillis <= 0) {
    return partitions;
  }
  Map<TopicAndPartition, TopicPartitionLag> noProgressMap =
      _helixMirrorMakerManager.getOffsetMonitor().getNoProgressTopicToOffsetMap();
  long now = System.currentTimeMillis();
  for (Map.Entry<TopicAndPartition, TopicPartitionLag> entry : noProgressMap.entrySet()) {
    TopicPartitionLag lastLag = entry.getValue();
    if (now - lastLag.getTimeStamp() > _movePartitionAfterStuckMillis) {
      partitions.add(new TopicPartition(entry.getKey().topic(), entry.getKey().partition()));
    }
  }
  return partitions;
}
// A null read means no offset has been committed for this partition; report -1 in that case
Object obj = zk.readData(consumerOffsetPath + tp.topic() + "/" + tp.partition(), true);
long commitOffset = obj == null ? -1 : Long.parseLong(String.valueOf(obj));
/**
 * @param zookeeperHosts Zookeeper hosts, e.g. localhost:2181; for multiple Zookeepers use
 *                       host1:port1[,host2:port2,...]
 * @param groupID        consumer group to update
 * @param offsets        mapping of topic-and-partition to the offset to push to Zookeeper
 */
public void createOffsets(String zookeeperHosts, String groupID, Map<TopicAndPartition, Long> offsets) {
  try (SuperZkClient zkClient = new SuperZkClient(zookeeperHosts)) {
    for (Map.Entry<TopicAndPartition, Long> entry : offsets.entrySet()) {
      TopicAndPartition topicAndPartition = entry.getKey();
      ZKGroupTopicDirs topicDirs = new ZKGroupTopicDirs(groupID, topicAndPartition.topic());
      int partition = topicAndPartition.partition();
      long offset = entry.getValue();
      String partitionOffsetPath = topicDirs.consumerOffsetDir() + "/" + partition;
      ZkUtils.updatePersistentPath(zkClient, partitionOffsetPath, Long.toString(offset));
    }
  }
}
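// A usage sketch (topic name, group, and offsets are illustrative), seeding a consumer group's
// Zookeeper offsets before it starts:
Map<TopicAndPartition, Long> offsets = new HashMap<>();
offsets.put(new TopicAndPartition("billing.events", 0), 42L);
offsets.put(new TopicAndPartition("billing.events", 1), 17L);
createOffsets("localhost:2181", "billing-mirror", offsets);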
private long getEarliestOffset() {
  // EARLIEST_TIME aliases kafka.api.OffsetRequest.EarliestTime(); only query the broker when
  // there is no cached value yet
  if (earliestOffset <= 0) {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfoMap = new HashMap<>();
    requestInfoMap.put(topicAndPartition, new PartitionOffsetRequestInfo(EARLIEST_TIME, 1));
    OffsetRequest offsetRequest = new OffsetRequest(requestInfoMap,
        kafka.api.OffsetRequest.CurrentVersion(), CLIENT_ID);
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
    earliestOffset =
        offsetResponse.offsets(topicAndPartition.topic(), topicAndPartition.partition())[0];
  }
  return earliestOffset;
}
private long getLatestOffset(SimpleConsumer consumer, TopicAndPartition topicAndPartition) {
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
  requestInfo.put(topicAndPartition,
      new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
  kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo,
      kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if (response.hasError()) {
    logger.warn("Failed to fetch offset for {} due to {}", topicAndPartition,
        response.errorCode(topicAndPartition.topic(), topicAndPartition.partition()));
    return -1;
  }
  long[] offsets = response.offsets(topicAndPartition.topic(), topicAndPartition.partition());
  return offsets[0];
}
private long getLatestOffset() {
  // LATEST_TIME aliases kafka.api.OffsetRequest.LatestTime(); only query the broker when
  // there is no cached value yet
  if (latestOffset <= 0) {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfoMap = new HashMap<>();
    requestInfoMap.put(topicAndPartition, new PartitionOffsetRequestInfo(LATEST_TIME, 1));
    OffsetRequest offsetRequest = new OffsetRequest(requestInfoMap,
        kafka.api.OffsetRequest.CurrentVersion(), CLIENT_ID);
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
    latestOffset =
        offsetResponse.offsets(topicAndPartition.topic(), topicAndPartition.partition())[0];
  }
  return latestOffset;
}
private void createLeaderForPartition(TopicAndPartition topicAndPartition, int leaderId) throws Exception {
  String path = "/brokers/topics/" + topicAndPartition.topic() + "/partitions/"
      + topicAndPartition.partition() + "/state";
  zookeeperClient.create().creatingParentsIfNeeded().forPath(path);
  zookeeperClient.setData().forPath(path, getSampleLeaderDetails(leaderId).getBytes());
}
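// getSampleLeaderDetails is not shown. A minimal sketch, assuming it emits the JSON layout Kafka
// itself writes to /brokers/topics/<topic>/partitions/<partition>/state, with a single-broker ISR:
private String getSampleLeaderDetails(int leaderId) {
  return String.format(
      "{\"controller_epoch\":1,\"leader\":%d,\"version\":1,\"leader_epoch\":0,\"isr\":[%d]}",
      leaderId, leaderId);
}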
@Override
public void close() throws IOException {
  log.info("Topic: {}, broker: {}, partition: {} ~ num. processed messages {}",
      topicAndPartition.topic(), split.getBrokerId(), topicAndPartition.partition(),
      numProcessedMessages);
  if (numProcessedMessages > 0) {
    try (KafkaZkUtils zk = new KafkaZkUtils(
        conf.get(KafkaInputFormat.CONFIG_ZK_CONNECT),
        conf.getInt(KafkaInputFormat.CONFIG_ZK_SESSION_TIMEOUT_MS, 10000),
        // the session-timeout key is reused here for the connection timeout; if the input format
        // defines a separate connection-timeout key, that is likely what was intended
        conf.getInt(KafkaInputFormat.CONFIG_ZK_SESSION_TIMEOUT_MS, 10000))) {
      new CheckpointManager(conf, zk)
          .commitOffsets(split.getTopic(), split.getPartition(), nextOffsetToConsume - 1);
    }
  }
  consumer.close();
}
.collect(Collectors.toMap(entry -> entry.getKey().partition(), entry -> entry.getValue().offset()));
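// The line above is a bare collector fragment. For context, a self-contained sketch of the kind
// of stream it typically terminates; the map name and the TopicPartition/OffsetAndMetadata types
// are assumptions (the surrounding code may use the older TopicAndPartition instead):
Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
committedOffsets.put(new TopicPartition("billing.events", 0), new OffsetAndMetadata(42L));
committedOffsets.put(new TopicPartition("billing.events", 1), new OffsetAndMetadata(17L));
// Collapse to partition-number -> offset; only safe when every key shares one topic, since
// Collectors.toMap throws on duplicate keys
Map<Integer, Long> partitionToOffset = committedOffsets.entrySet().stream()
    .collect(Collectors.toMap(entry -> entry.getKey().partition(), entry -> entry.getValue().offset()));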
public long getLogEndOffset(String topic, int partition) {
  SimpleConsumer consumer = null;
  try {
    consumer = findLeaderConsumer(topic, partition);
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> infoMap = new HashMap<>();
    infoMap.put(topicAndPartition,
        new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    OffsetRequest request = new OffsetRequest(infoMap,
        kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse response = consumer.getOffsetsBefore(request);
    // Retrieve offsets from the response
    long[] offsets = response.hasError() ? null
        : response.offsets(topicAndPartition.topic(), topicAndPartition.partition());
    if (offsets == null || offsets.length <= 0) {
      short errorCode = response.errorCode(topicAndPartition.topic(), topicAndPartition.partition());
      // If the topic partition doesn't exist, use offset 0 without logging an error
      if (errorCode != ErrorMapping.UnknownTopicOrPartitionCode()) {
        log.warn("Failed to fetch latest offset for {}. Error: {}. Defaulting offset to 0.",
            topicAndPartition, errorCode);
      }
      return 0L;
    }
    return offsets[0];
  } catch (Exception ex) {
    log.error("Unable to retrieve offset", ex);
    return -1;
  } finally {
    // Close unconditionally so the error paths don't leak the consumer
    if (consumer != null) {
      consumer.close();
    }
  }
}
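// Hypothetical usage: compute the backlog for a partition relative to an application-tracked
// consumed offset (the topic name and offset here are illustrative):
long consumedOffset = 42L;  // e.g. the last offset the application processed
long logEnd = getLogEndOffset("billing.events", 0);
if (logEnd >= 0) {
  log.info("Partition backlog: {}", logEnd - consumedOffset);
}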