/**
 * Get the end offsets for the given partitions. In the default {@code read_uncommitted} isolation level, the end
 * offset is the high watermark (that is, the offset of the last successfully replicated message plus one). For
 * {@code read_committed} consumers, the end offset is the last stable offset (LSO), which is the minimum of
 * the high watermark and the smallest offset of any open transaction. Finally, if the partition has never been
 * written to, the end offset is 0.
 *
 * <p>
 * This method does not change the current consumer position of the partitions.
 *
 * @see #seekToEnd(Collection)
 *
 * @param partitions the partitions to get the end offsets.
 * @return The end offsets for the given partitions.
 * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
 * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details
 * @throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before
 *         the amount of time allocated by {@code request.timeout.ms} expires
 */
@Override
public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions) {
    return endOffsets(partitions, Duration.ofMillis(requestTimeoutMs));
}
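The Javadoc above describes the isolation-level semantics of endOffsets(). A minimal caller-side sketch follows; the broker address, topic name, and partition number are placeholders, not values from any of the snippets below. It shows that under read_committed the returned value is the LSO rather than the high watermark.

import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class EndOffsetsExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // With read_committed, endOffsets() returns the last stable offset (LSO)
        // instead of the high watermark.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("events", 0); // hypothetical topic/partition
            // Does not move the consumer's position; purely a metadata lookup.
            Map<TopicPartition, Long> endOffsets =
                    consumer.endOffsets(Collections.singleton(tp), Duration.ofSeconds(10));
            System.out.println("End offset (LSO under read_committed): " + endOffsets.get(tp));
        }
    }
}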
private Long getEndOffsetOfDbHistoryTopic(Long previousEndOffset, KafkaConsumer<String, String> historyConsumer) {
    Map<TopicPartition, Long> offsets = historyConsumer.endOffsets(
            Collections.singleton(new TopicPartition(topicName, PARTITION)));
    Long endOffset = offsets.entrySet().iterator().next().getValue();

    // The end offset should never change during recovery; this check is just a (rather weak) attempt
    // to spot other connectors that accidentally share the same history topic
    if (previousEndOffset != null && !previousEndOffset.equals(endOffset)) {
        throw new IllegalStateException("Detected changed end offset of database history topic (previous: "
                + previousEndOffset + ", current: " + endOffset
                + "). Make sure that the same history topic isn't shared by multiple connector instances.");
    }

    return endOffset;
}
@Override
public boolean exists() {
    boolean exists = false;
    try (KafkaConsumer<String, String> historyConsumer = new KafkaConsumer<>(consumerConfig.asProperties())) {
        // First, check if the topic exists in the list of all topics
        if (historyConsumer.listTopics().containsKey(topicName)) {
            // Then check whether the topic is non-empty by comparing its beginning and end offsets
            Set<TopicPartition> historyTopic = Collections.singleton(new TopicPartition(topicName, PARTITION));
            Map<TopicPartition, Long> beginningOffsets = historyConsumer.beginningOffsets(historyTopic);
            Map<TopicPartition, Long> endOffsets = historyConsumer.endOffsets(historyTopic);
            Long beginOffset = beginningOffsets.entrySet().iterator().next().getValue();
            Long endOffset = endOffsets.entrySet().iterator().next().getValue();
            exists = endOffset > beginOffset;
        }
    }
    return exists;
}
@Override
public String formatAsText() {
    try (KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(consumerProperties)) {
        List<PartitionInfo> partitionsInfo = consumer.partitionsFor(topic);
        List<TopicPartition> topicPartitions = partitionsInfo.stream()
                .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
                .collect(Collectors.toList());

        Map<TopicPartition, Long> endOffsets = consumer.endOffsets(topicPartitions);
        Map<TopicPartition, Long> startOffsets = consumer.beginningOffsets(topicPartitions);

        return partitionsInfo.stream()
                .map(partitionInfo -> String.format("%s [start offset = [%s], end offset = [%s]]",
                        partitionInfo.toString(),
                        startOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition())),
                        endOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))))
                .collect(Collectors.joining("\n"));
    }
    catch (Exception e) {
        return String.format("ERROR fetching metadata for Topic [%s], Connection String [%s], Error [%s]",
                topic, consumerProperties.getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG), e.getMessage());
    }
}
@Test(expected = AuthenticationException.class)
public void testEndOffsetsAuthenticationFailure() {
    final KafkaConsumer<String, String> consumer = consumerWithPendingAuthentication();
    consumer.endOffsets(Collections.singleton(tp0));
}
endOffsetsMap = consumer.endOffsets(topicPartitions);
prepareConsumerOffset();
Map<TopicPartition, Long> beginningOffsets = _consumer.beginningOffsets(_consumer.assignment());
Map<TopicPartition, Long> endOffsets = _consumer.endOffsets(_consumer.assignment());
LOG.debug("Loading beginning offsets: {}, loading end offsets: {}", beginningOffsets, endOffsets);
for (Map.Entry<TopicPartition, Long> entry : beginningOffsets.entrySet()) {
/**
 * Gets the last offset for each partition of the given topic.
 */
@SuppressWarnings("unchecked")
public Map<Integer, Long> getEndingOffsets(String kafkaBrokers, String topic) {
    Map<Integer, Long> retval = new HashMap<>();
    KafkaConsumer consumer = buildConsumer(kafkaBrokers);
    try {
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();
        List<PartitionInfo> partitionInfos = topics.get(topic);
        if (partitionInfos == null) {
            logger.warn("Partition information was not found for topic {}", topic);
        }
        else {
            Collection<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo partitionInfo : partitionInfos) {
                partitions.add(new TopicPartition(topic, partitionInfo.partition()));
            }
            Map<TopicPartition, Long> endingOffsets = consumer.endOffsets(partitions);
            for (Map.Entry<TopicPartition, Long> entry : endingOffsets.entrySet()) {
                retval.put(entry.getKey().partition(), entry.getValue());
            }
        }
    }
    finally {
        consumer.close();
    }
    return retval;
}
/**
 * Gets the total message count for the topic.
 * <b>WARNING: Don't use with compacted topics</b>
 */
@SuppressWarnings("unchecked")
public long getCount(String kafkaBrokers, String topic) {
    KafkaConsumer consumer = buildConsumer(kafkaBrokers);
    try {
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();
        List<PartitionInfo> partitionInfos = topics.get(topic);
        if (partitionInfos == null) {
            logger.warn("Partition information was not found for topic {}", topic);
            return 0;
        }
        else {
            Collection<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo partitionInfo : partitionInfos) {
                partitions.add(new TopicPartition(topic, partitionInfo.partition()));
            }
            Map<TopicPartition, Long> endingOffsets = consumer.endOffsets(partitions);
            Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
            return diffOffsets(beginningOffsets, endingOffsets);
        }
    }
    finally {
        consumer.close();
    }
}
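The diffOffsets helper referenced above is not part of the snippet. The compacted-topics warning implies it simply sums (end offset minus beginning offset) per partition, which over-counts once compaction or retention removes records. A hypothetical reconstruction under that assumption:

// Hypothetical sketch of the elided diffOffsets helper: not the original
// implementation, just the per-partition (end - beginning) sum that the
// compacted-topics warning in the Javadoc implies.
private static long diffOffsets(Map<TopicPartition, Long> beginningOffsets,
                                Map<TopicPartition, Long> endingOffsets) {
    long total = 0;
    for (Map.Entry<TopicPartition, Long> entry : endingOffsets.entrySet()) {
        Long begin = beginningOffsets.get(entry.getKey());
        if (begin != null) {
            total += entry.getValue() - begin;
        }
    }
    return total;
}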
public static long getOffset(KafkaConsumer<byte[], byte[]> consumer, String topic, int partition,
                             boolean forceFromStart) {
    TopicPartition topicAndPartition = new TopicPartition(topic, partition);
    Map<TopicPartition, Long> offsetMap;
    if (forceFromStart) {
        offsetMap = consumer.beginningOffsets(Arrays.asList(topicAndPartition));
    }
    else {
        offsetMap = consumer.endOffsets(Arrays.asList(topicAndPartition));
    }
    Long offset = offsetMap.get(topicAndPartition);
    return offset != null ? offset : NO_OFFSET;
}
/**
 * Fetches the latest offsets available for the given topic-partitions.
 */
public static Map<TopicPartition, Long> getLatestLeaderOffsets(@NonNull final KafkaConsumer kafkaConsumer,
        @NotEmpty final String topicName, @NonNull final Set<TopicPartition> topicPartitions) {
    kafkaConsumer.assign(topicPartitions);
    verifyTopicPartitions(kafkaConsumer, topicName, topicPartitions);
    final Map<TopicPartition, Long> latestLeaderOffsets = kafkaConsumer.endOffsets(topicPartitions);
    log.info("topic-partition latest offsets: {}", latestLeaderOffsets);
    return latestLeaderOffsets;
}
@Override
public TrackingToken fetchToken(final String processorName, final int segment) throws UnableToClaimTokenException {
    // segment is currently only 0 = root, but will eventually be used for partitioning/parallelization
    log.info("Fetch token {}, {}", processorName, segment);
    Optional<Long> offset = Optional.empty();
    try {
        offset = Optional.of(consumer.endOffsets(Arrays.asList(this.topicPartition)).get(this.topicPartition));
    }
    catch (Exception e) {
        log.error("Error claiming token", e);
        throw new UnableToClaimTokenException("Error claiming a token for processor " + processorName);
    }
    return new GlobalSequenceTrackingToken(offset.orElse(0L));
}
private long getEndingOffset(KafkaConsumer<byte[], byte[]> kafkaConsumer, KafkaTopic topicName, int partition) {
    TopicPartition topicPartition = new TopicPartition(topicName.name().asString(), partition);
    Map<TopicPartition, Long> offsets = kafkaConsumer.endOffsets(Collections.singleton(topicPartition));
    return Optional.ofNullable(offsets.get(topicPartition))
            .orElseThrow(() -> new OffsetNotFoundException(
                    String.format("Ending offset for partition %s not found", topicPartition)));
}
private List<PartitionOffset> getTailOffsets() {
    final Map<TopicPartition, Long> results = kafkaConsumer.endOffsets(getAllPartitions());

    final List<PartitionOffset> offsets = new ArrayList<>();
    for (final Map.Entry<TopicPartition, Long> entry : results.entrySet()) {
        offsets.add(new PartitionOffset(entry.getKey().partition(), entry.getValue()));
    }
    return offsets;
}
private void seekToTail() {
    // Get all available partitions
    final List<TopicPartition> topicPartitions = getAllPartitions();

    // Get tail (end) offsets for each partition
    final Map<TopicPartition, Long> tailOffsets = kafkaConsumer.endOffsets(topicPartitions);
    seek(tailOffsets);
}
@Override
public long getLag() {
    // Periodically cache the end offset
    if (endOffset == null || endOffsetWatch.getTime() > END_OFFSET_REFRESH_MS_DEFAULT) {
        Map<TopicPartition, Long> offsets =
                consumer.endOffsets(Collections.singletonList(new TopicPartition(topicName, 0)));
        endOffset = offsets.get(new TopicPartition(topicName, 0));
        endOffsetWatch.reset();
        endOffsetWatch.start();
    }

    // Because the end offset is only updated periodically, it's possible to see negative lag. Send 0 instead.
    long lag = endOffset - (getCurrentOffset() == null ? 0 : getCurrentOffset());
    return lag < 0 ? 0 : lag;
}
/**
 * Seek to the TAIL of a topic.
 */
public ConsumerState toTail() {
    // Get all available partitions
    final List<TopicPartition> topicPartitions = getAllPartitions();

    // Get tail (end) offsets for each partition
    final Map<TopicPartition, Long> tailOffsets = kafkaConsumer.endOffsets(topicPartitions);

    // Loop over each partition
    for (final TopicPartition topicPartition : topicPartitions) {
        final long newOffset = tailOffsets.get(topicPartition);
        logger.info("Resetting Partition: {} To Tail Offset: {}", topicPartition.partition(), newOffset);

        // Seek to the tail offset
        kafkaConsumer.seek(topicPartition, newOffset);
    }
    commit();
    return getConsumerState();
}
private void mockConsumerInitialization() throws Exception {
    TopicPartition firstTopicPartition = new TopicPartition(FIRST_TOPIC, FIRST_PARTITION);
    Collection<TopicPartition> topicPartitions = new ArrayList<>();
    topicPartitions.add(firstTopicPartition);
    Map<TopicPartition, Long> endOffsets = Collections.singletonMap(firstTopicPartition, FIRST_OFFSET);

    EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
    EasyMock.expect(offsetStorageReader.offsets(EasyMock.<List<Map<String, String>>>anyObject()))
            .andReturn(new HashMap<>());
    PowerMock.expectNew(KafkaConsumer.class, new Class[] { Properties.class }, config.getKafkaConsumerProperties())
            .andReturn(consumer);
    EasyMock.expect(consumer.endOffsets(topicPartitions)).andReturn(endOffsets);
    consumer.assign(topicPartitions);
    EasyMock.expectLastCall();
    consumer.seek(firstTopicPartition, FIRST_OFFSET);
    EasyMock.expectLastCall();
}