/**
 * Get the first offset for the given partitions.
 * <p>
 * This method does not change the current consumer position of the partitions.
 *
 * @see #seekToBeginning(Collection)
 *
 * @param partitions the partitions to get the earliest offsets
 * @return the earliest available offsets for the given partitions
 * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
 * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details
 * @throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before
 *         expiration of the configured {@code default.api.timeout.ms}
 */
@Override
public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions) {
    return beginningOffsets(partitions, Duration.ofMillis(defaultApiTimeoutMs));
}
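The single-argument overload above delegates to a two-argument variant that takes an explicit timeout in place of the configured default.api.timeout.ms. A minimal usage sketch, assuming an already configured consumer and a hypothetical topic name:

// Illustrative usage, assuming a configured KafkaConsumer named "consumer"
// and a hypothetical topic "my-topic": fetch the earliest offset of one
// partition with an explicit five-second timeout.
TopicPartition tp = new TopicPartition("my-topic", 0);
Map<TopicPartition, Long> earliest =
        consumer.beginningOffsets(Collections.singleton(tp), Duration.ofSeconds(5));
System.out.println("Earliest offset: " + earliest.get(tp));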
@Override
public boolean exists() {
    boolean exists = false;
    try (KafkaConsumer<String, String> historyConsumer = new KafkaConsumer<>(consumerConfig.asProperties())) {
        // First, check if the topic exists in the list of all topics
        if (historyConsumer.listTopics().containsKey(topicName)) {
            // Then check whether the topic is empty: it only counts as existing
            // if its end offset is ahead of its beginning offset
            Set<TopicPartition> historyTopic = Collections.singleton(new TopicPartition(topicName, PARTITION));
            Map<TopicPartition, Long> beginningOffsets = historyConsumer.beginningOffsets(historyTopic);
            Map<TopicPartition, Long> endOffsets = historyConsumer.endOffsets(historyTopic);
            Long beginOffset = beginningOffsets.values().iterator().next();
            Long endOffset = endOffsets.values().iterator().next();
            exists = endOffset > beginOffset;
        }
    }
    return exists;
}
@Override
public String formatAsText() {
    try (KafkaConsumer<Object, Object> consumer = new KafkaConsumer<>(consumerProperties)) {
        List<PartitionInfo> partitionsInfo = consumer.partitionsFor(topic);
        List<TopicPartition> topicPartitions = partitionsInfo.stream()
                .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
                .collect(Collectors.toList());
        Map<TopicPartition, Long> endOffsets = consumer.endOffsets(topicPartitions);
        Map<TopicPartition, Long> startOffsets = consumer.beginningOffsets(topicPartitions);
        return partitionsInfo.stream()
                .map(partitionInfo -> String.format("%s [start offset = [%s], end offset = [%s]]",
                        partitionInfo,
                        startOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition())),
                        endOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))))
                .collect(Collectors.joining("\n"));
    } catch (Exception e) {
        return String.format("ERROR fetching metadata for Topic [%s], Connection String [%s], Error [%s]",
                topic, consumerProperties.getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG), e.getMessage());
    }
}
@Test(expected = AuthenticationException.class)
public void testBeginningOffsetsAuthenticationFailure() {
    final KafkaConsumer<String, String> consumer = consumerWithPendingAuthentication();
    consumer.beginningOffsets(Collections.singleton(tp0));
}
// Excerpt: the throw belongs to a preceding catch block; the start and end
// offsets are then fetched for the topic's partitions.
    throw new RuntimeException(e);
}
startOffsetsMap = consumer.beginningOffsets(topicPartitions);
endOffsetsMap = consumer.endOffsets(topicPartitions);
try {
    prepareConsumerOffset();
    Map<TopicPartition, Long> beginningOffsets = _consumer.beginningOffsets(_consumer.assignment());
    Map<TopicPartition, Long> endOffsets = _consumer.endOffsets(_consumer.assignment());
    LOG.debug("Loading beginning offsets: {}, loading end offsets: {}", beginningOffsets, endOffsets);
/**
 * Gets the total message count for the topic.
 * <b>WARNING: Don't use with compacted topics</b>, where the beginning/end
 * offset difference overcounts records that have been compacted away.
 */
@SuppressWarnings("unchecked")
public long getCount(String kafkaBrokers, String topic) {
    KafkaConsumer consumer = buildConsumer(kafkaBrokers);
    try {
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();
        List<PartitionInfo> partitionInfos = topics.get(topic);
        if (partitionInfos == null) {
            logger.warn("Partition information was not found for topic {}", topic);
            return 0;
        }
        Collection<TopicPartition> partitions = new ArrayList<>();
        for (PartitionInfo partitionInfo : partitionInfos) {
            partitions.add(new TopicPartition(topic, partitionInfo.partition()));
        }
        Map<TopicPartition, Long> endingOffsets = consumer.endOffsets(partitions);
        Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
        return diffOffsets(beginningOffsets, endingOffsets);
    } finally {
        consumer.close();
    }
}
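The diffOffsets helper is not shown in this snippet; a minimal sketch, assuming it simply sums the per-partition gaps between end and beginning offsets:

// Hypothetical sketch of the diffOffsets helper referenced above:
// total count = sum over partitions of (end offset - beginning offset).
private long diffOffsets(Map<TopicPartition, Long> beginnings, Map<TopicPartition, Long> endings) {
    long total = 0;
    for (Map.Entry<TopicPartition, Long> entry : endings.entrySet()) {
        Long beginning = beginnings.get(entry.getKey());
        if (beginning != null) {
            total += entry.getValue() - beginning;
        }
    }
    return total;
}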
public static long getOffset(KafkaConsumer<byte[], byte[]> consumer, String topic, int partition,
                             boolean forceFromStart) {
    TopicPartition topicAndPartition = new TopicPartition(topic, partition);
    Map<TopicPartition, Long> offsetMap;
    if (forceFromStart) {
        offsetMap = consumer.beginningOffsets(Collections.singletonList(topicAndPartition));
    } else {
        offsetMap = consumer.endOffsets(Collections.singletonList(topicAndPartition));
    }
    Long offset = offsetMap.get(topicAndPartition);
    // NO_OFFSET is a sentinel constant defined elsewhere in the class
    return offset != null ? offset : NO_OFFSET;
}
/**
 * Fetches the earliest offsets available for the given topic-partitions.
 */
public static Map<TopicPartition, Long> getEarliestLeaderOffsets(@NonNull final KafkaConsumer kafkaConsumer,
        @NotEmpty final String topicName, @NonNull final Set<TopicPartition> topicPartitions) {
    kafkaConsumer.assign(topicPartitions);
    verifyTopicPartitions(kafkaConsumer, topicName, topicPartitions);
    final Map<TopicPartition, Long> earliestLeaderOffsets = kafkaConsumer.beginningOffsets(topicPartitions);
    log.info("topic-partition earliest offsets :{}", earliestLeaderOffsets);
    return earliestLeaderOffsets;
}
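The verifyTopicPartitions helper is not included in the snippet; a minimal sketch, assuming it checks that every requested partition actually belongs to the named topic:

// Hypothetical helper, not part of the original snippet: verifies that each
// requested partition is one the broker reports for the topic.
private static void verifyTopicPartitions(final KafkaConsumer kafkaConsumer, final String topicName,
                                          final Set<TopicPartition> topicPartitions) {
    final Set<Integer> knownPartitions = ((List<PartitionInfo>) kafkaConsumer.partitionsFor(topicName)).stream()
            .map(PartitionInfo::partition)
            .collect(Collectors.toSet());
    for (final TopicPartition tp : topicPartitions) {
        if (!topicName.equals(tp.topic()) || !knownPartitions.contains(tp.partition())) {
            throw new IllegalArgumentException("Unknown topic-partition: " + tp);
        }
    }
}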
private List<PartitionOffset> getHeadOffsets() {
    final Map<TopicPartition, Long> results = kafkaConsumer.beginningOffsets(getAllPartitions());
    final List<PartitionOffset> offsets = new ArrayList<>();
    for (final Map.Entry<TopicPartition, Long> entry : results.entrySet()) {
        offsets.add(new PartitionOffset(entry.getKey().partition(), entry.getValue()));
    }
    return offsets;
}
@Override
public void reset() {
    // We just commit the first offset of every assigned partition
    log.info("Reset committed offsets for all assigned partitions: " + topicPartitions + " tailer: " + id);
    Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(topicPartitions);
    Map<TopicPartition, OffsetAndMetadata> offsetToCommit = new HashMap<>();
    beginningOffsets.forEach((tp, offset) -> offsetToCommit.put(tp, new OffsetAndMetadata(offset)));
    consumer.commitSync(offsetToCommit);
    lastCommittedOffsets.clear();
    toLastCommitted();
}
private void seekToHead() {
    // Get all available partitions
    final List<TopicPartition> topicPartitions = getAllPartitions();

    // Get head offsets for each partition and seek to them
    final Map<TopicPartition, Long> headOffsets = kafkaConsumer.beginningOffsets(topicPartitions);
    seek(headOffsets);
}
@Override
public void reset(LogPartition partition) {
    log.info("Reset committed offset for partition: " + partition + " tailer: " + id);
    TopicPartition topicPartition = new TopicPartition(ns.getTopicName(partition.name()), partition.partition());
    Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(Collections.singleton(topicPartition));
    Map<TopicPartition, OffsetAndMetadata> offsetToCommit = new HashMap<>();
    beginningOffsets.forEach((tp, offset) -> offsetToCommit.put(tp, new OffsetAndMetadata(offset)));
    consumer.commitSync(offsetToCommit);
    lastCommittedOffsets.remove(topicPartition);
    seek(new LogOffsetImpl(partition, beginningOffsets.get(topicPartition)));
}
boolean areOffsetsAvailable(KafkaTopic topic) {
    try {
        return storage.readPartitionsIds(topic.name().asString()).stream().allMatch(partition -> {
            TopicPartition topicPartition = new TopicPartition(topic.name().asString(), partition);
            consumerPool.get(topic, partition).beginningOffsets(Collections.singleton(topicPartition));
            return true;
        });
    } catch (PartitionsNotFoundForGivenTopicException | BrokerNotFoundForPartitionException
            | org.apache.kafka.common.errors.TimeoutException e) {
        logger.debug("Offsets reported as not available due to failure", e);
        return false;
    }
}
/**
 * Seek to the HEAD of a topic.
 */
public ConsumerState toHead() {
    // Get all available partitions
    final List<TopicPartition> topicPartitions = getAllPartitions();

    // Get head offsets for each partition
    final Map<TopicPartition, Long> headOffsets = kafkaConsumer.beginningOffsets(topicPartitions);

    // Loop over each partition
    for (final TopicPartition topicPartition : topicPartitions) {
        final long newOffset = headOffsets.get(topicPartition);
        logger.info("Resetting Partition: {} To Head Offset: {}", topicPartition.partition(), newOffset);

        // Seek to the head offset
        kafkaConsumer.seek(topicPartition, newOffset);
    }
    commit();
    return getConsumerState();
}
/**
 * Seek to the previous 'page' of records.
 */
public void previous() {
    // Get all available partitions
    final List<TopicPartition> topicPartitions = getAllPartitions();

    // Get head offsets for each partition
    final Map<TopicPartition, Long> headOffsets = kafkaConsumer.beginningOffsets(topicPartitions);

    // Loop over each partition
    for (final TopicPartition topicPartition : topicPartitions) {
        // Step back over the page just read plus one full page
        final long headOffset = headOffsets.get(topicPartition);
        final long currentOffset = kafkaConsumer.position(topicPartition);
        long newOffset = currentOffset - (clientConfig.getMaxResultsPerPartition() * 2);

        // Can't go before the head position!
        if (newOffset < headOffset) {
            newOffset = headOffset;
        }
        logger.info("Partition: {} Previous Offset: {} New Offset: {}", topicPartition.partition(), currentOffset, newOffset);

        // Seek to the earlier offset
        kafkaConsumer.seek(topicPartition, newOffset);
    }
    commit();
}
@Test
public void testStartNoStoredPartitionsStartBeginning() throws Exception {
    opts.put(KafkaSourceConnectorConfig.CONSUMER_AUTO_OFFSET_RESET_CONFIG, "earliest");
    config = new KafkaSourceConnectorConfig(opts);
    props = new Properties();
    props.putAll(config.allWithPrefix(KafkaSourceConnectorConfig.CONSUMER_PREFIX));

    TopicPartition firstTopicPartition = new TopicPartition(FIRST_TOPIC, FIRST_PARTITION);
    Collection<TopicPartition> topicPartitions = new ArrayList<>();
    topicPartitions.add(firstTopicPartition);
    Map<TopicPartition, Long> beginningOffsets = Collections.singletonMap(firstTopicPartition, FIRST_OFFSET);

    EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
    EasyMock.expect(offsetStorageReader.offsets(EasyMock.<List<Map<String, String>>>anyObject()))
            .andReturn(new HashMap<>());
    PowerMock.expectNew(KafkaConsumer.class, new Class[] { Properties.class }, config.getKafkaConsumerProperties())
            .andReturn(consumer);
    EasyMock.expect(consumer.beginningOffsets(topicPartitions)).andReturn(beginningOffsets);
    consumer.assign(topicPartitions);
    EasyMock.expectLastCall();
    consumer.seek(firstTopicPartition, FIRST_OFFSET);
    EasyMock.expectLastCall();

    replayAll();

    objectUnderTest.start(opts);

    verifyAll();
}