/**
 * Converts a map of Kafka-native {@code TopicPartition -> OffsetAndTimestamp} entries
 * into the equivalent map of Vert.x wrapper types.
 * <p>
 * Entries whose value is {@code null} (i.e. partitions for which the timestamp search
 * produced no offset) are dropped from the result.
 *
 * @param topicPartitionOffsetAndTimestamps map keyed by Kafka-native topic partitions
 * @return a new map keyed by Vert.x topic partitions, null-valued entries excluded
 */
public static Map<TopicPartition, OffsetAndTimestamp> fromTopicPartitionOffsetAndTimestamp(Map<org.apache.kafka.common.TopicPartition, org.apache.kafka.clients.consumer.OffsetAndTimestamp> topicPartitionOffsetAndTimestamps) {
  return topicPartitionOffsetAndTimestamps.entrySet()
      .stream()
      // skip partitions where the lookup yielded no offset
      .filter(entry -> entry.getValue() != null)
      .collect(Collectors.toMap(
          entry -> new TopicPartition(entry.getKey().topic(), entry.getKey().partition()),
          entry -> new OffsetAndTimestamp(entry.getValue().offset(), entry.getValue().timestamp())));
}
}
@Override public void offsetsForTimes(TopicPartition topicPartition, Long timestamp, Handler<AsyncResult<OffsetAndTimestamp>> handler) { Map<TopicPartition, Long> topicPartitions = new HashMap<>(); topicPartitions.put(topicPartition, timestamp); this.stream.offsetsForTimes(Helper.toTopicPartitionTimes(topicPartitions), done -> { if(done.succeeded()) { if (done.result().values().size() == 1) { org.apache.kafka.common.TopicPartition kTopicPartition = new org.apache.kafka.common.TopicPartition (topicPartition.getTopic(), topicPartition.getPartition()); org.apache.kafka.clients.consumer.OffsetAndTimestamp offsetAndTimestamp = done.result().get(kTopicPartition); if(offsetAndTimestamp != null) { OffsetAndTimestamp resultOffsetAndTimestamp = new OffsetAndTimestamp(offsetAndTimestamp.offset(), offsetAndTimestamp.timestamp()); handler.handle(Future.succeededFuture(resultOffsetAndTimestamp)); } // offsetAndTimestamp is null, i.e., search by timestamp did not lead to a result else { handler.handle(Future.succeededFuture()); } } else if (done.result().values().size() == 0) { handler.handle(Future.succeededFuture()); } else { handler.handle(Future.failedFuture("offsetsForTimes should return exactly one OffsetAndTimestamp")); } } else { handler.handle(Future.failedFuture(done.cause())); } }); }