/**
 * Converts a map keyed by the Kafka-native {@code TopicPartition} into one keyed by the
 * Vert.x {@code TopicPartition} wrapper, carrying each offset value over unchanged.
 *
 * @param offsets map from Kafka topic/partition to offset
 * @return equivalent map using the Vert.x topic/partition type as key
 */
public static Map<TopicPartition, Long> fromTopicPartitionOffsets(Map<org.apache.kafka.common.TopicPartition, Long> offsets) {
  return offsets.entrySet().stream()
      .collect(Collectors.toMap(
          entry -> {
            org.apache.kafka.common.TopicPartition kafkaTp = entry.getKey();
            return new TopicPartition(kafkaTp.topic(), kafkaTp.partition());
          },
          Map.Entry::getValue));
}
/**
 * Converts a Vert.x {@code TopicPartition} into the Kafka-native
 * {@code org.apache.kafka.common.TopicPartition} with the same topic and partition.
 *
 * @param topicPartition the Vert.x topic/partition to convert
 * @return the Kafka-native equivalent
 */
public static org.apache.kafka.common.TopicPartition to(TopicPartition topicPartition) {
  return new org.apache.kafka.common.TopicPartition(
      topicPartition.getTopic(),
      topicPartition.getPartition());
}
/** Verifies the equals/hashCode contract for equal {@code TopicPartition} instances. */
@Test
public void testEquality(final TestContext context) {
  // Two instances with identical topic and partition values.
  final TopicPartition first = new TopicPartition("topic1", 0);
  final TopicPartition second = new TopicPartition("topic1", 0);
  // Null topics must also be handled by equals/hashCode.
  final TopicPartition nullTopicA = new TopicPartition(null, 0);
  final TopicPartition nullTopicB = new TopicPartition(null, 0);

  // Reflexive equality.
  context.assertEquals(first, first);
  context.assertEquals(first.hashCode(), first.hashCode());
  // Value equality: equal objects must have equal hash codes.
  context.assertEquals(first, second);
  context.assertEquals(first.hashCode(), second.hashCode());
  // Equality when the topic is null on both sides.
  context.assertEquals(nullTopicA, nullTopicB);
  context.assertEquals(nullTopicA.hashCode(), nullTopicB.hashCode());
}
/** Verifies that differing {@code TopicPartition} values are not equal, including against foreign types. */
@Test
public void testUnequality(final TestContext context) {
  final TopicPartition topic1Part0 = new TopicPartition("topic1", 0);
  final TopicPartition topic1Part1 = new TopicPartition("topic1", 1);
  final TopicPartition topic2Part0 = new TopicPartition("topic2", 0);
  final TopicPartition topic2Part1 = new TopicPartition("topic2", 1);
  // An unrelated type used to check cross-type inequality.
  final JsonObject foreignObject = new JsonObject();

  // Same topic, different partition.
  context.assertNotEquals(topic1Part0, topic1Part1);
  context.assertNotEquals(topic1Part0.hashCode(), topic1Part1.hashCode());
  // Same topic, different partition (second topic).
  context.assertNotEquals(topic2Part0, topic2Part1);
  context.assertNotEquals(topic2Part0.hashCode(), topic2Part1.hashCode());
  // A TopicPartition is never equal to an unrelated type.
  context.assertNotEquals(topic2Part0, foreignObject);
  context.assertNotEquals(topic2Part0.hashCode(), foreignObject.hashCode());
  // equals must reject null and foreign types outright.
  context.assertFalse(topic1Part0.equals(null));
  context.assertFalse(topic1Part0.equals(foreignObject));
}
}
// Appears machine-generated (conversion shim). Installs a partitions-revoked handler on the
// consumer: each revoked Vert.x TopicPartition is converted to a Map via its JSON form before
// the user handler is invoked; all null handlers/sets/elements are passed through as null.
// NOTE(review): the trailing partitionsAssignedHandler signature is continued on a later line
// outside this span; left untouched.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> partitionsRevokedHandler(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, io.vertx.core.Handler<java.util.Set<java.util.Map<String, Object>>> handler) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.partitionsRevokedHandler(handler != null ? event -> handler.handle(event != null ? event.stream().map(elt -> elt != null ? io.vertx.core.impl.ConversionHelper.fromJsonObject(elt.toJson()) : null).collect(java.util.stream.Collectors.toSet()) : null) : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> partitionsAssignedHandler(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, io.vertx.core.Handler<java.util.Set<java.util.Map<String, Object>>> handler) {
/** Verifies that differing {@code TopicPartition} values are not equal, including against foreign types. */
@Test
public void testUnequality(final TestContext context) {
  final TopicPartition firstTopicP0 = new TopicPartition("topic1", 0);
  final TopicPartition firstTopicP1 = new TopicPartition("topic1", 1);
  final TopicPartition secondTopicP0 = new TopicPartition("topic2", 0);
  final TopicPartition secondTopicP1 = new TopicPartition("topic2", 1);
  // An instance of an unrelated type for cross-type comparisons.
  final JsonObject unrelated = new JsonObject();

  // Different partitions on the same topic must not be equal.
  context.assertNotEquals(firstTopicP0, firstTopicP1);
  context.assertNotEquals(firstTopicP0.hashCode(), firstTopicP1.hashCode());
  context.assertNotEquals(secondTopicP0, secondTopicP1);
  context.assertNotEquals(secondTopicP0.hashCode(), secondTopicP1.hashCode());
  // Comparison against an unrelated type must fail.
  context.assertNotEquals(secondTopicP0, unrelated);
  context.assertNotEquals(secondTopicP0.hashCode(), unrelated.hashCode());
  // equals must reject null and foreign types.
  context.assertFalse(firstTopicP0.equals(null));
  context.assertFalse(firstTopicP0.equals(unrelated));
}
}
// Appears machine-generated (conversion shim). Installs a partitions-assigned handler on the
// consumer: each assigned Vert.x TopicPartition is converted to a Map via its JSON form before
// the user handler is invoked; null handlers/sets/elements are propagated as null.
// NOTE(review): the trailing seek signature is continued on a later line outside this span;
// left untouched.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> partitionsAssignedHandler(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, io.vertx.core.Handler<java.util.Set<java.util.Map<String, Object>>> handler) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.partitionsAssignedHandler(handler != null ? event -> handler.handle(event != null ? event.stream().map(elt -> elt != null ? io.vertx.core.impl.ConversionHelper.fromJsonObject(elt.toJson()) : null).collect(java.util.stream.Collectors.toSet()) : null) : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seek(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Map<String, Object> topicPartition, long offset) {
// Appears machine-generated (conversion shim). Resumes consumption on the given set of
// partitions: each Map element is converted to a Vert.x TopicPartition through its JsonObject
// form, and the async completion result is mapped back through ConversionHelper before the
// caller's completionHandler runs. Null sets/elements/handlers propagate as null.
// NOTE(review): the trailing partitionsRevokedHandler signature is continued on a later line
// outside this span; left untouched.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> resume(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Set<java.util.Map<String, Object>> topicPartitions, io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>> completionHandler) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.resume(topicPartitions != null ? topicPartitions.stream().map(elt -> elt != null ? new io.vertx.kafka.client.common.TopicPartition(io.vertx.core.impl.ConversionHelper.toJsonObject(elt)) : null).collect(java.util.stream.Collectors.toSet()) : null, completionHandler != null ? new io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>>() { public void handle(io.vertx.core.AsyncResult<java.lang.Void> ar) { completionHandler.handle(ar.map(event -> io.vertx.core.impl.ConversionHelper.fromObject(event))); } } : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> partitionsRevokedHandler(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, io.vertx.core.Handler<java.util.Set<java.util.Map<String, Object>>> handler) {
/**
 * Converts a map keyed by the Vert.x {@code TopicPartition} wrapper into one keyed by the
 * Kafka-native {@code TopicPartition}, preserving each timestamp value as-is.
 *
 * @param topicPartitionTimes map from Vert.x topic/partition to a timestamp
 * @return equivalent map using the Kafka topic/partition type as key
 */
public static Map<org.apache.kafka.common.TopicPartition, Long> toTopicPartitionTimes(Map<TopicPartition, Long> topicPartitionTimes) {
  return topicPartitionTimes.entrySet().stream()
      .collect(Collectors.toMap(
          entry -> {
            TopicPartition vertxTp = entry.getKey();
            return new org.apache.kafka.common.TopicPartition(vertxTp.getTopic(), vertxTp.getPartition());
          },
          Map.Entry::getValue));
}
/** Verifies the equals/hashCode contract for equal {@code TopicPartition} instances. */
@Test
public void testEquality(final TestContext context) {
  // Two instances sharing the same topic and partition.
  final TopicPartition left = new TopicPartition("topic1", 0);
  final TopicPartition right = new TopicPartition("topic1", 0);
  // Instances whose topic is null must also compare equal to each other.
  final TopicPartition noTopicLeft = new TopicPartition(null, 0);
  final TopicPartition noTopicRight = new TopicPartition(null, 0);

  // Reflexivity.
  context.assertEquals(left, left);
  context.assertEquals(left.hashCode(), left.hashCode());
  // Value equality implies hash equality.
  context.assertEquals(left, right);
  context.assertEquals(left.hashCode(), right.hashCode());
  // Null-topic equality.
  context.assertEquals(noTopicLeft, noTopicRight);
  context.assertEquals(noTopicLeft.hashCode(), noTopicRight.hashCode());
}
// Appears machine-generated (conversion shim). Seeks to the first offset for the given set of
// partitions, converting each Map element to a Vert.x TopicPartition via its JsonObject form.
// Null sets and null elements are propagated as null.
// NOTE(review): the trailing seekToBeginning overload signature is continued on a later line
// outside this span; left untouched.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToBeginning(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Set<java.util.Map<String, Object>> topicPartitions) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.seekToBeginning(topicPartitions != null ? topicPartitions.stream().map(elt -> elt != null ? new io.vertx.kafka.client.common.TopicPartition(io.vertx.core.impl.ConversionHelper.toJsonObject(elt)) : null).collect(java.util.stream.Collectors.toSet()) : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToBeginning(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Map<String, Object> topicPartition, io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>> completionHandler) {
/**
 * Converts a map of Vert.x topic/partition to Vert.x offset metadata into the Kafka-native
 * equivalent, rebuilding both keys and values with the Kafka client types.
 *
 * @param offsets map from Vert.x topic/partition to Vert.x offset-and-metadata
 * @return equivalent map using the Kafka-native key and value types
 */
public static Map<org.apache.kafka.common.TopicPartition, org.apache.kafka.clients.consumer.OffsetAndMetadata> to(Map<TopicPartition, OffsetAndMetadata> offsets) {
  return offsets.entrySet().stream()
      .collect(Collectors.toMap(
          entry -> new org.apache.kafka.common.TopicPartition(entry.getKey().getTopic(), entry.getKey().getPartition()),
          entry -> {
            OffsetAndMetadata vertxOffset = entry.getValue();
            return new org.apache.kafka.clients.consumer.OffsetAndMetadata(vertxOffset.getOffset(), vertxOffset.getMetadata());
          }));
}
// Appears machine-generated (conversion shim). Assigns the given set of partitions to the
// consumer: each Map element is converted to a Vert.x TopicPartition via its JsonObject form,
// and the async completion result is mapped back through ConversionHelper before the caller's
// completionHandler runs. Null sets/elements/handlers propagate as null.
// NOTE(review): the trailing assignment signature is continued on a later line outside this
// span; left untouched.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> assign(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Set<java.util.Map<String, Object>> topicPartitions, io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>> completionHandler) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.assign(topicPartitions != null ? topicPartitions.stream().map(elt -> elt != null ? new io.vertx.kafka.client.common.TopicPartition(io.vertx.core.impl.ConversionHelper.toJsonObject(elt)) : null).collect(java.util.stream.Collectors.toSet()) : null, completionHandler != null ? new io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>>() { public void handle(io.vertx.core.AsyncResult<java.lang.Void> ar) { completionHandler.handle(ar.map(event -> io.vertx.core.impl.ConversionHelper.fromObject(event))); } } : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> assignment(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, io.vertx.core.Handler<io.vertx.core.AsyncResult<java.util.Set<java.util.Map<String, Object>>>> handler) {
@Override public void offsetsForTimes(TopicPartition topicPartition, Long timestamp, Handler<AsyncResult<OffsetAndTimestamp>> handler) { Map<TopicPartition, Long> topicPartitions = new HashMap<>(); topicPartitions.put(topicPartition, timestamp); this.stream.offsetsForTimes(Helper.toTopicPartitionTimes(topicPartitions), done -> { if(done.succeeded()) { if (done.result().values().size() == 1) { org.apache.kafka.common.TopicPartition kTopicPartition = new org.apache.kafka.common.TopicPartition (topicPartition.getTopic(), topicPartition.getPartition()); org.apache.kafka.clients.consumer.OffsetAndTimestamp offsetAndTimestamp = done.result().get(kTopicPartition); if(offsetAndTimestamp != null) { OffsetAndTimestamp resultOffsetAndTimestamp = new OffsetAndTimestamp(offsetAndTimestamp.offset(), offsetAndTimestamp.timestamp()); handler.handle(Future.succeededFuture(resultOffsetAndTimestamp)); } // offsetAndTimestamp is null, i.e., search by timestamp did not lead to a result else { handler.handle(Future.succeededFuture()); } } else if (done.result().values().size() == 0) { handler.handle(Future.succeededFuture()); } else { handler.handle(Future.failedFuture("offsetsForTimes should return exactly one OffsetAndTimestamp")); } } else { handler.handle(Future.failedFuture(done.cause())); } }); }
// Appears machine-generated (conversion shim). Seeks to the first offset for the given set of
// partitions (converted from Maps via JsonObject form) and maps the async completion result
// back through ConversionHelper before the caller's completionHandler runs. Null
// sets/elements/handlers propagate as null.
// NOTE(review): the trailing seekToEnd signature is continued on a later line outside this
// span; left untouched.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToBeginning(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Set<java.util.Map<String, Object>> topicPartitions, io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>> completionHandler) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.seekToBeginning(topicPartitions != null ? topicPartitions.stream().map(elt -> elt != null ? new io.vertx.kafka.client.common.TopicPartition(io.vertx.core.impl.ConversionHelper.toJsonObject(elt)) : null).collect(java.util.stream.Collectors.toSet()) : null, completionHandler != null ? new io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>>() { public void handle(io.vertx.core.AsyncResult<java.lang.Void> ar) { completionHandler.handle(ar.map(event -> io.vertx.core.impl.ConversionHelper.fromObject(event))); } } : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToEnd(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Map<String, Object> topicPartition) {
log.debug("topic {} partition {}", partition.getTopic(), partition.getPartition()); log.debug("topic {} partition {}", partition.getTopic(), partition.getPartition());
// Appears machine-generated (conversion shim). Resumes consumption on the given set of
// partitions, converting each Map element to a Vert.x TopicPartition via its JsonObject form.
// Null sets and null elements are propagated as null.
// NOTE(review): the trailing resume overload signature is continued on a later line outside
// this span; left untouched.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> resume(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Set<java.util.Map<String, Object>> topicPartitions) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.resume(topicPartitions != null ? topicPartitions.stream().map(elt -> elt != null ? new io.vertx.kafka.client.common.TopicPartition(io.vertx.core.impl.ConversionHelper.toJsonObject(elt)) : null).collect(java.util.stream.Collectors.toSet()) : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> resume(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Map<String, Object> topicPartition, io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>> completionHandler) {
// Appears machine-generated (conversion shim). Seeks to the last offset for the given set of
// partitions, converting each Map element to a Vert.x TopicPartition via its JsonObject form.
// Null sets and null elements are propagated as null.
// NOTE(review): the trailing seekToEnd overload signature is continued on a later line
// outside this span; left untouched.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToEnd(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Set<java.util.Map<String, Object>> topicPartitions) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.seekToEnd(topicPartitions != null ? topicPartitions.stream().map(elt -> elt != null ? new io.vertx.kafka.client.common.TopicPartition(io.vertx.core.impl.ConversionHelper.toJsonObject(elt)) : null).collect(java.util.stream.Collectors.toSet()) : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToEnd(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Map<String, Object> topicPartition, io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>> completionHandler) {
// Appears machine-generated (conversion shim). Assigns the given set of partitions to the
// consumer, converting each Map element to a Vert.x TopicPartition via its JsonObject form.
// Null sets and null elements are propagated as null.
// NOTE(review): the trailing assign overload signature is continued on a later line outside
// this span; left untouched.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> assign(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Set<java.util.Map<String, Object>> topicPartitions) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.assign(topicPartitions != null ? topicPartitions.stream().map(elt -> elt != null ? new io.vertx.kafka.client.common.TopicPartition(io.vertx.core.impl.ConversionHelper.toJsonObject(elt)) : null).collect(java.util.stream.Collectors.toSet()) : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> assign(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Map<String, Object> topicPartition, io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>> completionHandler) {
// Appears machine-generated (conversion shim). Pauses consumption on the given set of
// partitions, converting each Map element to a Vert.x TopicPartition via its JsonObject form.
// Null sets and null elements are propagated as null.
// NOTE(review): the trailing pause overload signature is continued on a later line outside
// this span; left untouched.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> pause(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Set<java.util.Map<String, Object>> topicPartitions) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.pause(topicPartitions != null ? topicPartitions.stream().map(elt -> elt != null ? new io.vertx.kafka.client.common.TopicPartition(io.vertx.core.impl.ConversionHelper.toJsonObject(elt)) : null).collect(java.util.stream.Collectors.toSet()) : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> pause(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Map<String, Object> topicPartition, io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>> completionHandler) {