/**
 * Returns the record at the given position in this batch, wrapped in the
 * rx-java {@code KafkaConsumerRecord} facade.
 *
 * @param index the index of the record to get
 * @return the wrapped record at {@code index}
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumerRecord<K, V> recordAt(int index) {
  // Delegate the lookup, then re-wrap with the batch's key/value type arguments.
  return io.vertx.rxjava.kafka.client.consumer.KafkaConsumerRecord.newInstance(
      delegate.recordAt(index), __typeArg_0, __typeArg_1);
}
/**
 * @return the total number of records in this batch
 */
public int size() {
  // Pure pass-through to the wrapped delegate.
  return delegate.size();
}
/**
 * @return whether this batch contains any records
 */
public boolean isEmpty() {
  // Pure pass-through to the wrapped delegate.
  return delegate.isEmpty();
}
/**
 * Serializes a batch of Kafka records into a JSON array buffer. Each record
 * becomes one JSON object carrying its topic, key, value, partition and offset.
 *
 * @param records the polled records batch to convert
 * @return a {@link Buffer} holding the JSON-encoded array
 */
@Override
public Buffer toMessages(KafkaConsumerRecords<String, byte[]> records) {
  JsonArray jsonArray = new JsonArray();
  for (int i = 0; i < records.size(); i++) {
    // Hoist the lookup: the original called records.recordAt(i) five times per iteration.
    KafkaConsumerRecord<String, byte[]> record = records.recordAt(i);
    JsonObject jsonObject = new JsonObject();
    jsonObject.put("topic", record.topic());
    jsonObject.put("key", record.key());
    // Decode the value bytes explicitly as UTF-8; new String(byte[]) used the
    // platform default charset (pre-Java 18), which is not portable.
    jsonObject.put("value", new String(record.value(), java.nio.charset.StandardCharsets.UTF_8));
    jsonObject.put("partition", record.partition());
    jsonObject.put("offset", record.offset());
    jsonArray.add(jsonObject);
  }
  return jsonArray.toBuffer();
}
}
batchHandler.handler(ar -> wrappedConsumer.close()); wrappedConsumer.batchHandler(records -> { ctx.assertEquals(numMessages, records.size()); for (int i = 0; i < records.size(); i++) { KafkaConsumerRecord<Object, Object> record = records.recordAt(i); int dec = count.decrementAndGet(); if (dec >= 0) {
batchHandler.handler(ar -> wrappedConsumer.close()); wrappedConsumer.batchHandler(records -> { ctx.assertEquals(numMessages, records.size()); for (int i = 0; i < records.size(); i++) { KafkaConsumerRecord<Object, Object> record = records.recordAt(i); int dec = count.decrementAndGet(); if (dec >= 0) {
/**
 * @return the total number of records in this batch
 */
public int size() {
  // Forward straight to the underlying delegate.
  return delegate.size();
}
/**
 * Returns the record at the given position in this batch, wrapped in the
 * rx-java {@code KafkaConsumerRecord} facade.
 *
 * @param index the index of the record to get
 * @return the wrapped record at {@code index}
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumerRecord<K, V> recordAt(int index) {
  // Fetch from the delegate and re-wrap with this batch's type arguments.
  return io.vertx.rxjava.kafka.client.consumer.KafkaConsumerRecord.newInstance(
      delegate.recordAt(index), __typeArg_0, __typeArg_1);
}
/**
 * @return whether this batch contains any records
 */
public boolean isEmpty() {
  // Forward straight to the underlying delegate.
  return delegate.isEmpty();
}
/**
 * Callback invoked with a freshly polled Kafka records batch: captures the
 * batch size and resets the read cursor to the start of the batch.
 *
 * @param records the records batch to process
 */
private void handleKafkaBatch(KafkaConsumerRecords<K, V> records) {
  // The two assignments are independent; record the size, then rewind the cursor.
  this.batchSize = records.size();
  this.recordIndex = 0;
}
consumer.poll(100, pollResult -> { if (pollResult.succeeded()) { if (pollResult.result().size() > 0) { ctx.fail(); } else {
consumer.poll(100, pollResult -> { if (pollResult.succeeded()) { if (pollResult.result().size() > 0) { ctx.fail(); } else {
consumer.poll(100, pollResult -> { if (pollResult.succeeded()) { if (count.updateAndGet(o -> count.get() - pollResult.result().size()) == 0) { vertx.cancelTimer(t); done.complete();
consumer.poll(100, pollResult -> { if (pollResult.succeeded()) { if (count.updateAndGet(o -> count.get() - pollResult.result().size()) == 0) { vertx.cancelTimer(t); done.complete();