How to use ConsumerRecord in org.apache.kafka.clients.consumer

Best Java code snippets using org.apache.kafka.clients.consumer.ConsumerRecord (Showing top 20 results out of 2,565)

Refine search:

  • KafkaConsumer
  • ConsumerRecords
  • TopicPartition
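
Before the project snippets, here is a minimal, self-contained sketch of the pattern most of them share: poll a KafkaConsumer and read the fields off each ConsumerRecord. It is not taken from any of the projects below; the bootstrap address, group id, and topic name are placeholders.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerRecordExample {
 public static void main(String[] args) {
  Properties props = new Properties();
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder broker
  props.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer-record-example");  // placeholder group id
  props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
   consumer.subscribe(Collections.singleton("example-topic"));           // placeholder topic
   ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
   for (ConsumerRecord<String, String> record : records) {
    // topic/partition/offset locate the record; key and value carry the payload
    System.out.printf("%s-%d@%d: %s = %s%n",
      record.topic(), record.partition(), record.offset(),
      record.key(), record.value());
   }
  }
 }
}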
origin: apache/hive

void set(ConsumerRecord<byte[], byte[]> consumerRecord) {
 this.partition = consumerRecord.partition();
 this.timestamp = consumerRecord.timestamp();
 this.offset = consumerRecord.offset();
 this.value = consumerRecord.value();
 this.recordKey = consumerRecord.key();
}
origin: linkedin/kafka-monitor

@Override
public BaseConsumerRecord receive() {
 if (_recordIter == null || !_recordIter.hasNext())
  _recordIter = _consumer.poll(Long.MAX_VALUE).iterator();
 ConsumerRecord<String, String> record = _recordIter.next();
 return new BaseConsumerRecord(record.topic(), record.partition(), record.offset(), record.key(), record.value());
}
origin: apache/storm

public KafkaSpoutMessageId(ConsumerRecord<?, ?> consumerRecord, boolean nullTuple) {
  this(new TopicPartition(consumerRecord.topic(), consumerRecord.partition()), consumerRecord.offset(), nullTuple);
}
origin: apache/nifi

private BundleTracker(final ConsumerRecord<byte[], byte[]> initialRecord, final TopicPartition topicPartition, final String keyEncoding) {
  this.initialOffset = initialRecord.offset();
  this.partition = topicPartition.partition();
  this.topic = topicPartition.topic();
  this.key = encodeKafkaKey(initialRecord.key(), keyEncoding);
}
origin: apache/hive

 private void checkData() {
  Set<TopicPartition> assignment = Collections.singleton(new TopicPartition(TOPIC, 0));
  consumer.assign(assignment);
  consumer.seekToBeginning(assignment);
  long numRecords = 0;
  boolean emptyPoll = false;
  while (numRecords < RECORD_NUMBER && !emptyPoll) {
   ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(1000));

   Assert.assertFalse(records.records(new TopicPartition(TOPIC, 0))
     .stream()
     .anyMatch(consumerRecord -> !RECORDS_WRITABLES.contains(new KafkaWritable(0,
       consumerRecord.timestamp(),
       consumerRecord.value(),
       consumerRecord.key()))));

   emptyPoll = records.isEmpty();
   numRecords += records.count();
  }
  Assert.assertEquals(RECORD_NUMBER, numRecords);
 }
}
origin: apache/flume

// Fragment from Flume (apache/flume); the snippet viewer collapsed the lines between
// ConsumerRecord accesses, so elided code is marked with "// ..." below.
kafkaKey = message.key();
// ... Avro branch: decode the record value into a Flume event ...
 ByteArrayInputStream in =
     new ByteArrayInputStream(message.value());
 decoder = DecoderFactory.get().directBinaryDecoder(in, decoder);
 if (!reader.isPresent()) {
  // ...
 }
 headers = toStringMap(avroevent.getHeaders());
} else {
 // Raw branch: use the record value as the event body and build headers from
 // the record's topic, partition, and offset.
 eventBody = message.value();
 headers.clear();
 headers = new HashMap<String, String>(4);
 headers.put(topicHeader, message.topic());
 // ... (header keys elided by the snippet viewer)
 //   String.valueOf(message.partition()));
 //   String.valueOf(message.offset()));
 if (LogPrivacyUtil.allowLogRawData()) {
  log.trace("Topic: {} Partition: {} Message: {}", new String[]{
    message.topic(),
    String.valueOf(message.partition()),
    new String(eventBody)
  });
 } else {
  log.trace("Topic: {} Partition: {} Message arrived.",
    message.topic(),
    String.valueOf(message.partition()));
 }
origin: apache/flume

/**
 * Return a map containing one List of records per partition.
 * This internally creates a Kafka Consumer using the provided consumer properties.
 *
 * @param topic the topic to read from
 * @param numPtns the number of partitions to read
 * @param consumerProperties properties used to construct the KafkaConsumer
 * @return A Map of Partitions(Integer) and the resulting List of messages (byte[]) retrieved
 */
public static Map<Integer, List<byte[]>> retrieveRecordsFromPartitions(String topic, int numPtns,
                                 Properties consumerProperties) {
 Map<Integer, List<byte[]>> resultsMap = new HashMap<Integer, List<byte[]>>();
 for (int i = 0; i < numPtns; i++) {
  List<byte[]> partitionResults = new ArrayList<byte[]>();
  resultsMap.put(i, partitionResults);
  KafkaConsumer<String, byte[]> consumer =
    new KafkaConsumer<String, byte[]>(consumerProperties);
  TopicPartition partition = new TopicPartition(topic, i);
  consumer.assign(Arrays.asList(partition));
  ConsumerRecords<String, byte[]> records = consumer.poll(1000);
  for (ConsumerRecord<String, byte[]> record : records) {
   partitionResults.add(record.value());
  }
  consumer.close();
 }
 return resultsMap;
}
origin: apache/kafka

@Override
@SuppressWarnings("deprecation")
public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
  // This will ensure that we get the cluster metadata when onConsume is called for the first time
  // as subsequent compareAndSet operations will fail.
  CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get());
  Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
  for (TopicPartition tp : records.partitions()) {
    List<ConsumerRecord<String, String>> lst = new ArrayList<>();
    for (ConsumerRecord<String, String> record: records.records(tp)) {
      lst.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(),
                     record.timestamp(), record.timestampType(),
                     record.checksum(), record.serializedKeySize(),
                     record.serializedValueSize(),
                     record.key(), record.value().toUpperCase(Locale.ROOT)));
    }
    recordMap.put(tp, lst);
  }
  return new ConsumerRecords<String, String>(recordMap);
}
origin: alibaba/canal

/**
 * Fetch Kafka messages without acknowledging them.
 *
 * @param timeout how long to wait for records
 * @param unit    time unit of the timeout
 * @param offset  offset to seek to (-1 means do not seek)
 * @return the fetched messages
 * @throws CanalClientException
 */
public List<KafkaMessage> getListWithoutAck(Long timeout, TimeUnit unit, long offset) throws CanalClientException {
  waitClientRunning();
  if (!running) {
    return Lists.newArrayList();
  }
  if (offset > -1) {
    TopicPartition tp = new TopicPartition(topic, partition == null ? 0 : partition);
    kafkaConsumer.seek(tp, offset);
  }
  ConsumerRecords<String, Message> records = kafkaConsumer.poll(unit.toMillis(timeout));
  if (!records.isEmpty()) {
    List<KafkaMessage> messages = new ArrayList<>();
    for (ConsumerRecord<String, Message> record : records) {
      KafkaMessage message = new KafkaMessage(record.value(), record.offset());
      messages.add(message);
    }
    return messages;
  }
  return Lists.newArrayList();
}
origin: confluentinc/kafka-streams-examples

 private static void consumeOutput(final String bootstrapServers) {
  final Properties consumerProps = new Properties();
  consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
  consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "session-windows-consumer");
  consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, Serdes.String().deserializer().getClass());
  consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Serdes.Long().deserializer().getClass());

  final KafkaConsumer<String, Long> consumer = new KafkaConsumer<>(consumerProps);
  consumer.subscribe(Collections.singleton(SessionWindowsExample.PLAY_EVENTS_PER_SESSION));
  int received = 0;
  while(received < NUM_RECORDS_SENT) {
   final ConsumerRecords<String, Long> records = consumer.poll(Long.MAX_VALUE);
   records.forEach(record -> System.out.println(record.key() + " = " + record.value()));
   received += records.count();
  }

  consumer.close();
 }
}
origin: apache/flume

// Fragment from Flume (apache/flume); lines collapsed by the snippet viewer are marked with "// ...".
if (logger.isTraceEnabled()) {
 logger.trace("Assignment during take: {}",
   consumerAndRecords.get().consumer.assignment().toString());
}
// ...
e = deserializeValue(record.value(), parseAsFlumeEvent);
TopicPartition tp = new TopicPartition(record.topic(), record.partition());
OffsetAndMetadata oam = new OffsetAndMetadata(record.offset() + 1, batchUUID);
consumerAndRecords.get().saveOffsets(tp, oam);
if (record.key() != null) {
 e.getHeaders().put(KEY_HEADER, record.key());
}
// ...
//   new Object[] {getName(), record.partition(), record.offset()});
origin: apache/kafka

@Test
@SuppressWarnings("deprecation")
public void testOldConstructor() {
  String topic = "topic";
  int partition = 0;
  long offset = 23;
  String key = "key";
  String value = "value";
  ConsumerRecord<String, String> record = new ConsumerRecord<>(topic, partition, offset, key, value);
  assertEquals(topic, record.topic());
  assertEquals(partition, record.partition());
  assertEquals(offset, record.offset());
  assertEquals(key, record.key());
  assertEquals(value, record.value());
  assertEquals(TimestampType.NO_TIMESTAMP_TYPE, record.timestampType());
  assertEquals(ConsumerRecord.NO_TIMESTAMP, record.timestamp());
  assertEquals(ConsumerRecord.NULL_CHECKSUM, record.checksum());
  assertEquals(ConsumerRecord.NULL_SIZE, record.serializedKeySize());
  assertEquals(ConsumerRecord.NULL_SIZE, record.serializedValueSize());
  assertEquals(Optional.empty(), record.leaderEpoch());
  assertEquals(new RecordHeaders(), record.headers());
}
origin: debezium/debezium

@Override
protected void recoverRecords(Consumer<HistoryRecord> records) {
  // Lines collapsed by the snippet viewer are marked with "// ...".
  try (KafkaConsumer<String, String> historyConsumer = new KafkaConsumer<>(consumerConfig.asProperties())) {
    historyConsumer.subscribe(Collect.arrayListOf(topicName));
    // ...
    logger.debug("End offset of database history topic is {}", endOffset);
    ConsumerRecords<String, String> recoveredRecords = historyConsumer.poll(this.pollIntervalMs);
    int numRecordsProcessed = 0;
    // ... for each recovered record ...
    if (lastProcessedOffset < record.offset()) {
      if (record.value() == null) {
        logger.warn("Skipping null database history record. " +
            "This is often not an issue, but if it happens repeatedly please check the '{}' topic.", topicName);
      } else {
        HistoryRecord recordObj = new HistoryRecord(reader.read(record.value()));
        logger.trace("Recovering database history: {}", recordObj);
        if (recordObj == null || !recordObj.isValid()) {
          // ...
        }
      }
      lastProcessedOffset = record.offset();
      ++numRecordsProcessed;
    }
origin: OryxProject/oryx

@Override
protected KeyMessage<K,V> computeNext() {
 if (iterator == null || !iterator.hasNext()) {
  try {
   long timeout = MIN_POLL_MS;
   ConsumerRecords<K, V> records;
   while ((records = consumer.poll(timeout)).isEmpty()) {
    timeout = Math.min(MAX_POLL_MS, timeout * 2);
   }
   iterator = records.iterator();
  } catch (Exception e) {
   consumer.close();
   return endOfData();
  }
 }
 ConsumerRecord<K,V> mm = iterator.next();
 return new KeyMessageImpl<>(mm.key(), mm.value());
}
origin: linkedin/cruise-control

// Fragment from linkedin/cruise-control; lines collapsed by the snippet viewer are marked with "// ...".
try {
 prepareConsumerOffset();
 Map<TopicPartition, Long> beginningOffsets = _consumer.beginningOffsets(_consumer.assignment());
 Map<TopicPartition, Long> endOffsets = _consumer.endOffsets(_consumer.assignment());
 LOG.debug("Loading beginning offsets: {}, loading end offsets: {}", beginningOffsets, endOffsets);
 for (Map.Entry<TopicPartition, Long> entry : beginningOffsets.entrySet()) {
  // ...
  for (ConsumerRecord<byte[], byte[]> record : consumerRecords) {
   try {
    if (record.topic().equals(_partitionMetricSampleStoreTopic)) {
     PartitionMetricSample sample = PartitionMetricSample.fromBytes(record.value());
     partitionMetricSamples.add(sample);
     LOG.trace("Loaded partition metric sample {}", sample);
    } else if (record.topic().equals(_brokerMetricSampleStoreTopic)) {
     BrokerMetricSample sample = BrokerMetricSample.fromBytes(record.value());
     sample.close(record.timestamp());
     brokerMetricSamples.add(sample);
     LOG.trace("Loaded broker metric sample {}", sample);
    }
    // ...
   }
  }
  _numPartitionMetricSamples.getAndAdd(partitionMetricSamples.size());
  _numBrokerMetricSamples.getAndAdd(brokerMetricSamples.size());
  _loadingProgress = (double) _numLoadedSamples.addAndGet(consumerRecords.count()) / _totalSamples.get();
origin: alibaba/canal

@Override
public List<Message> getListWithoutAck(Long timeout, TimeUnit unit) throws CanalClientException {
  waitClientRunning();
  if (!running) {
    return Lists.newArrayList();
  }
  ConsumerRecords<String, Message> records = kafkaConsumer.poll(unit.toMillis(timeout));
  currentOffsets.clear();
  for (TopicPartition topicPartition : records.partitions()) {
    currentOffsets.put(topicPartition.partition(), kafkaConsumer.position(topicPartition));
  }
  if (!records.isEmpty()) {
    List<Message> messages = new ArrayList<>();
    for (ConsumerRecord<String, Message> record : records) {
      messages.add(record.value());
    }
    return messages;
  }
  return Lists.newArrayList();
}
origin: spring-projects/spring-kafka

// Fragment from spring-projects/spring-kafka; lines collapsed by the snippet viewer are marked with "// ...".
this.headerMapper.toHeaders(record.headers(), rawHeaders);
// ...
//       + KafkaHeaders.NATIVE_HEADERS);
rawHeaders.put(KafkaHeaders.NATIVE_HEADERS, record.headers());
// ...
rawHeaders.put(KafkaHeaders.RECEIVED_MESSAGE_KEY, record.key());
rawHeaders.put(KafkaHeaders.RECEIVED_TOPIC, record.topic());
rawHeaders.put(KafkaHeaders.RECEIVED_PARTITION_ID, record.partition());
rawHeaders.put(KafkaHeaders.OFFSET, record.offset());
rawHeaders.put(KafkaHeaders.TIMESTAMP_TYPE, record.timestampType().name());
rawHeaders.put(KafkaHeaders.RECEIVED_TIMESTAMP, record.timestamp());
origin: OryxProject/oryx

// Two fragments from Oryx; lines collapsed by the snippet viewer are marked with "// ...".
JavaInputDStream<ConsumerRecord<K,M>> kafkaDStream = buildInputDStream(streamingContext);
JavaPairDStream<K,M> pairDStream =
  kafkaDStream.mapToPair(mAndM -> new Tuple2<>(mAndM.key(), mAndM.value()));
// ...
KafkaConsumer<String,U> consumer = new KafkaConsumer<>(
  ConfigUtils.keyValueToProperties(
    "group.id", "OryxGroup-" + getLayerName() + "-" + UUID.randomUUID(),
    // ...
    ));
consumer.subscribe(Collections.singletonList(updateTopic));
consumerIterator = new ConsumeDataIterator<>(consumer);
origin: apache/hive

@Test public void testWriteReadFields2() throws IOException {
 ConsumerRecord<byte[], byte[]> record = new ConsumerRecord("topic", 0, 3L, "key".getBytes(), "value".getBytes());
 KafkaWritable kafkaWritable =
   new KafkaWritable(record.partition(),
     record.offset(),
     record.timestamp(),
     record.value(), "thisKey".getBytes());
 ByteArrayOutputStream baos = new ByteArrayOutputStream();
 DataOutputStream w = new DataOutputStream(baos);
 kafkaWritable.write(w);
 w.flush();
 ByteArrayInputStream input = new ByteArrayInputStream(baos.toByteArray());
 DataInputStream inputStream = new DataInputStream(input);
 KafkaWritable actualKafkaWritable = new KafkaWritable();
 actualKafkaWritable.readFields(inputStream);
 Assert.assertEquals(kafkaWritable, actualKafkaWritable);
}
origin: sixt/ja-micro

@SuppressWarnings(value = "unchecked")
protected void readMessages() {
  logger.trace("Reading messages from Kafka...");
  checkForMessageThrottling();
  ConsumerRecords<String, String> records = realConsumer.poll(pollTime);
  if (records != null) {
    messagesReadMetric.incSuccess(records.count());
    for (ConsumerRecord<String, String> record : records) {
      String rawMessage = record.value();
      logger.trace(append("rawMessage", rawMessage), "Read Kafka message ({}/{})",
          record.partition(), record.offset());
      messageQueue.add(record);
    }
  }
}
org.apache.kafka.clients.consumer.ConsumerRecord

Javadoc

A key/value pair to be received from Kafka. This also consists of a topic name and a partition number from which the record is being received, an offset that points to the record in a Kafka partition, and a timestamp as marked by the corresponding ProducerRecord.
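
As the Javadoc notes, every record carries its coordinates (topic, partition, offset) and timestamp metadata alongside the key/value pair. The sketch below reads that metadata from a polled record; the describe helper and its output format are illustrative, not part of the Kafka API.

import org.apache.kafka.clients.consumer.ConsumerRecord;

final class RecordDescriber {
 // Illustrative helper (not part of the Kafka API): summarize a record's metadata.
 static String describe(ConsumerRecord<?, ?> record) {
  return String.format("%s-%d@%d (%s=%d, key bytes=%d, value bytes=%d)",
    record.topic(), record.partition(), record.offset(),
    record.timestampType(), record.timestamp(),
    record.serializedKeySize(), record.serializedValueSize());
 }
}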

Most used methods

  • value
    The value
  • key
    The key (or null if no key is specified)
  • offset
    The position of this record in the corresponding Kafka partition.
  • partition
    The partition from which this record is received
  • topic
    The topic this record is received from
  • timestamp
  • headers
  • <init>
  • timestampType
  • serializedValueSize
  • serializedKeySize
  • checksum
  • toString
  • leaderEpoch
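
The <init> entry refers to ConsumerRecord's public constructors, which the apache/kafka and apache/hive test snippets above use to build records by hand. A minimal sketch along those lines; the topic, offset, key, and value are made up:

// Build a record directly (e.g. in a unit test); all values here are made up.
ConsumerRecord<String, String> record =
  new ConsumerRecord<>("test-topic", 0, 42L, "some-key", "some-value");
// With this constructor the metadata fields take the defaults asserted in the
// apache/kafka test above: NO_TIMESTAMP_TYPE, NO_TIMESTAMP, NULL_SIZE, empty headers.
System.out.println(record.topic() + "-" + record.partition() + "@" + record.offset());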
