private void setWaitingToEmit(ConsumerRecords<K, V> consumerRecords) {
    for (TopicPartition tp : consumerRecords.partitions()) {
        waitingToEmit.put(tp, new LinkedList<>(consumerRecords.records(tp)));
    }
}
public List<String> format(final ConsumerRecords<String, Bytes> records) {
    return StreamSupport
        .stream(records.records(topicName).spliterator(), false)
        .filter(Objects::nonNull)
        .filter(r -> r.value() != null)
        .map(record -> {
            if (formatter == null) {
                formatter = getFormatter(record);
            }
            try {
                return formatter.print(record);
            } catch (IOException e) {
                log.warn("Exception formatting record", e);
                return null;
            }
        })
        .filter(Objects::nonNull)
        .collect(Collectors.toList());
}
private void ackRetriableOffsetsIfCompactedAway(Map<TopicPartition, Long> earliestRetriableOffsets,
                                                ConsumerRecords<K, V> consumerRecords) {
    for (Entry<TopicPartition, Long> entry : earliestRetriableOffsets.entrySet()) {
        TopicPartition tp = entry.getKey();
        List<ConsumerRecord<K, V>> records = consumerRecords.records(tp);
        if (!records.isEmpty()) {
            ConsumerRecord<K, V> record = records.get(0);
            long seekOffset = entry.getValue();
            long earliestReceivedOffset = record.offset();
            if (seekOffset < earliestReceivedOffset) {
                // Since we asked for tuples starting at seekOffset, some retriable records must have been compacted away.
                // Ack up to the first offset received if the record is not already acked or currently in the topology.
                for (long i = seekOffset; i < earliestReceivedOffset; i++) {
                    KafkaSpoutMessageId msgId = retryService.getMessageId(tp, i);
                    if (!offsetManagers.get(tp).contains(msgId) && !emitted.contains(msgId)) {
                        LOG.debug("Record at offset [{}] appears to have been compacted away from topic [{}], marking as acked", i, tp);
                        retryService.remove(msgId);
                        emitted.add(msgId);
                        ack(msgId);
                    }
                }
            }
        }
    }
}
private void processRecords(final ConsumerRecords<byte[], byte[]> records) {
    records.partitions().stream().forEach(partition -> {
        List<ConsumerRecord<byte[], byte[]>> messages = records.records(partition);
        if (!messages.isEmpty()) {
            // update maximum offset map for this topic partition
            long maxOffset = messages.stream()
                .mapToLong(record -> record.offset())
                .max()
                .getAsLong();
            uncommittedOffsetsMap.put(partition, new OffsetAndMetadata(maxOffset + 1L));

            // write records to content repository and session
            if (demarcatorBytes == null) {
                totalFlowFiles += messages.size();
                messages.stream().forEach(message -> {
                    writeData(getProcessSession(), message, partition);
                });
            } else {
                writeData(getProcessSession(), messages, partition);
            }
        }
    });
}
private void processRecords(final ConsumerRecords<byte[], byte[]> records) {
    records.partitions().stream().forEach(partition -> {
        List<ConsumerRecord<byte[], byte[]>> messages = records.records(partition);
        if (!messages.isEmpty()) {
            // update maximum offset map for this topic partition
            long maxOffset = messages.stream()
                .mapToLong(record -> record.offset())
                .max()
                .getAsLong();

            // write records to content repository and session
            if (demarcatorBytes != null) {
                writeDemarcatedData(getProcessSession(), messages, partition);
            } else if (readerFactory != null && writerFactory != null) {
                writeRecordData(getProcessSession(), messages, partition);
            } else {
                messages.stream().forEach(message -> {
                    writeData(getProcessSession(), message, partition);
                });
            }

            totalMessages += messages.size();
            uncommittedOffsetsMap.put(partition, new OffsetAndMetadata(maxOffset + 1L));
        }
    });
}
levelRecords.put(partition, partitionRecords);
List<ConsumerRecord<String, byte[]>> records = reconsumeRecords.records(partition);
partitionRecords.addAll(records);
@Override
public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
    onConsumeCount++;
    if (throwExceptionOnConsume) {
        throw new KafkaException("Injected exception in FilterConsumerInterceptor.onConsume.");
    }

    // filters out topic/partitions with partition == FILTER_PARTITION
    Map<TopicPartition, List<ConsumerRecord<K, V>>> recordMap = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        if (tp.partition() != filterPartition) {
            recordMap.put(tp, records.records(tp));
        }
    }
    return new ConsumerRecords<K, V>(recordMap);
}
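A consumer interceptor like FilterConsumerInterceptor above is not called directly; the consumer instantiates it from configuration and applies onConsume() to every batch returned by poll(). A minimal wiring sketch, assuming the class above is on the classpath; the broker address, group id, and topic name are illustrative placeholders, and the usual org.apache.kafka.clients.consumer and org.apache.kafka.common.serialization imports are in scope:

private KafkaConsumer<String, String> newInterceptedConsumer() {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // illustrative broker
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // illustrative group id
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    // Interceptors are configured as a list of class names; onConsume() runs over every poll() result.
    props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, FilterConsumerInterceptor.class.getName());
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Collections.singletonList("example-topic"));       // illustrative topic
    return consumer;
}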
final List<ConsumerRecord<K, V>> records = consumer.poll(pollTimeoutMs).records(currBatchTp);
LOG.debug("Polled [{}] records from Kafka.", records.size());
@Override @SuppressWarnings("deprecation") public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) { // This will ensure that we get the cluster metadata when onConsume is called for the first time // as subsequent compareAndSet operations will fail. CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get()); Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>(); for (TopicPartition tp : records.partitions()) { List<ConsumerRecord<String, String>> lst = new ArrayList<>(); for (ConsumerRecord<String, String> record: records.records(tp)) { lst.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(), record.timestamp(), record.timestampType(), record.checksum(), record.serializedKeySize(), record.serializedValueSize(), record.key(), record.value().toUpperCase(Locale.ROOT))); } recordMap.put(tp, lst); } return new ConsumerRecords<String, String>(recordMap); }
private void checkData() {
    Set<TopicPartition> assignment = Collections.singleton(new TopicPartition(TOPIC, 0));
    consumer.assign(assignment);
    consumer.seekToBeginning(assignment);
    long numRecords = 0;
    boolean emptyPoll = false;
    while (numRecords < RECORD_NUMBER && !emptyPoll) {
        ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(1000));
        Assert.assertFalse(records.records(new TopicPartition(TOPIC, 0))
            .stream()
            .anyMatch(consumerRecord -> !RECORDS_WRITABLES.contains(new KafkaWritable(0,
                consumerRecord.timestamp(), consumerRecord.value(), consumerRecord.key()))));
        emptyPoll = records.isEmpty();
        numRecords += records.count();
    }
    Assert.assertEquals(RECORD_NUMBER, numRecords);
}
records.records(partition.getKafkaPartitionHandle());
ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(1000));
Assert.assertFalse(records.records(new TopicPartition(topic, 0))
    .stream()
    .anyMatch(consumerRecord -> !RECORDS_WRITABLES.contains(new KafkaWritable(0,
        consumerRecord.timestamp(), consumerRecord.value(), consumerRecord.key()))));
while (numRecords < RECORD_NUMBER) {
    ConsumerRecords<byte[], byte[]> consumerRecords = consumer.poll(Duration.ofMillis(1000));
    actualRecords.addAll(consumerRecords.records(new TopicPartition(TOPIC, 0)));
    numRecords += consumerRecords.count();
}
for (TopicPartition partition : records.partitions()) {
    String topic = partition.topic();
    List<ConsumerRecord<K, V>> recordsInPartition = records.records(partition);
    for (int i = 0, length = recordsInPartition.size(); i < length; i++) {
        ConsumerRecord<K, V> record = recordsInPartition.get(i);
@Override
public void handle(Exception thrownException, ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
        MessageListenerContainer container) {
    data.partitions()
        .stream()
        .collect(Collectors.toMap(tp -> tp,
            tp -> data.records(tp).get(0).offset(),
            (u, v) -> (long) v,
            LinkedHashMap::new))
        .forEach(consumer::seek);
    throw new KafkaException("Seek to current after exception", thrownException);
}
/**
 * Poll the consumer, expecting a single record for the specified topic.
 * @param consumer the consumer.
 * @param topic the topic.
 * @param timeout max time in milliseconds to wait for records; forwarded to {@link Consumer#poll(long)}.
 * @param <K> the key type.
 * @param <V> the value type.
 * @return the record.
 * @throws org.junit.ComparisonFailure if exactly one record is not received.
 * @since 2.0
 */
public static <K, V> ConsumerRecord<K, V> getSingleRecord(Consumer<K, V> consumer, String topic, long timeout) {
    ConsumerRecords<K, V> received = getRecords(consumer, timeout);
    assertThat(received.count()).as("Incorrect results returned", received.count()).isEqualTo(1);
    return received.records(topic).iterator().next();
}
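A brief usage sketch for the helper above, assuming it lives on a KafkaTestUtils-style test utility class (as in spring-kafka-test) and that a consumer already subscribed to the topic is available; the topic name, timeout, and expected value are illustrative assumptions:

// Expect exactly one record on the topic within 10 seconds; the assertion inside the helper fails the test otherwise.
ConsumerRecord<String, String> single = KafkaTestUtils.getSingleRecord(consumer, "test-topic", 10_000L);
assertThat(single.value()).isEqualTo("expected-value"); // illustrative follow-up assertion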