@Override
public BaseConsumerRecord receive() {
    if (_recordIter == null || !_recordIter.hasNext()) {
        _recordIter = _consumer.poll(Long.MAX_VALUE).iterator();
    }
    ConsumerRecord<String, String> record = _recordIter.next();
    return new BaseConsumerRecord(record.topic(), record.partition(), record.offset(), record.key(), record.value());
}

public KafkaSpoutMessageId(ConsumerRecord<?, ?> consumerRecord, boolean nullTuple) {
    this(new TopicPartition(consumerRecord.topic(), consumerRecord.partition()), consumerRecord.offset(), nullTuple);
}

private BundleTracker(final ConsumerRecord<byte[], byte[]> initialRecord, final TopicPartition topicPartition, final String keyEncoding) {
    this.initialOffset = initialRecord.offset();
    this.partition = topicPartition.partition();
    this.topic = topicPartition.topic();
    this.key = encodeKafkaKey(initialRecord.key(), keyEncoding);
}

private void checkData() {
    Set<TopicPartition> assignment = Collections.singleton(new TopicPartition(TOPIC, 0));
    consumer.assign(assignment);
    consumer.seekToBeginning(assignment);
    long numRecords = 0;
    boolean emptyPoll = false;
    while (numRecords < RECORD_NUMBER && !emptyPoll) {
        ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(1000));
        Assert.assertFalse(records.records(new TopicPartition(TOPIC, 0))
            .stream()
            .anyMatch(consumerRecord -> !RECORDS_WRITABLES.contains(
                new KafkaWritable(0, consumerRecord.timestamp(), consumerRecord.value(), consumerRecord.key()))));
        emptyPoll = records.isEmpty();
        numRecords += records.count();
    }
    Assert.assertEquals(RECORD_NUMBER, numRecords);
}

kafkaKey = message.key();

// Set up an Avro binary decoder over the raw record value
in = new ByteArrayInputStream(message.value());
decoder = DecoderFactory.get().directBinaryDecoder(in, decoder);
if (!reader.isPresent()) {
    headers = toStringMap(avroevent.getHeaders());
} else {
    eventBody = message.value();
    headers.clear();
    headers = new HashMap<String, String>(4);
}

// Record provenance headers; partitionHeader and offsetHeader are assumed field names
// paralleling topicHeader, since the snippet only shows the value expressions
headers.put(topicHeader, message.topic());
headers.put(partitionHeader, String.valueOf(message.partition()));
headers.put(offsetHeader, String.valueOf(message.offset()));

if (LogPrivacyUtil.allowLogRawData()) {
    log.trace("Topic: {} Partition: {} Message: {}", new String[]{
        message.topic(),
        String.valueOf(message.partition()),
        new String(eventBody)});
} else {
    log.trace("Topic: {} Partition: {} Message arrived.",
        message.topic(), String.valueOf(message.partition()));
}

/**
 * Return a map containing one List of records per partition.
 * This internally creates a Kafka Consumer using the provided consumer properties.
 *
 * @param topic the topic to read from
 * @param numPtns the number of partitions to read
 * @param consumerProperties the properties used to construct the KafkaConsumer
 * @return a Map of partition id (Integer) to the List of messages (byte[]) retrieved from that partition
 */
public static Map<Integer, List<byte[]>> retrieveRecordsFromPartitions(String topic, int numPtns,
        Properties consumerProperties) {
    Map<Integer, List<byte[]>> resultsMap = new HashMap<Integer, List<byte[]>>();
    for (int i = 0; i < numPtns; i++) {
        List<byte[]> partitionResults = new ArrayList<byte[]>();
        resultsMap.put(i, partitionResults);
        KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<String, byte[]>(consumerProperties);
        TopicPartition partition = new TopicPartition(topic, i);
        consumer.assign(Arrays.asList(partition));
        ConsumerRecords<String, byte[]> records = consumer.poll(1000);
        for (ConsumerRecord<String, byte[]> record : records) {
            partitionResults.add(record.value());
        }
        consumer.close();
    }
    return resultsMap;
}

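A minimal usage sketch for the helper above, assuming a broker at localhost:9092 and a hypothetical two-partition topic named test-topic; the deserializer classes and the auto.offset.reset setting are assumptions added so the consumer starts from the beginning of each partition:

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");   // assumed broker address
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
props.put("auto.offset.reset", "earliest");          // read each partition from its beginning

// "test-topic" and the partition count of 2 are made up for illustration
Map<Integer, List<byte[]>> byPartition = retrieveRecordsFromPartitions("test-topic", 2, props);
byPartition.forEach((partition, values) ->
    System.out.println("partition " + partition + ": " + values.size() + " records"));
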
@Override @SuppressWarnings("deprecation") public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) { // This will ensure that we get the cluster metadata when onConsume is called for the first time // as subsequent compareAndSet operations will fail. CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get()); Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>(); for (TopicPartition tp : records.partitions()) { List<ConsumerRecord<String, String>> lst = new ArrayList<>(); for (ConsumerRecord<String, String> record: records.records(tp)) { lst.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(), record.timestamp(), record.timestampType(), record.checksum(), record.serializedKeySize(), record.serializedValueSize(), record.key(), record.value().toUpperCase(Locale.ROOT))); } recordMap.put(tp, lst); } return new ConsumerRecords<String, String>(recordMap); }
/**
 * Fetch Kafka messages without acknowledging them.
 *
 * @param timeout poll timeout
 * @param unit time unit of the timeout
 * @param offset offset to seek to before polling (-1 means do not seek)
 * @return the fetched messages
 * @throws CanalClientException
 */
public List<KafkaMessage> getListWithoutAck(Long timeout, TimeUnit unit, long offset) throws CanalClientException {
    waitClientRunning();
    if (!running) {
        return Lists.newArrayList();
    }
    if (offset > -1) {
        TopicPartition tp = new TopicPartition(topic, partition == null ? 0 : partition);
        kafkaConsumer.seek(tp, offset);
    }
    ConsumerRecords<String, Message> records = kafkaConsumer.poll(unit.toMillis(timeout));
    if (!records.isEmpty()) {
        List<KafkaMessage> messages = new ArrayList<>();
        for (ConsumerRecord<String, Message> record : records) {
            KafkaMessage message = new KafkaMessage(record.value(), record.offset());
            messages.add(message);
        }
        return messages;
    }
    return Lists.newArrayList();
}

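A hedged usage sketch for getListWithoutAck; the connector variable, the one-second timeout, and the separate acknowledgement step are illustrative assumptions rather than part of the snippet:

// connector: a hypothetical client instance exposing getListWithoutAck(...)
List<KafkaMessage> batch = connector.getListWithoutAck(1000L, TimeUnit.MILLISECONDS, -1L); // -1: keep current position
System.out.println("fetched " + batch.size() + " messages");
// process the batch, then acknowledge separately so offsets are only advanced after success
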
private static void consumeOutput(final String bootstrapServers) {
    final Properties consumerProps = new Properties();
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "session-windows-consumer");
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, Serdes.String().deserializer().getClass());
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Serdes.Long().deserializer().getClass());
    final KafkaConsumer<String, Long> consumer = new KafkaConsumer<>(consumerProps);
    consumer.subscribe(Collections.singleton(SessionWindowsExample.PLAY_EVENTS_PER_SESSION));
    int received = 0;
    while (received < NUM_RECORDS_SENT) {
        final ConsumerRecords<String, Long> records = consumer.poll(Long.MAX_VALUE);
        records.forEach(record -> System.out.println(record.key() + " = " + record.value()));
        received += records.count();
    }
    consumer.close();
}

if (logger.isTraceEnabled()) {
    logger.trace("Assignment during take: {}",
        consumerAndRecords.get().consumer.assignment().toString());
}

e = deserializeValue(record.value(), parseAsFlumeEvent);
TopicPartition tp = new TopicPartition(record.topic(), record.partition());
OffsetAndMetadata oam = new OffsetAndMetadata(record.offset() + 1, batchUUID);
consumerAndRecords.get().saveOffsets(tp, oam);

// Carry the Kafka record key into the event headers
if (record.key() != null) {
    e.getHeaders().put(KEY_HEADER, record.key());
}

// Format string assumed for illustration; only the argument array appears in the snippet
logger.debug("{} took a record from partition {} at offset {}",
    new Object[] {getName(), record.partition(), record.offset()});

@Test @SuppressWarnings("deprecation") public void testOldConstructor() { String topic = "topic"; int partition = 0; long offset = 23; String key = "key"; String value = "value"; ConsumerRecord<String, String> record = new ConsumerRecord<>(topic, partition, offset, key, value); assertEquals(topic, record.topic()); assertEquals(partition, record.partition()); assertEquals(offset, record.offset()); assertEquals(key, record.key()); assertEquals(value, record.value()); assertEquals(TimestampType.NO_TIMESTAMP_TYPE, record.timestampType()); assertEquals(ConsumerRecord.NO_TIMESTAMP, record.timestamp()); assertEquals(ConsumerRecord.NULL_CHECKSUM, record.checksum()); assertEquals(ConsumerRecord.NULL_SIZE, record.serializedKeySize()); assertEquals(ConsumerRecord.NULL_SIZE, record.serializedValueSize()); assertEquals(Optional.empty(), record.leaderEpoch()); assertEquals(new RecordHeaders(), record.headers()); }
@Override
protected void recoverRecords(Consumer<HistoryRecord> records) {
    try (KafkaConsumer<String, String> historyConsumer = new KafkaConsumer<>(consumerConfig.asProperties())) {
        historyConsumer.subscribe(Collect.arrayListOf(topicName));
        logger.debug("End offset of database history topic is {}", endOffset);

        ConsumerRecords<String, String> recoveredRecords = historyConsumer.poll(this.pollIntervalMs);
        int numRecordsProcessed = 0;
        // The loop below is an assumed reconstruction; the snippet only shows the per-record handling
        for (ConsumerRecord<String, String> record : recoveredRecords) {
            if (lastProcessedOffset < record.offset()) {
                if (record.value() == null) {
                    logger.warn("Skipping null database history record. "
                        + "This is often not an issue, but if it happens repeatedly please check the '{}' topic.",
                        topicName);
                } else {
                    HistoryRecord recordObj = new HistoryRecord(reader.read(record.value()));
                    logger.trace("Recovering database history: {}", recordObj);
                    if (recordObj == null || !recordObj.isValid()) {
                        // invalid or unreadable records are skipped
                    } else {
                        records.accept(recordObj);
                    }
                }
                lastProcessedOffset = record.offset();
                ++numRecordsProcessed;
            }
        }
    } catch (Exception e) {
        // error handling is not shown in the snippet
        logger.error("Failed to recover database history", e);
    }
}

@Override
protected KeyMessage<K,V> computeNext() {
    if (iterator == null || !iterator.hasNext()) {
        try {
            long timeout = MIN_POLL_MS;
            ConsumerRecords<K, V> records;
            // Back off exponentially, up to MAX_POLL_MS, while the topic has no new data
            while ((records = consumer.poll(timeout)).isEmpty()) {
                timeout = Math.min(MAX_POLL_MS, timeout * 2);
            }
            iterator = records.iterator();
        } catch (Exception e) {
            consumer.close();
            return endOfData();
        }
    }
    ConsumerRecord<K,V> mm = iterator.next();
    return new KeyMessageImpl<>(mm.key(), mm.value());
}

try {
    prepareConsumerOffset();
    Map<TopicPartition, Long> beginningOffsets = _consumer.beginningOffsets(_consumer.assignment());
    Map<TopicPartition, Long> endOffsets = _consumer.endOffsets(_consumer.assignment());
    LOG.debug("Loading beginning offsets: {}, loading end offsets: {}", beginningOffsets, endOffsets);
    for (Map.Entry<TopicPartition, Long> entry : beginningOffsets.entrySet()) {
        // per-partition bookkeeping is not shown in the snippet
    }

    for (ConsumerRecord<byte[], byte[]> record : consumerRecords) {
        try {
            if (record.topic().equals(_partitionMetricSampleStoreTopic)) {
                PartitionMetricSample sample = PartitionMetricSample.fromBytes(record.value());
                partitionMetricSamples.add(sample);
                LOG.trace("Loaded partition metric sample {}", sample);
            } else if (record.topic().equals(_brokerMetricSampleStoreTopic)) {
                BrokerMetricSample sample = BrokerMetricSample.fromBytes(record.value());
                sample.close(record.timestamp());
                brokerMetricSamples.add(sample);
                LOG.trace("Loaded broker metric sample {}", sample);
            }
        } catch (Exception e) {
            // handling of individual deserialization failures is assumed: log and skip the record
            LOG.warn("Failed to load a metric sample from {}", record.topic(), e);
        }
    }
    _numPartitionMetricSamples.getAndAdd(partitionMetricSamples.size());
    _numBrokerMetricSamples.getAndAdd(brokerMetricSamples.size());
    _loadingProgress = (double) _numLoadedSamples.addAndGet(consumerRecords.count()) / _totalSamples.get();
} catch (Exception e) {
    // how overall loading failures are surfaced is not shown in the snippet
    LOG.error("Error while loading metric samples", e);
}

@Override
public List<Message> getListWithoutAck(Long timeout, TimeUnit unit) throws CanalClientException {
    waitClientRunning();
    if (!running) {
        return Lists.newArrayList();
    }
    ConsumerRecords<String, Message> records = kafkaConsumer.poll(unit.toMillis(timeout));
    currentOffsets.clear();
    for (TopicPartition topicPartition : records.partitions()) {
        currentOffsets.put(topicPartition.partition(), kafkaConsumer.position(topicPartition));
    }
    if (!records.isEmpty()) {
        List<Message> messages = new ArrayList<>();
        for (ConsumerRecord<String, Message> record : records) {
            messages.add(record.value());
        }
        return messages;
    }
    return Lists.newArrayList();
}

this.headerMapper.toHeaders(record.headers(), rawHeaders);
rawHeaders.put(KafkaHeaders.NATIVE_HEADERS, record.headers());
rawHeaders.put(KafkaHeaders.RECEIVED_MESSAGE_KEY, record.key());
rawHeaders.put(KafkaHeaders.RECEIVED_TOPIC, record.topic());
rawHeaders.put(KafkaHeaders.RECEIVED_PARTITION_ID, record.partition());
rawHeaders.put(KafkaHeaders.OFFSET, record.offset());
rawHeaders.put(KafkaHeaders.TIMESTAMP_TYPE, record.timestampType().name());
rawHeaders.put(KafkaHeaders.RECEIVED_TIMESTAMP, record.timestamp());

JavaInputDStream<ConsumerRecord<K,M>> kafkaDStream = buildInputDStream(streamingContext);
JavaPairDStream<K,M> pairDStream =
    kafkaDStream.mapToPair(mAndM -> new Tuple2<>(mAndM.key(), mAndM.value()));

KafkaConsumer<String,U> consumer = new KafkaConsumer<>(
    ConfigUtils.keyValueToProperties(
        "group.id", "OryxGroup-" + getLayerName() + "-" + UUID.randomUUID()
        // remaining consumer properties are not shown in the snippet
    ));
consumer.subscribe(Collections.singletonList(updateTopic));
consumerIterator = new ConsumeDataIterator<>(consumer);

@Test
public void testWriteReadFields2() throws IOException {
    ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>("topic", 0, 3L, "key".getBytes(), "value".getBytes());
    KafkaWritable kafkaWritable = new KafkaWritable(record.partition(), record.offset(), record.timestamp(),
        record.value(), "thisKey".getBytes());
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream w = new DataOutputStream(baos);
    kafkaWritable.write(w);
    w.flush();
    ByteArrayInputStream input = new ByteArrayInputStream(baos.toByteArray());
    DataInputStream inputStream = new DataInputStream(input);
    KafkaWritable actualKafkaWritable = new KafkaWritable();
    actualKafkaWritable.readFields(inputStream);
    Assert.assertEquals(kafkaWritable, actualKafkaWritable);
}

@SuppressWarnings(value = "unchecked")
protected void readMessages() {
    logger.trace("Reading messages from Kafka...");
    checkForMessageThrottling();
    ConsumerRecords<String, String> records = realConsumer.poll(pollTime);
    if (records != null) {
        messagesReadMetric.incSuccess(records.count());
        for (ConsumerRecord<String, String> record : records) {
            String rawMessage = record.value();
            logger.trace(append("rawMessage", rawMessage), "Read Kafka message ({}/{})", record.partition(), record.offset());
            messageQueue.add(record);
        }
    }
}