@Override @SuppressWarnings("deprecation") public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) { // This will ensure that we get the cluster metadata when onConsume is called for the first time // as subsequent compareAndSet operations will fail. CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get()); Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>(); for (TopicPartition tp : records.partitions()) { List<ConsumerRecord<String, String>> lst = new ArrayList<>(); for (ConsumerRecord<String, String> record: records.records(tp)) { lst.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(), record.timestamp(), record.timestampType(), record.checksum(), record.serializedKeySize(), record.serializedValueSize(), record.key(), record.value().toUpperCase(Locale.ROOT))); } recordMap.put(tp, lst); } return new ConsumerRecords<String, String>(recordMap); }
@Override
protected void emitRecord(
        T record,
        KafkaTopicPartitionState<TopicPartition> partition,
        long offset,
        ConsumerRecord<?, ?> consumerRecord) throws Exception {
    // we attach the Kafka 0.10 timestamp here
    emitRecordWithTimestamp(record, partition, offset, consumerRecord.timestamp());
}
@Override
public boolean matches(ConsumerRecord<?, ?> value) {
    return value != null
            && value.timestampType() == this.type
            && value.timestamp() == this.ts;
}
void set(ConsumerRecord<byte[], byte[]> consumerRecord) {
    this.partition = consumerRecord.partition();
    this.timestamp = consumerRecord.timestamp();
    this.offset = consumerRecord.offset();
    this.value = consumerRecord.value();
    this.recordKey = consumerRecord.key();
}
@Override
protected boolean matches(Object item, Description mismatchDescription) {
    @SuppressWarnings(UNCHECKED)
    ConsumerRecord<Object, Object> record = (ConsumerRecord<Object, Object>) item;
    boolean matches = record != null
            && record.timestampType() == this.type
            && record.timestamp() == this.ts;
    if (!matches) {
        mismatchDescription.appendText(IS_SPACE).appendValue(record);
    }
    return matches;
}
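The two matcher methods above pair naturally with Hamcrest's assertThat. A hedged usage sketch, assuming a static factory hasTimestamp(type, ts) that constructs the matcher (the factory is not part of the original snippet):

// hasTimestamp(...) is a hypothetical factory for the matcher defined above
assertThat(consumerRecord, hasTimestamp(TimestampType.CREATE_TIME, 42L));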
@Override
public void readMessage(ConsumerRecord<?, ?> record) {
    try {
        byte[] recordArray = (byte[]) record.value();
        JsonObject jsonObj = new JsonParser()
                .parse(new String(recordArray, Charsets.UTF_8))
                .getAsJsonObject();
        jsonObj.addProperty(KAFKA_TOPIC.getFieldName(), record.topic());
        jsonObj.addProperty(KAFKA_PARTITION_ID.getFieldName(), record.partition());
        jsonObj.addProperty(KAFKA_OFFSET.getFieldName(), record.offset());
        jsonObj.addProperty(KAFKA_TIMESTAMP.getFieldName(), record.timestamp());
        jsonObj.addProperty(KAFKA_MSG_KEY.getFieldName(),
                record.key() != null ? record.key().toString() : null);
        jsonReader.setSource(jsonObj.toString().getBytes(Charsets.UTF_8));
        jsonReader.write(writer);
    } catch (IOException e) {
        throw UserException.dataReadError(e)
                .message(e.getMessage())
                .addContext("MessageReader", JsonMessageReader.class.getName())
                .build(logger);
    }
}
@Test @SuppressWarnings("deprecation") public void testOldConstructor() { String topic = "topic"; int partition = 0; long offset = 23; String key = "key"; String value = "value"; ConsumerRecord<String, String> record = new ConsumerRecord<>(topic, partition, offset, key, value); assertEquals(topic, record.topic()); assertEquals(partition, record.partition()); assertEquals(offset, record.offset()); assertEquals(key, record.key()); assertEquals(value, record.value()); assertEquals(TimestampType.NO_TIMESTAMP_TYPE, record.timestampType()); assertEquals(ConsumerRecord.NO_TIMESTAMP, record.timestamp()); assertEquals(ConsumerRecord.NULL_CHECKSUM, record.checksum()); assertEquals(ConsumerRecord.NULL_SIZE, record.serializedKeySize()); assertEquals(ConsumerRecord.NULL_SIZE, record.serializedValueSize()); assertEquals(Optional.empty(), record.leaderEpoch()); assertEquals(new RecordHeaders(), record.headers()); }
while (iterator.hasNext()) {
    ConsumerRecord<String, byte[]> record = iterator.next();
    // re-enqueue records that have waited longer than the configured delay for this level
    if (System.currentTimeMillis() - record.timestamp() > reconsumeCfg.get(level).get(0)) {
        listJob.add(new MessageHandler(record));
        iterator.remove();
    }
}
private void enhanceHeaders(RecordHeaders kafkaHeaders, ConsumerRecord<?, ?> record, Exception exception) {
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_TOPIC,
            record.topic().getBytes(StandardCharsets.UTF_8)));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_PARTITION,
            ByteBuffer.allocate(Integer.BYTES).putInt(record.partition()).array()));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_OFFSET,
            ByteBuffer.allocate(Long.BYTES).putLong(record.offset()).array()));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP,
            ByteBuffer.allocate(Long.BYTES).putLong(record.timestamp()).array()));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE,
            record.timestampType().toString().getBytes(StandardCharsets.UTF_8)));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_EXCEPTION_FQCN,
            exception.getClass().getName().getBytes(StandardCharsets.UTF_8)));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_EXCEPTION_MESSAGE,
            exception.getMessage().getBytes(StandardCharsets.UTF_8)));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_EXCEPTION_STACKTRACE,
            getStackTraceAsString(exception).getBytes(StandardCharsets.UTF_8)));
}
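On the consuming side of the dead-letter topic, those headers can be decoded symmetrically. A minimal sketch, assuming dltRecord is a ConsumerRecord polled from the dead-letter topic (it is not part of the original snippet):

// dltRecord is a hypothetical ConsumerRecord<?, ?> polled from the dead-letter topic
Header topicHeader = dltRecord.headers().lastHeader(KafkaHeaders.DLT_ORIGINAL_TOPIC);
String originalTopic = new String(topicHeader.value(), StandardCharsets.UTF_8);
Header offsetHeader = dltRecord.headers().lastHeader(KafkaHeaders.DLT_ORIGINAL_OFFSET);
long originalOffset = ByteBuffer.wrap(offsetHeader.value()).getLong();
Header timestampHeader = dltRecord.headers().lastHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP);
long originalTimestamp = ByteBuffer.wrap(timestampHeader.value()).getLong();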
sample.close(record.timestamp());
brokerMetricSamples.add(sample);
LOG.trace("Loaded broker metric sample {}", sample);
offsets.add(record.offset());
timestampTypes.add(record.timestampType().name());
timestamps.add(record.timestamp());
if (this.headerMapper != null) {
    Map<String, Object> converted = new HashMap<>();
private static void sendData(List<ConsumerRecord<byte[], byte[]>> recordList, @Nullable String txId) {
    LOG.info("Setting up kafka producer");
    Properties producerProps = new Properties();
    producerProps.setProperty("bootstrap.servers", KafkaBrokerResource.BROKER_IP_PORT);
    producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    producerProps.setProperty("max.block.ms", "10000");
    if (txId != null) {
        producerProps.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, txId);
    }
    producer = new KafkaProducer<>(producerProps);
    LOG.info("kafka producer started");
    LOG.info("Sending [{}] records", RECORDS.size());
    if (txId != null) {
        producer.initTransactions();
        producer.beginTransaction();
    }
    recordList.stream()
            .map(consumerRecord -> new ProducerRecord<>(consumerRecord.topic(), consumerRecord.partition(),
                    consumerRecord.timestamp(), consumerRecord.key(), consumerRecord.value()))
            .forEach(producerRecord -> producer.send(producerRecord));
    if (txId != null) {
        producer.commitTransaction();
    }
    producer.close();
}
rawHeaders.put(KafkaHeaders.OFFSET, record.offset());
rawHeaders.put(KafkaHeaders.TIMESTAMP_TYPE, record.timestampType().name());
rawHeaders.put(KafkaHeaders.RECEIVED_TIMESTAMP, record.timestamp());
@Test
public void testWriteReadFields2() throws IOException {
    ConsumerRecord<byte[], byte[]> record =
            new ConsumerRecord<>("topic", 0, 3L, "key".getBytes(), "value".getBytes());
    KafkaWritable kafkaWritable = new KafkaWritable(record.partition(), record.offset(),
            record.timestamp(), record.value(), "thisKey".getBytes());
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream w = new DataOutputStream(baos);
    kafkaWritable.write(w);
    w.flush();
    ByteArrayInputStream input = new ByteArrayInputStream(baos.toByteArray());
    DataInputStream inputStream = new DataInputStream(input);
    KafkaWritable actualKafkaWritable = new KafkaWritable();
    actualKafkaWritable.readFields(inputStream);
    Assert.assertEquals(kafkaWritable, actualKafkaWritable);
}
@Test
public void testWriteReadFields() throws IOException {
    ConsumerRecord<byte[], byte[]> record =
            new ConsumerRecord<>("topic", 0, 3L, "key".getBytes(), "value".getBytes());
    KafkaWritable kafkaWritable = new KafkaWritable(record.partition(), record.offset(),
            record.timestamp(), record.value(), null);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream w = new DataOutputStream(baos);
    kafkaWritable.write(w);
    w.flush();
    ByteArrayInputStream input = new ByteArrayInputStream(baos.toByteArray());
    DataInputStream inputStream = new DataInputStream(input);
    KafkaWritable actualKafkaWritable = new KafkaWritable();
    actualKafkaWritable.readFields(inputStream);
    Assert.assertEquals(kafkaWritable, actualKafkaWritable);
}
private void checkData() {
    Set<TopicPartition> assignment = Collections.singleton(new TopicPartition(TOPIC, 0));
    consumer.assign(assignment);
    consumer.seekToBeginning(assignment);
    long numRecords = 0;
    boolean emptyPoll = false;
    while (numRecords < RECORD_NUMBER && !emptyPoll) {
        ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(1000));
        Assert.assertFalse(records.records(new TopicPartition(TOPIC, 0))
                .stream()
                .anyMatch(consumerRecord -> !RECORDS_WRITABLES.contains(
                        new KafkaWritable(0, consumerRecord.timestamp(),
                                consumerRecord.value(), consumerRecord.key()))));
        emptyPoll = records.isEmpty();
        numRecords += records.count();
    }
    Assert.assertEquals(RECORD_NUMBER, numRecords);
}
@Test
public void testRecordReader() {
    List<KafkaWritable> serRecords = expectedRecords.stream()
            .map(consumerRecord -> new KafkaWritable(consumerRecord.partition(), consumerRecord.offset(),
                    consumerRecord.timestamp(), consumerRecord.value(), consumerRecord.key()))
            .collect(Collectors.toList());
    KafkaRecordReader recordReader = new KafkaRecordReader();
    TaskAttemptContext context = new TaskAttemptContextImpl(this.conf, new TaskAttemptID());
    recordReader.initialize(new KafkaInputSplit(currentTopic, 0, 50L, 100L, null), context);
    for (int i = 50; i < 100; ++i) {
        KafkaWritable record = new KafkaWritable();
        Assert.assertTrue(recordReader.next(null, record));
        Assert.assertEquals(serRecords.get(i), record);
    }
    recordReader.close();
}
/**
 * Renders the Kafka record into a view.
 *
 * <p>A user can customize the way in which a Kafka record is rendered by altering
 * the "stellar.kafka.message.view" property.
 *
 * @param record The Kafka record to render.
 * @param properties The properties that allow a user to customize the rendered view of a record.
 * @return The rendered view of the record; either a detailed map of its fields or just its value.
 */
private static Object render(ConsumerRecord<String, String> record, Properties properties) {
    LOG.debug("Render message; topic={}, partition={}, offset={}",
            record.topic(), record.partition(), record.offset());
    Object result;
    if (MESSAGE_VIEW_RICH.equals(getMessageView(properties))) {
        // build the detailed view of the record
        Map<String, Object> view = new HashMap<>();
        view.put("value", record.value());
        view.put("topic", record.topic());
        view.put("partition", record.partition());
        view.put("offset", record.offset());
        view.put("timestamp", record.timestamp());
        view.put("key", record.key());
        result = view;
    } else {
        // default to the simple view
        result = record.value();
    }
    return result;
}
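A hedged usage sketch of the method above, assuming "rich" is the property value that MESSAGE_VIEW_RICH resolves to (an assumption; the constant's value is not shown in the snippet):

// "rich" is assumed to be the value behind MESSAGE_VIEW_RICH
Properties properties = new Properties();
properties.put("stellar.kafka.message.view", "rich");
Object view = render(record, properties); // a Map with value/topic/partition/offset/timestamp/key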