@Override @SuppressWarnings("deprecation") public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) { // This will ensure that we get the cluster metadata when onConsume is called for the first time // as subsequent compareAndSet operations will fail. CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get()); Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>(); for (TopicPartition tp : records.partitions()) { List<ConsumerRecord<String, String>> lst = new ArrayList<>(); for (ConsumerRecord<String, String> record: records.records(tp)) { lst.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(), record.timestamp(), record.timestampType(), record.checksum(), record.serializedKeySize(), record.serializedValueSize(), record.key(), record.value().toUpperCase(Locale.ROOT))); } recordMap.put(tp, lst); } return new ConsumerRecords<String, String>(recordMap); }
@Override
public boolean matches(ConsumerRecord<?, ?> value) {
    // Match only non-null records whose timestamp type and timestamp both equal
    // the expected values held by this matcher.
    if (value == null) {
        return false;
    }
    return value.timestampType() == this.type && value.timestamp() == this.ts;
}
@Override
protected boolean matches(Object item, Description mismatchDescription) {
    // Hamcrest passes the candidate as Object; the cast is safe for null as well.
    @SuppressWarnings(UNCHECKED)
    ConsumerRecord<Object, Object> candidate = (ConsumerRecord<Object, Object>) item;
    boolean matched = candidate != null
            && candidate.timestampType() == this.type
            && candidate.timestamp() == this.ts;
    if (!matched) {
        // Describe the actual record so assertion failures are self-explanatory.
        mismatchDescription.appendText(IS_SPACE).appendValue(candidate);
    }
    return matched;
}
@Test
@SuppressWarnings("deprecation")
public void testOldConstructor() {
    // The legacy (topic, partition, offset, key, value) constructor must populate
    // every omitted field with its documented sentinel/default value.
    String expectedTopic = "topic";
    int expectedPartition = 0;
    long expectedOffset = 23;
    String expectedKey = "key";
    String expectedValue = "value";
    ConsumerRecord<String, String> consumerRecord =
            new ConsumerRecord<>(expectedTopic, expectedPartition, expectedOffset, expectedKey, expectedValue);
    // Fields supplied explicitly are carried through unchanged.
    assertEquals(expectedTopic, consumerRecord.topic());
    assertEquals(expectedPartition, consumerRecord.partition());
    assertEquals(expectedOffset, consumerRecord.offset());
    assertEquals(expectedKey, consumerRecord.key());
    assertEquals(expectedValue, consumerRecord.value());
    // Fields the old constructor cannot express fall back to sentinels.
    assertEquals(TimestampType.NO_TIMESTAMP_TYPE, consumerRecord.timestampType());
    assertEquals(ConsumerRecord.NO_TIMESTAMP, consumerRecord.timestamp());
    assertEquals(ConsumerRecord.NULL_CHECKSUM, consumerRecord.checksum());
    assertEquals(ConsumerRecord.NULL_SIZE, consumerRecord.serializedKeySize());
    assertEquals(ConsumerRecord.NULL_SIZE, consumerRecord.serializedValueSize());
    assertEquals(Optional.empty(), consumerRecord.leaderEpoch());
    assertEquals(new RecordHeaders(), consumerRecord.headers());
}
// Collect per-record metadata into the parallel batch-level accumulators.
partitions.add(record.partition()); offsets.add(record.offset()); timestampTypes.add(record.timestampType().name()); timestamps.add(record.timestamp());
// Headers are mapped only when a header mapper was configured.
if (this.headerMapper != null) {
/**
 * Adds dead-letter diagnostic headers (original topic/partition/offset/timestamp and
 * exception details) to the outgoing record's headers.
 *
 * @param kafkaHeaders the headers of the record being published to the DLT
 * @param record the original consumed record that failed processing
 * @param exception the failure being reported
 */
private void enhanceHeaders(RecordHeaders kafkaHeaders, ConsumerRecord<?, ?> record, Exception exception) {
    kafkaHeaders.add(
            new RecordHeader(KafkaHeaders.DLT_ORIGINAL_TOPIC, record.topic().getBytes(StandardCharsets.UTF_8)));
    // Partition and offset/timestamp are encoded as big-endian int/long bytes.
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_PARTITION,
            ByteBuffer.allocate(Integer.BYTES).putInt(record.partition()).array()));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_OFFSET,
            ByteBuffer.allocate(Long.BYTES).putLong(record.offset()).array()));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP,
            ByteBuffer.allocate(Long.BYTES).putLong(record.timestamp()).array()));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE,
            record.timestampType().toString().getBytes(StandardCharsets.UTF_8)));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_EXCEPTION_FQCN,
            exception.getClass().getName().getBytes(StandardCharsets.UTF_8)));
    // Fix: Throwable.getMessage() may return null (e.g. a bare NullPointerException),
    // which previously caused an NPE on getBytes(); fall back to toString() instead.
    String message = exception.getMessage();
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_EXCEPTION_MESSAGE,
            (message != null ? message : exception.toString()).getBytes(StandardCharsets.UTF_8)));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_EXCEPTION_STACKTRACE,
            getStackTraceAsString(exception).getBytes(StandardCharsets.UTF_8)));
}
// Expose the consumed record's Kafka metadata as message headers.
rawHeaders.put(KafkaHeaders.RECEIVED_PARTITION_ID, record.partition()); rawHeaders.put(KafkaHeaders.OFFSET, record.offset()); rawHeaders.put(KafkaHeaders.TIMESTAMP_TYPE, record.timestampType().name()); rawHeaders.put(KafkaHeaders.RECEIVED_TIMESTAMP, record.timestamp());
/** Returns the timestamp type of the wrapped delegate record. */
@Override public TimestampType timestampType() { return this.record.timestampType(); }
@Override
public boolean matches(ConsumerRecord<?, ?> value) {
    // Break the comparison into named parts for readability; behavior is unchanged.
    if (value == null) {
        return false;
    }
    boolean sameType = value.timestampType() == this.type;
    boolean sameTimestamp = value.timestamp() == this.ts;
    return sameType && sameTimestamp;
}
@Override
protected boolean matches(Object item, Description mismatchDescription) {
    @SuppressWarnings(UNCHECKED)
    ConsumerRecord<Object, Object> actual = (ConsumerRecord<Object, Object>) item;
    // A null record can never match; otherwise both timestamp fields must agree.
    if (actual != null && actual.timestampType() == this.type && actual.timestamp() == this.ts) {
        return true;
    }
    // Report the actual record so the mismatch message is self-explanatory.
    mismatchDescription.appendText(IS_SPACE).appendValue(actual);
    return false;
}
/**
 * Maps the raw record's Kafka timestamp type onto the corresponding
 * {@code KafkaTimestampType}, or NO_TIMESTAMP_TYPE when record timestamps
 * are not available.
 */
public KafkaTimestampType getRecordTimestampType(ConsumerRecord<byte[], byte[]> rawRecord) {
    // NOTE(review): relies on the two enums having matching ordinal order — confirm.
    return hasRecordTimestamp
            ? KafkaTimestampType.forOrdinal(rawRecord.timestampType().ordinal())
            : KafkaTimestampType.NO_TIMESTAMP_TYPE;
}
/**
 * Maps the raw record's Kafka timestamp type onto the corresponding
 * {@code KafkaTimestampType}, or NO_TIMESTAMP_TYPE when record timestamps
 * are not available.
 */
public KafkaTimestampType getRecordTimestampType(ConsumerRecord<byte[], byte[]> rawRecord) {
    if (!hasRecordTimestamp) {
        return KafkaTimestampType.NO_TIMESTAMP_TYPE;
    }
    // NOTE(review): relies on the two enums having matching ordinal order — confirm.
    return KafkaTimestampType.forOrdinal(rawRecord.timestampType().ordinal());
}
/**
 * Renders a human-readable description of the executing stage, optionally including
 * the source or consumed record involved.
 *
 * @param includeMessage whether to append the record being processed
 * @return the formatted description, always terminated with a period
 */
public String toString(boolean includeMessage) {
    StringBuilder sb = new StringBuilder()
            .append("Executing stage '")
            .append(stage().name())
            .append("' with class '")
            .append(executingClass() == null ? "null" : executingClass().getName())
            .append('\'');
    if (includeMessage && sourceRecord() != null) {
        sb.append(", where source record is = ").append(sourceRecord());
    }
    else if (includeMessage && consumerRecord() != null) {
        ConsumerRecord<byte[], byte[]> consumed = consumerRecord();
        sb.append(", where consumed record is ")
                .append("{topic='").append(consumed.topic()).append('\'')
                .append(", partition=").append(consumed.partition())
                .append(", offset=").append(consumed.offset());
        // Only meaningful timestamp types are printed; NO_TIMESTAMP_TYPE is omitted.
        TimestampType tsType = consumed.timestampType();
        if (tsType == TimestampType.CREATE_TIME || tsType == TimestampType.LOG_APPEND_TIME) {
            sb.append(", timestamp=").append(consumed.timestamp())
                    .append(", timestampType=").append(tsType);
        }
        sb.append("}");
    }
    sb.append('.');
    return sb.toString();
}
// Serialize record metadata in a fixed field order; the reader must consume fields
// in exactly this order. `timestampType().name` is the enum's public name field.
dataOut.writeLong(record.offset()); dataOut.writeLong(record.timestamp()); dataOut.writeUTF(record.timestampType().name); dataOut.writeLong(record.checksum()); dataOut.writeInt(record.serializedKeySize());
/**
 * Copy constructor: populates this record from every field of the given Kafka
 * {@link ConsumerRecord}.
 *
 * <p>Fix: the value-size argument previously passed {@code serializedKeySize()} a
 * second time instead of {@code serializedValueSize()}, so the copied record
 * reported a wrong serialized value size.
 *
 * @param record the consumed record to copy
 */
public DBusConsumerRecord(ConsumerRecord<K, V> record) {
    this(record.topic(), record.partition(), record.offset(), record.timestamp(),
            record.timestampType(), record.checksum(), record.serializedKeySize(),
            record.serializedValueSize(), record.key(), record.value());
}
/**
 * Wraps a consumed Kafka record together with the offset handle used to acknowledge it.
 * All fields of the incoming record are copied via the (deprecated) checksum-bearing
 * super constructor, hence the suppression.
 *
 * @param consumerRecord the record received from the Kafka consumer
 * @param receiverOffset handle for acknowledging/committing this record's offset
 */
@SuppressWarnings("deprecation") public ReceiverRecord(ConsumerRecord<K, V> consumerRecord, ReceiverOffset receiverOffset) { super(consumerRecord.topic(), consumerRecord.partition(), consumerRecord.offset(), consumerRecord.timestamp(), consumerRecord.timestampType(), consumerRecord.checksum(), consumerRecord.serializedKeySize(), consumerRecord.serializedValueSize(), consumerRecord.key(), consumerRecord.value(), consumerRecord.headers()); this.receiverOffset = receiverOffset; }
/**
 * Wraps a consumed Kafka record together with the offset handle used to acknowledge it.
 * All fields of the incoming record are copied via the (deprecated) checksum-bearing
 * super constructor, hence the suppression.
 *
 * @param consumerRecord the record received from the Kafka consumer
 * @param receiverOffset handle for acknowledging/committing this record's offset
 */
@SuppressWarnings("deprecation") public ReceiverRecord(ConsumerRecord<K, V> consumerRecord, ReceiverOffset receiverOffset) { super(consumerRecord.topic(), consumerRecord.partition(), consumerRecord.offset(), consumerRecord.timestamp(), consumerRecord.timestampType(), consumerRecord.checksum(), consumerRecord.serializedKeySize(), consumerRecord.serializedValueSize(), consumerRecord.key(), consumerRecord.value(), consumerRecord.headers()); this.receiverOffset = receiverOffset; }
/**
 * Converts a raw consumed record into a {@link SinkRecord} (key, value, and header
 * conversion each run inside the retry/tolerance operator) and then applies the
 * task's transformation chain.
 *
 * @param msg the raw record from the consumer
 * @return the transformed record, or {@code null} when conversion failed and the
 *         failure was tolerated by {@code retryWithToleranceOperator}
 */
private SinkRecord convertAndTransformRecord(final ConsumerRecord<byte[], byte[]> msg) {
    SchemaAndValue keyAndSchema = retryWithToleranceOperator.execute(() -> keyConverter.toConnectData(msg.topic(), msg.key()), Stage.KEY_CONVERTER, keyConverter.getClass());
    SchemaAndValue valueAndSchema = retryWithToleranceOperator.execute(() -> valueConverter.toConnectData(msg.topic(), msg.value()), Stage.VALUE_CONVERTER, valueConverter.getClass());
    Headers headers = retryWithToleranceOperator.execute(() -> convertHeadersFor(msg), Stage.HEADER_CONVERTER, headerConverter.getClass());
    // A tolerated failure in any converter short-circuits the whole record.
    if (retryWithToleranceOperator.failed()) { return null; }
    Long timestamp = ConnectUtils.checkAndConvertTimestamp(msg.timestamp());
    SinkRecord origRecord = new SinkRecord(msg.topic(), msg.partition(), keyAndSchema.schema(), keyAndSchema.value(), valueAndSchema.schema(), valueAndSchema.value(), msg.offset(), timestamp, msg.timestampType(), headers);
    log.trace("{} Applying transformations to record in topic '{}' partition {} at offset {} and timestamp {} with key {} and value {}", this, msg.topic(), msg.partition(), msg.offset(), timestamp, keyAndSchema.value(), valueAndSchema.value());
    // The transformation chain may itself drop the record by returning null.
    return transformationChain.apply(origRecord);
}
/**
 * Deserializes a raw consumed record, tracks the consumer high watermark, and
 * optionally reports the record to the auditor.
 *
 * <p>Fix: the key was previously deserialized twice — once into {@code key} and again
 * when constructing the returned record; the first result is now reused.
 *
 * @param consumerRecord the raw record from the underlying consumer
 * @return the deserialized record, or {@code null} when the value is an incomplete
 *         (partial) message segment
 */
private ConsumerRecord<K, V> handleConsumerRecord(ConsumerRecord<byte[], byte[]> consumerRecord) {
    TopicPartition tp = new TopicPartition(consumerRecord.topic(), consumerRecord.partition());
    ConsumerRecord<K, V> handledRecord = null;
    K key = _keyDeserializer.deserialize(tp.topic(), consumerRecord.key());
    byte[] valueBytes = parseAndMaybeTrackRecord(tp, consumerRecord.offset(), consumerRecord.value());
    if (valueBytes != INCOMPLETE_RESULT) {
        V value = _valueDeserializer.deserialize(tp.topic(), valueBytes);
        if (_auditor != null) {
            long sizeInBytes = (consumerRecord.key() == null ? 0 : consumerRecord.key().length)
                    + (valueBytes == null ? 0 : valueBytes.length);
            _auditor.record(_auditor.auditToken(key, value), tp.topic(), consumerRecord.timestamp(),
                    1L, sizeInBytes, AuditType.SUCCESS);
        }
        // Record the highest fully-consumed offset for this partition.
        _partitionConsumerHighWatermark.computeIfAbsent(tp, _storedConsumerHighWatermark)
                ._currentConsumerHighWatermark = consumerRecord.offset();
        handledRecord = new ConsumerRecord<>(
                consumerRecord.topic(),
                consumerRecord.partition(),
                consumerRecord.offset(),
                consumerRecord.timestamp(),
                consumerRecord.timestampType(),
                consumerRecord.checksum(),
                consumerRecord.serializedKeySize(),
                valueBytes == null ? 0 : valueBytes.length,
                key, // reuse the key deserialized above instead of deserializing a second time
                value);
    }
    return handledRecord;
}
record.offset(), record.timestamp(), record.timestampType(),
// NOTE(review): 0L occupies what looks like the checksum slot of the copied-record
// constructor (offset, timestamp, timestampType, checksum, keySize, ...) — confirm
// against the target constructor's signature.
0L, record.serializedKeySize(),