private int readNextBatch(VectorizedRowBatch vectorizedRowBatch,
                          Iterator<ConsumerRecord<byte[], byte[]>> recordIterator) throws SerDeException {
    int rowsCount = 0;
    // Fill the batch until the iterator is exhausted or the batch reaches its maximum size.
    while (recordIterator.hasNext() && rowsCount < vectorizedRowBatch.getMaxSize()) {
        ConsumerRecord<byte[], byte[]> kRecord = recordIterator.next();
        kafkaWritable.set(kRecord);
        // Track consumed bytes using the serialized key and value sizes.
        readBytes += kRecord.serializedKeySize() + kRecord.serializedValueSize();
        // Deserialize and assign columns only when at least one column is projected.
        if (projectedColumns.length > 0) {
            serDe.deserializeKWritable(kafkaWritable, row);
            for (int i : projectedColumns) {
                vectorAssignRow.assignRowColumn(vectorizedRowBatch, rowsCount, i, row[i]);
            }
        }
        rowsCount++;
    }
    vectorizedRowBatch.size = rowsCount;
    consumedRecords += rowsCount;
    cleanRowBoat();
    return rowsCount;
}
private List<TopicSensors.SensorMetric<ConsumerRecord>> buildSensors(final String key) {
    final List<TopicSensors.SensorMetric<ConsumerRecord>> sensors = new ArrayList<>();

    // Note: synchronized due to metrics registry not handling concurrent add/check-exists
    // activity in a reliable way
    synchronized (this.metrics) {
        addSensor(key, CONSUMER_MESSAGES_PER_SEC, new Rate(), sensors, false);
        addSensor(key, CONSUMER_TOTAL_MESSAGES, new Total(), sensors, false);
        addSensor(key, CONSUMER_TOTAL_BYTES, new Total(), sensors, false, (r) -> {
            if (r == null) {
                return 0.0;
            } else {
                return ((double) r.serializedValueSize() + r.serializedKeySize());
            }
        });
    }
    return sensors;
}
@Override @SuppressWarnings("deprecation") public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) { // This will ensure that we get the cluster metadata when onConsume is called for the first time // as subsequent compareAndSet operations will fail. CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get()); Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>(); for (TopicPartition tp : records.partitions()) { List<ConsumerRecord<String, String>> lst = new ArrayList<>(); for (ConsumerRecord<String, String> record: records.records(tp)) { lst.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(), record.timestamp(), record.timestampType(), record.checksum(), record.serializedKeySize(), record.serializedValueSize(), record.key(), record.value().toUpperCase(Locale.ROOT))); } recordMap.put(tp, lst); } return new ConsumerRecords<String, String>(recordMap); }
@Test @SuppressWarnings("deprecation") public void testOldConstructor() { String topic = "topic"; int partition = 0; long offset = 23; String key = "key"; String value = "value"; ConsumerRecord<String, String> record = new ConsumerRecord<>(topic, partition, offset, key, value); assertEquals(topic, record.topic()); assertEquals(partition, record.partition()); assertEquals(offset, record.offset()); assertEquals(key, record.key()); assertEquals(value, record.value()); assertEquals(TimestampType.NO_TIMESTAMP_TYPE, record.timestampType()); assertEquals(ConsumerRecord.NO_TIMESTAMP, record.timestamp()); assertEquals(ConsumerRecord.NULL_CHECKSUM, record.checksum()); assertEquals(ConsumerRecord.NULL_SIZE, record.serializedKeySize()); assertEquals(ConsumerRecord.NULL_SIZE, record.serializedValueSize()); assertEquals(Optional.empty(), record.leaderEpoch()); assertEquals(new RecordHeaders(), record.headers()); }
private int getRecordSize(ConsumerRecord<K, V> r) {
    // serializedKeySize() returns ConsumerRecord.NULL_SIZE (-1) for a null key,
    // so treat that case as zero bytes.
    int keySize = (r.key() == null) ? 0 : r.serializedKeySize();
    return keySize + r.serializedValueSize();
}
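A minimal sketch (not from the source): serializedValueSize() likewise returns ConsumerRecord.NULL_SIZE (-1) when the value is null, so a variant that guards both sides avoids under-counting tombstone records; the method name getRecordSizeNullSafe is hypothetical.

private int getRecordSizeNullSafe(ConsumerRecord<K, V> r) {
    // Both sizes report ConsumerRecord.NULL_SIZE (-1) for a null key/value, so clamp each to zero.
    int keySize = (r.key() == null) ? 0 : r.serializedKeySize();
    int valueSize = (r.value() == null) ? 0 : r.serializedValueSize();
    return keySize + valueSize;
}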
public DBusConsumerRecord(ConsumerRecord<K, V> record) {
    this(record.topic(), record.partition(), record.offset(), record.timestamp(),
        record.timestampType(), record.checksum(), record.serializedKeySize(),
        record.serializedValueSize(), record.key(), record.value());
}
dataOut.writeUTF(record.timestampType().name);
dataOut.writeLong(record.checksum());
dataOut.writeInt(record.serializedKeySize());
dataOut.writeInt(record.serializedValueSize());
private int readNextBatch(VectorizedRowBatch vectorizedRowBatch,
                          Iterator<ConsumerRecord<byte[], byte[]>> recordIterator) throws SerDeException {
    int rowsCount = 0;
    while (recordIterator.hasNext() && rowsCount < vectorizedRowBatch.getMaxSize()) {
        ConsumerRecord<byte[], byte[]> kRecord = recordIterator.next();
        kafkaWritable.set(kRecord);
        readBytes += kRecord.serializedKeySize() + kRecord.serializedValueSize();
        if (projectedColumns.length > 0) {
            ArrayList<Object> row = serDe.deserializeKWritable(kafkaWritable);
            for (int i : projectedColumns) {
                vectorAssignRow.assignRowColumn(vectorizedRowBatch, rowsCount, i, row.get(i));
            }
        }
        rowsCount++;
    }
    vectorizedRowBatch.size = rowsCount;
    consumedRecords += rowsCount;
    return rowsCount;
}
@SuppressWarnings("deprecation") public ReceiverRecord(ConsumerRecord<K, V> consumerRecord, ReceiverOffset receiverOffset) { super(consumerRecord.topic(), consumerRecord.partition(), consumerRecord.offset(), consumerRecord.timestamp(), consumerRecord.timestampType(), consumerRecord.checksum(), consumerRecord.serializedKeySize(), consumerRecord.serializedValueSize(), consumerRecord.key(), consumerRecord.value(), consumerRecord.headers()); this.receiverOffset = receiverOffset; }
@SuppressWarnings("deprecation") public ReceiverRecord(ConsumerRecord<K, V> consumerRecord, ReceiverOffset receiverOffset) { super(consumerRecord.topic(), consumerRecord.partition(), consumerRecord.offset(), consumerRecord.timestamp(), consumerRecord.timestampType(), consumerRecord.checksum(), consumerRecord.serializedKeySize(), consumerRecord.serializedValueSize(), consumerRecord.key(), consumerRecord.value(), consumerRecord.headers()); this.receiverOffset = receiverOffset; }
record.timestampType(), 0L, record.serializedKeySize(), record.serializedValueSize(), key,
TimestampType.CREATE_TIME, rawRecord.checksum(), rawRecord.serializedKeySize(), rawRecord.serializedValueSize(), sourceNode.deserializeKey(rawRecord.topic(), rawRecord.headers(), rawRecord.key()),
private ConsumerRecord<K, V> handleConsumerRecord(ConsumerRecord<byte[], byte[]> consumerRecord) {
    TopicPartition tp = new TopicPartition(consumerRecord.topic(), consumerRecord.partition());
    ConsumerRecord<K, V> handledRecord = null;
    K key = _keyDeserializer.deserialize(tp.topic(), consumerRecord.key());
    byte[] valueBytes = parseAndMaybeTrackRecord(tp, consumerRecord.offset(), consumerRecord.value());
    if (valueBytes != INCOMPLETE_RESULT) {
        V value = _valueDeserializer.deserialize(tp.topic(), valueBytes);
        if (_auditor != null) {
            long sizeInBytes = (consumerRecord.key() == null ? 0 : consumerRecord.key().length)
                + (valueBytes == null ? 0 : valueBytes.length);
            _auditor.record(_auditor.auditToken(key, value), tp.topic(), consumerRecord.timestamp(), 1L,
                sizeInBytes, AuditType.SUCCESS);
        }
        _partitionConsumerHighWatermark.computeIfAbsent(tp, _storedConsumerHighWatermark)
            ._currentConsumerHighWatermark = consumerRecord.offset();
        handledRecord = new ConsumerRecord<>(
            consumerRecord.topic(),
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.timestampType(),
            consumerRecord.checksum(),
            consumerRecord.serializedKeySize(),
            valueBytes == null ? 0 : valueBytes.length,
            _keyDeserializer.deserialize(consumerRecord.topic(), consumerRecord.key()),
            value);
    }
    return handledRecord;
}