// Fragment completed for readability: the truncated context is assumed to wrap a new
// SourceRecord in a collection add, following the copy-with-new-offset pattern used
// elsewhere in this section; `records` and `newOffset` are assumptions, not original code.
records.add(new SourceRecord(currentRecord.sourcePartition(), newOffset,
        currentRecord.topic(), currentRecord.kafkaPartition(),
        currentRecord.keySchema(), currentRecord.key(),
        currentRecord.valueSchema(), currentRecord.value()));
appendAdditional("sourceOffset", record.sourceOffset()); appendAdditional("topic", record.topic()); appendAdditional("kafkaPartition", record.kafkaPartition()); if (detailed) { appendAdditional("keySchema", record.keySchema());
/**
 * Stop buffering source records, and flush any buffered records by replacing their offset with the provided offset.
 * Note that this only replaces the record's {@link SourceRecord#sourceOffset() offset} and does not change the
 * value of the record, which may contain information about the snapshot.
 *
 * @param newOffset the offset that reflects that the snapshot has been completed; may not be null
 * @throws InterruptedException if the thread is interrupted while waiting for the new record to be flushed
 */
protected synchronized void stopBuffering(Map<String, ?> newOffset) throws InterruptedException {
    assert newOffset != null;
    this.buffered.close(record -> {
        if (record == null) return null;
        return new SourceRecord(record.sourcePartition(), newOffset, record.topic(), record.kafkaPartition(),
                record.keySchema(), record.key(), record.valueSchema(), record.value());
    });
    this.current = this.actual;
}
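A minimal usage sketch for the method above: once a snapshot finishes, the reader flushes any buffered record with an offset marking the snapshot as done. The `completeSnapshot` wrapper and the `snapshot_completed` offset key are illustrative assumptions, not part of the original code; `java.util.Map` and `java.util.HashMap` imports are assumed.

// Hypothetical call site inside a reader subclass once the snapshot finishes.
protected void completeSnapshot() throws InterruptedException {
    Map<String, Object> completedOffset = new HashMap<>();
    completedOffset.put("snapshot_completed", true); // illustrative offset key
    stopBuffering(completedOffset); // flushes the buffered record with the final offset
}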
/**
 * Utility method to replace the offset in the given record with the latest. This is used on the last record produced
 * during the snapshot.
 *
 * @param record the record
 * @return the updated record
 */
protected SourceRecord replaceOffset(SourceRecord record) {
    if (record == null) return null;
    Map<String, ?> newOffset = context.source().offset();
    return new SourceRecord(record.sourcePartition(), newOffset, record.topic(), record.kafkaPartition(),
            record.keySchema(), record.key(), record.valueSchema(), record.value());
}
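Because SourceRecord is immutable, replacing the offset means building a new record, so a caller swaps the copy into place. A hedged sketch of applying this to the last record of a snapshot batch; the `finishBatch` helper is an assumption for illustration.

// Sketch: rewrite the offset only on the final record of a snapshot batch.
private List<SourceRecord> finishBatch(List<SourceRecord> records) {
    if (!records.isEmpty()) {
        int last = records.size() - 1;
        records.set(last, replaceOffset(records.get(last)));
    }
    return records;
}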
public static void assertEquals(SourceRecord actual, SourceRecord expected, Predicate<String> ignoreFields,
                                Map<String, RecordValueComparator> comparatorsByName,
                                Map<String, RecordValueComparator> comparatorsBySchemaName) {
    assertThat(actual).isNotNull();
    assertThat(expected).isNotNull();
    assertEquals(null, actual.sourcePartition(), expected.sourcePartition(), "sourcePartition", "",
            ignoreFields, comparatorsByName, comparatorsBySchemaName);
    assertEquals(null, actual.sourceOffset(), expected.sourceOffset(), "sourceOffset", "",
            ignoreFields, comparatorsByName, comparatorsBySchemaName);
    assertThat(actual.topic()).isEqualTo(expected.topic());
    assertThat(actual.kafkaPartition()).isEqualTo(expected.kafkaPartition());
    Schema actualKeySchema = actual.keySchema();
    Schema actualValueSchema = actual.valueSchema();
    Schema expectedKeySchema = expected.keySchema();
    Schema expectedValueSchema = expected.valueSchema();
    if (!Objects.equals(actualKeySchema, expectedKeySchema)) {
        String actualStr = SchemaUtil.asString(actualKeySchema);
        String expectedStr = SchemaUtil.asString(expectedKeySchema);
        assertThat(actualStr).as("The key schema for record with key " + SchemaUtil.asString(actual.key())
                + " did not match expected schema").isEqualTo(expectedStr);
    }
    if (!Objects.equals(actualValueSchema, expectedValueSchema)) {
        String actualStr = SchemaUtil.asString(actualValueSchema);
        String expectedStr = SchemaUtil.asString(expectedValueSchema);
        assertThat(actualStr).isEqualTo(expectedStr);
    }
    assertEquals(actualKeySchema, actual.key(), expected.key(), "key", "",
            ignoreFields, comparatorsByName, comparatorsBySchemaName);
    assertEquals(actualValueSchema, actual.value(), expected.value(), "value", "",
            ignoreFields, comparatorsByName, comparatorsBySchemaName);
}
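A usage sketch for the comparison helper above, skipping a volatile timestamp field via the ignore predicate. The wrapper method and the "ts_ms" field suffix are illustrative assumptions; `java.util.Collections` and `java.util.function.Predicate` imports are assumed.

// Sketch: compare two records while ignoring any field whose name ends in "ts_ms".
public static void assertRecordsMatchIgnoringTimestamps(SourceRecord actual, SourceRecord expected) {
    Predicate<String> ignoreTimestamps = name -> name.endsWith("ts_ms"); // illustrative predicate
    assertEquals(actual, expected, ignoreTimestamps,
            Collections.emptyMap(), Collections.emptyMap());
}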
/**
 * Serialize the source record to document form.
 *
 * @param record the record; may not be null
 * @param keyConverter the converter for the record key's schema and payload
 * @param valueConverter the converter for the record value's schema and payload
 * @return the document form of the source record; never null
 * @throws IOException if there is an error converting the key or value
 */
private Document serializeSourceRecord(SourceRecord record, SchemaAndValueConverter keyConverter,
                                       SchemaAndValueConverter valueConverter) throws IOException {
    Document keyAndSchema = keyConverter.serialize(record.topic(), record.keySchema(), record.key());
    Document valueAndSchema = valueConverter.serialize(record.topic(), record.valueSchema(), record.value());
    Document sourcePartition = Document.create().putAll(record.sourcePartition());
    Document sourceOffset = Document.create().putAll(record.sourceOffset());
    Document parent = Document.create();
    parent.set("sourcePartition", sourcePartition);
    parent.set("sourceOffset", sourceOffset);
    parent.set("topic", record.topic());
    parent.set("kafkaPartition", record.kafkaPartition());
    parent.set("keySchema", keyAndSchema.getDocument("schema"));
    parent.set("key", keyAndSchema.getDocument("payload"));
    parent.set("valueSchema", valueAndSchema.getDocument("schema"));
    parent.set("value", valueAndSchema.getDocument("payload"));
    return parent;
}
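For context, a sketch of calling the serializer and reading fields back out of the resulting Document; the `logSerializedRecord` helper, the `logger`, and the converter fields are assumptions about the surrounding class.

// Sketch: serialize a record and pull fields back out of the Document form.
private void logSerializedRecord(SourceRecord record) throws IOException {
    Document doc = serializeSourceRecord(record, keyConverter, valueConverter);
    logger.info("Serialized record for topic {} with key {}",
            doc.getString("topic"), doc.getDocument("key"));
}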
@Override
public Dataset<Row> getBatch(Option<Offset> start, Offset end) {
    return sqlContext.createDataFrame(
            sharedSourceTaskContext.read(start.isDefined() ? Optional.of(start.get()) : Optional.empty(), end)
                    .stream()
                    .map(record -> new GenericRow(new Object[]{
                            record.topic(),
                            record.kafkaPartition(),
                            keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key()),
                            valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value())
                    }))
                    .collect(Collectors.toList()),
            DATA_SCHEMA);
}
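The DATA_SCHEMA referenced above is not shown in the snippet. Given the four values packed into each GenericRow (topic, partition, converted key, converted value), a plausible definition would look like the following, though the original project's schema may differ.

// Plausible shape for DATA_SCHEMA: one column per element of the GenericRow above.
private static final StructType DATA_SCHEMA = new StructType()
        .add("topic", DataTypes.StringType)
        .add("partition", DataTypes.IntegerType)
        .add("key", DataTypes.BinaryType)     // Converter.fromConnectData(...) yields byte[]
        .add("value", DataTypes.BinaryType);  // Converter.fromConnectData(...) yields byte[]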
@Override
public void serialize(SourceRecord record, JsonGenerator jsonGenerator, SerializerProvider serializerProvider)
        throws IOException, JsonProcessingException {
    Storage storage = new Storage();
    storage.sourcePartition = record.sourcePartition();
    storage.sourceOffset = record.sourceOffset();
    storage.topic = record.topic();
    storage.kafkaPartition = record.kafkaPartition();
    storage.keySchema = record.keySchema();
    storage.key = record.key();
    storage.valueSchema = record.valueSchema();
    storage.value = record.value();
    storage.timestamp = record.timestamp();
    if (null != record.headers()) {
        List<Header> headers = new ArrayList<>();
        for (Header header : record.headers()) {
            headers.add(header);
        }
        storage.headers = headers;
    }
    jsonGenerator.writeObject(storage);
}
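To use a Jackson serializer like this one, register it on an ObjectMapper through a module. The `SourceRecordSerializer` class name is an assumption for the enclosing class of the serialize(...) method above.

// Sketch: wire the serializer into an ObjectMapper and serialize a record to JSON.
private String toJson(SourceRecord record) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    SimpleModule module = new SimpleModule();
    module.addSerializer(SourceRecord.class, new SourceRecordSerializer()); // assumed class name
    mapper.registerModule(module);
    return mapper.writeValueAsString(record);
}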
/**
 * Convert the source record into a producer record.
 *
 * @param record the transformed record
 * @return the producer record, which can be sent to Kafka; null is returned if the input is null or
 *         if an error was encountered during any of the converter stages
 */
private ProducerRecord<byte[], byte[]> convertTransformedRecord(SourceRecord record) {
    if (record == null) {
        return null;
    }
    RecordHeaders headers = retryWithToleranceOperator.execute(() -> convertHeaderFor(record),
            Stage.HEADER_CONVERTER, headerConverter.getClass());
    byte[] key = retryWithToleranceOperator.execute(
            () -> keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key()),
            Stage.KEY_CONVERTER, keyConverter.getClass());
    byte[] value = retryWithToleranceOperator.execute(
            () -> valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value()),
            Stage.VALUE_CONVERTER, valueConverter.getClass());
    if (retryWithToleranceOperator.failed()) {
        return null;
    }
    return new ProducerRecord<>(record.topic(), record.kafkaPartition(),
            ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value, headers);
}
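Stripped of the retry/tolerance machinery, the core conversion is just two Converter calls plus the ProducerRecord constructor. A minimal sketch using JsonConverter as one concrete Converter implementation, which the original code does not mandate; the `convertDirectly` name is illustrative and a `java.util.Collections` import is assumed.

// Minimal sketch of the same conversion without error-handling wrappers.
private ProducerRecord<byte[], byte[]> convertDirectly(SourceRecord record) {
    JsonConverter keyConv = new JsonConverter();
    keyConv.configure(Collections.singletonMap("schemas.enable", "false"), true);    // key converter
    JsonConverter valueConv = new JsonConverter();
    valueConv.configure(Collections.singletonMap("schemas.enable", "false"), false); // value converter
    byte[] key = keyConv.fromConnectData(record.topic(), record.keySchema(), record.key());
    byte[] value = valueConv.fromConnectData(record.topic(), record.valueSchema(), record.value());
    return new ProducerRecord<>(record.topic(), record.kafkaPartition(), key, value);
}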
appendAdditional("sourceOffset", record.sourceOffset()); appendAdditional("topic", record.topic()); appendAdditional("kafkaPartition", record.kafkaPartition()); if (detailed) { appendAdditional("keySchema", record.keySchema());
currentRecord.topic(), currentRecord.kafkaPartition(), currentRecord.keySchema(), currentRecord.key(), currentRecord.valueSchema(), currentRecord.value()));
/** * Serialize the source record to document form. * * @param record the record; may not be null * @param keyConverter the converter for the record key's schema and payload * @param valueConverter the converter for the record value's schema and payload * @return the document form of the source record; never null * @throws IOException if there is an error converting the key or value */ private Document serializeSourceRecord(SourceRecord record, SchemaAndValueConverter keyConverter, SchemaAndValueConverter valueConverter) throws IOException { Document keyAndSchema = keyConverter.serialize(record.topic(), record.keySchema(), record.key()); Document valueAndSchema = valueConverter.serialize(record.topic(), record.valueSchema(), record.value()); Document sourcePartition = Document.create().putAll(record.sourcePartition()); Document sourceOffset = Document.create().putAll(record.sourceOffset()); Document parent = Document.create(); parent.set("sourcePartition", sourcePartition); parent.set("sourceOffset", sourceOffset); parent.set("topic", record.topic()); parent.set("kafkaPartition", record.kafkaPartition()); parent.set("keySchema", keyAndSchema.getDocument("schema")); parent.set("key", keyAndSchema.getDocument("payload")); parent.set("valueSchema", valueAndSchema.getDocument("schema")); parent.set("value", valueAndSchema.getDocument("payload")); return parent; }