/**
 * Extracts the key of the given source record as a {@link Struct}.
 *
 * @param record the source record whose key is to be read
 * @return the record's key cast to a {@code Struct}; may be null if the record carries no key
 */
protected static Struct keyFor(SourceRecord record) {
    Object recordKey = record.key();
    return (Struct) recordKey;
}
/**
 * Stop buffering source records, and flush any buffered record by replacing its offset with the
 * provided offset. Note that only the record's {@link SourceRecord#sourceOffset() offset} is
 * replaced; the record's value is left untouched and may still contain snapshot information.
 *
 * @param newOffset the offset that reflects that the snapshot has been completed; may not be null
 * @throws InterruptedException if the thread is interrupted while waiting for the buffered record to be flushed
 */
protected synchronized void stopBuffering(Map<String, ?> newOffset) throws InterruptedException {
    assert newOffset != null;
    this.buffered.close(bufferedRecord -> {
        if (bufferedRecord == null) {
            return null;
        }
        // Rebuild the record with the completed-snapshot offset; every other field is copied verbatim.
        return new SourceRecord(bufferedRecord.sourcePartition(), newOffset, bufferedRecord.topic(),
                bufferedRecord.kafkaPartition(), bufferedRecord.keySchema(), bufferedRecord.key(),
                bufferedRecord.valueSchema(), bufferedRecord.value());
    });
    this.current = this.actual;
}
/**
 * Utility method to replace the offset in the given record with the connector's latest offset.
 * This is used on the last record produced during the snapshot.
 *
 * @param record the record; may be null
 * @return a copy of the record carrying the latest offset, or null if {@code record} was null
 */
protected SourceRecord replaceOffset(SourceRecord record) {
    if (record == null) {
        return null;
    }
    // Only the sourceOffset changes; all other record fields are carried over unchanged.
    Map<String, ?> latestOffset = context.source().offset();
    return new SourceRecord(record.sourcePartition(), latestOffset, record.topic(),
            record.kafkaPartition(), record.keySchema(), record.key(),
            record.valueSchema(), record.value());
}
/**
 * Builds a predicate that matches a record whose primary-key field equals the given value.
 *
 * @param pkValue the primary-key value to match
 * @return a predicate that is true for records whose {@code PK_FIELD} key entry equals {@code pkValue}
 */
private Predicate<SourceRecord> stopOnPKPredicate(int pkValue) {
    return candidate -> {
        Struct candidateKey = (Struct) candidate.key();
        // The Integer pulled from the key struct is unboxed and compared by value.
        return ((Integer) candidateKey.get(PK_FIELD)) == pkValue;
    };
}
/**
 * Verifies that a record arriving with a null key and null key schema passes through the router
 * without a key being synthesized, while the topic is still rewritten (DBZ-1086).
 */
@Test
@FixFor("DBZ-1086")
public void testKeyNullValue() {
    final ByLogicalTableRouter<SourceRecord> router = new ByLogicalTableRouter<>();
    final Map<String, String> config = new HashMap<>();
    config.put("topic.regex", "(.*)customers_shard(.*)");
    config.put("topic.replacement", "$1customers_all_shards");
    config.put("key.field.name", "shard_id");
    config.put("key.field.regex", "(.*)customers_shard_(.*)");
    config.put("key.field.replacement", "$2");
    router.configure(config);

    final SourceRecord keylessRecord = new SourceRecord(
            new HashMap<>(),
            new HashMap<>(),
            "mysql-server-1.inventory.customers_shard_1",
            null,
            null,
            null,
            null);
    final SourceRecord routed = router.apply(keylessRecord);

    assertThat(routed).isNotNull();
    assertThat(routed.topic()).isEqualTo("mysql-server-1.inventory.customers_all_shards");
    // A null input key must stay null; the key.field.* settings must not fabricate a key.
    assertThat(routed.keySchema()).isNull();
    assertThat(routed.key()).isNull();
}
/**
 * Verify that the given {@link SourceRecord} has a valid non-null integer key that matches the
 * expected integer value.
 *
 * @param record the source record; may not be null
 * @param pkField the name of the single field forming the primary key of the key struct; may not be null
 * @param pk the expected integer value of the primary key
 */
public static void hasValidKey(SourceRecord record, String pkField, int pk) {
    Struct keyStruct = (Struct) record.key();
    assertThat(keyStruct.get(pkField)).isEqualTo(pk);
}
/**
 * Verify that the given {@link SourceRecord} is a valid tombstone: it must carry a non-null key
 * and key schema, but a null value and value schema.
 *
 * @param record the source record; may not be null
 */
public static void isValidTombstone(SourceRecord record) {
    assertThat(record.keySchema()).isNotNull();
    assertThat(record.key()).isNotNull();
    assertThat(record.valueSchema()).isNull();
    assertThat(record.value()).isNull();
}
/**
 * Asserts that the next record available from the consumer is a heartbeat message: it must be
 * addressed to the heartbeat topic for the test server and keyed by the server name.
 */
private void assertHeartBeatRecordInserted() {
    assertFalse("records not generated", consumer.isEmpty());
    final SourceRecord heartbeat = consumer.remove();
    assertEquals("__debezium-heartbeat." + TestHelper.TEST_SERVER, heartbeat.topic());
    final Struct heartbeatKey = (Struct) heartbeat.key();
    assertThat(heartbeatKey.get("serverName")).isEqualTo(TestHelper.TEST_SERVER);
}
/** * Verify that the given {@link SourceRecord} is a {@link Operation#UPDATE UPDATE} record. * * @param record the source record; may not be null */ public static void isValidUpdate(SourceRecord record, boolean keyExpected) { if (keyExpected) { assertThat(record.key()).isNotNull(); assertThat(record.keySchema()).isNotNull(); } else { assertThat(record.key()).isNull(); assertThat(record.keySchema()).isNull(); } assertThat(record.valueSchema()).isNotNull(); Struct value = (Struct) record.value(); assertThat(value).isNotNull(); assertThat(value.getString(FieldName.OPERATION)).isEqualTo(Operation.UPDATE.code()); assertThat(value.get(FieldName.AFTER)).isNotNull(); // assertThat(value.get(FieldName.BEFORE)).isNull(); // may be null }
/**
 * Verify that the given {@link SourceRecord} is a {@link Operation#CREATE INSERT/CREATE} record.
 *
 * @param record the source record; may not be null
 * @param keyExpected true if the record must carry a key and key schema, false if both must be null
 */
public static void isValidInsert(SourceRecord record, boolean keyExpected) {
    if (keyExpected) {
        assertThat(record.keySchema()).isNotNull();
        assertThat(record.key()).isNotNull();
    }
    else {
        assertThat(record.keySchema()).isNull();
        assertThat(record.key()).isNull();
    }
    assertThat(record.valueSchema()).isNotNull();
    final Struct envelope = (Struct) record.value();
    assertThat(envelope).isNotNull();
    assertThat(envelope.getString(FieldName.OPERATION)).isEqualTo(Operation.CREATE.code());
    // An insert has only an 'after' state; 'before' must be absent.
    assertThat(envelope.get(FieldName.AFTER)).isNotNull();
    assertThat(envelope.get(FieldName.BEFORE)).isNull();
}
/**
 * Verify that the given {@link SourceRecord} is a {@link Operation#DELETE DELETE} record.
 *
 * @param record the source record; may not be null
 * @param keyExpected true if the record must carry a key and key schema, false if both must be null
 */
public static void isValidDelete(SourceRecord record, boolean keyExpected) {
    if (keyExpected) {
        assertThat(record.keySchema()).isNotNull();
        assertThat(record.key()).isNotNull();
    }
    else {
        assertThat(record.keySchema()).isNull();
        assertThat(record.key()).isNull();
    }
    assertThat(record.valueSchema()).isNotNull();
    final Struct envelope = (Struct) record.value();
    assertThat(envelope).isNotNull();
    assertThat(envelope.getString(FieldName.OPERATION)).isEqualTo(Operation.DELETE.code());
    // A delete has only a 'before' state; 'after' must be absent.
    assertThat(envelope.get(FieldName.BEFORE)).isNotNull();
    assertThat(envelope.get(FieldName.AFTER)).isNull();
}
/**
 * Asserts that two source records are equal, comparing source partition, source offset, topic,
 * Kafka partition, schemas, keys, and values. Individual field comparisons can be customized via
 * the ignore predicate and the comparator maps.
 *
 * @param actual the record produced by the code under test; may not be null
 * @param expected the record that was expected; may not be null
 * @param ignoreFields predicate selecting fully-qualified field names to skip during comparison
 * @param comparatorsByName custom comparators keyed by fully-qualified field name
 * @param comparatorsBySchemaName custom comparators keyed by schema name
 */
public static void assertEquals(SourceRecord actual, SourceRecord expected, Predicate<String> ignoreFields,
                                Map<String, RecordValueComparator> comparatorsByName,
                                Map<String, RecordValueComparator> comparatorsBySchemaName) {
    assertThat(actual).isNotNull();
    assertThat(expected).isNotNull();
    assertEquals(null, actual.sourcePartition(), expected.sourcePartition(), "sourcePartition", "",
                 ignoreFields, comparatorsByName, comparatorsBySchemaName);
    assertEquals(null, actual.sourceOffset(), expected.sourceOffset(), "sourceOffset", "",
                 ignoreFields, comparatorsByName, comparatorsBySchemaName);
    assertThat(actual.topic()).isEqualTo(expected.topic());
    assertThat(actual.kafkaPartition()).isEqualTo(expected.kafkaPartition());

    final Schema actualKeySchema = actual.keySchema();
    final Schema expectedKeySchema = expected.keySchema();
    // When schemas differ, compare their string forms so a mismatch yields a readable diff.
    if (!Objects.equals(actualKeySchema, expectedKeySchema)) {
        assertThat(SchemaUtil.asString(actualKeySchema))
                .as("The key schema for record with key " + SchemaUtil.asString(actual.key())
                        + " did not match expected schema")
                .isEqualTo(SchemaUtil.asString(expectedKeySchema));
    }
    final Schema actualValueSchema = actual.valueSchema();
    final Schema expectedValueSchema = expected.valueSchema();
    if (!Objects.equals(actualValueSchema, expectedValueSchema)) {
        assertThat(SchemaUtil.asString(actualValueSchema)).isEqualTo(SchemaUtil.asString(expectedValueSchema));
    }
    assertEquals(actualKeySchema, actual.key(), expected.key(), "key", "",
                 ignoreFields, comparatorsByName, comparatorsBySchemaName);
    assertEquals(actualValueSchema, actual.value(), expected.value(), "value", "",
                 ignoreFields, comparatorsByName, comparatorsBySchemaName);
}
// Unpack the transformed record's key and value structs for the assertions that follow.
// NOTE(review): assumes 'transformed' is the SourceRecord produced earlier in this test — confirm in context.
Struct key = (Struct) transformed.key(); Struct value = (Struct) transformed.value();
/**
 * Verify that the given {@link SourceRecord} is a {@link Operation#READ READ} record.
 *
 * @param record the source record; may not be null
 */
public static void isValidRead(SourceRecord record) {
    assertThat(record.key()).isNotNull();
    assertThat(record.keySchema()).isNotNull();
    assertThat(record.valueSchema()).isNotNull();
    final Struct envelope = (Struct) record.value();
    assertThat(envelope).isNotNull();
    assertThat(envelope.getString(FieldName.OPERATION)).isEqualTo(Operation.READ.code());
    // A read (snapshot) record carries only an 'after' state.
    assertThat(envelope.get(FieldName.AFTER)).isNotNull();
    assertThat(envelope.get(FieldName.BEFORE)).isNull();
}
// Unpack the transformed record's key and value structs for the assertions that follow.
// NOTE(review): assumes 'transformed' is the SourceRecord produced earlier in this test — confirm in context.
Struct key = (Struct) transformed.key(); Struct value = (Struct) transformed.value();
@Test public void shouldGenerateRecordForInsertEvent() throws InterruptedException { CollectionId collectionId = new CollectionId("rs0", "dbA", "c1"); BsonTimestamp ts = new BsonTimestamp(1000, 1); ObjectId objId = new ObjectId(); Document obj = new Document().append("_id", objId).append("name", "Sally"); Document event = new Document().append("o", obj) .append("ns", "dbA.c1") .append("ts", ts) .append("h", Long.valueOf(12345678)) .append("op", "i"); RecordsForCollection records = recordMakers.forCollection(collectionId); records.recordEvent(event, 1002); assertThat(produced.size()).isEqualTo(1); SourceRecord record = produced.get(0); Struct key = (Struct) record.key(); Struct value = (Struct) record.value(); assertThat(key.schema()).isSameAs(record.keySchema()); assertThat(key.get("id")).isEqualTo("{ \"$oid\" : \"" + objId + "\"}"); assertThat(value.schema()).isSameAs(record.valueSchema()); // assertThat(value.getString(FieldName.BEFORE)).isNull(); assertThat(value.getString(FieldName.AFTER)).isEqualTo(obj.toJson(WRITER_SETTINGS)); assertThat(value.getString(FieldName.OPERATION)).isEqualTo(Operation.CREATE.code()); assertThat(value.getInt64(FieldName.TIMESTAMP)).isEqualTo(1002L); Struct actualSource = value.getStruct(FieldName.SOURCE); Struct expectedSource = source.lastOffsetStruct("rs0", collectionId); assertThat(actualSource).isEqualTo(expectedSource); }
@Test public void shouldGenerateRecordForUpdateEvent() throws InterruptedException { BsonTimestamp ts = new BsonTimestamp(1000, 1); CollectionId collectionId = new CollectionId("rs0", "dbA", "c1"); ObjectId objId = new ObjectId(); Document obj = new Document().append("$set", new Document("name", "Sally")); Document event = new Document().append("o", obj) .append("o2", objId) .append("ns", "dbA.c1") .append("ts", ts) .append("h", Long.valueOf(12345678)) .append("op", "u"); RecordsForCollection records = recordMakers.forCollection(collectionId); records.recordEvent(event, 1002); assertThat(produced.size()).isEqualTo(1); SourceRecord record = produced.get(0); Struct key = (Struct) record.key(); Struct value = (Struct) record.value(); assertThat(key.schema()).isSameAs(record.keySchema()); assertThat(key.get("id")).isEqualTo(JSONSerializers.getStrict().serialize(objId)); assertThat(value.schema()).isSameAs(record.valueSchema()); // assertThat(value.getString(FieldName.BEFORE)).isNull(); assertThat(value.getString(FieldName.AFTER)).isNull(); assertThat(value.getString("patch")).isEqualTo(obj.toJson(WRITER_SETTINGS)); assertThat(value.getString(FieldName.OPERATION)).isEqualTo(Operation.UPDATE.code()); assertThat(value.getInt64(FieldName.TIMESTAMP)).isEqualTo(1002L); Struct actualSource = value.getStruct(FieldName.SOURCE); Struct expectedSource = source.lastOffsetStruct("rs0", collectionId); assertThat(actualSource).isEqualTo(expectedSource); }
/**
 * An oplog delete event ({@code "op":"d"}) processed by a {@link RecordMakers} configured without
 * tombstones must produce exactly one DELETE record (and no trailing tombstone), keyed by the
 * deleted document's id, with null 'after' and 'patch' fields (DBZ-582).
 *
 * @throws InterruptedException if the thread is interrupted while recording the event
 */
@Test
@FixFor("DBZ-582")
public void shouldGenerateRecordForDeleteEventWithoutTombstone() throws InterruptedException {
    // Fourth argument false disables tombstone emission for this maker.
    RecordMakers recordMakers = new RecordMakers(filters, source, topicSelector, produced::add, false);
    BsonTimestamp ts = new BsonTimestamp(1000, 1);
    CollectionId collectionId = new CollectionId("rs0", "dbA", "c1");
    ObjectId objId = new ObjectId();
    Document obj = new Document("_id", objId);
    // Fixed: use Long.valueOf instead of the deprecated boxing constructor `new Long(...)`,
    // consistent with the sibling insert/update tests.
    Document event = new Document().append("o", obj)
                                   .append("ns", "dbA.c1")
                                   .append("ts", ts)
                                   .append("h", Long.valueOf(12345678))
                                   .append("op", "d");
    RecordsForCollection records = recordMakers.forCollection(collectionId);
    records.recordEvent(event, 1002);
    // Exactly one record: the delete itself, with no tombstone following it.
    assertThat(produced.size()).isEqualTo(1);

    SourceRecord record = produced.get(0);
    Struct key = (Struct) record.key();
    Struct value = (Struct) record.value();
    assertThat(key.schema()).isSameAs(record.keySchema());
    assertThat(key.get("id")).isEqualTo(JSONSerializers.getStrict().serialize(objId));
    assertThat(value.schema()).isSameAs(record.valueSchema());
    assertThat(value.getString(FieldName.AFTER)).isNull();
    assertThat(value.getString("patch")).isNull();
    assertThat(value.getString(FieldName.OPERATION)).isEqualTo(Operation.DELETE.code());
    assertThat(value.getInt64(FieldName.TIMESTAMP)).isEqualTo(1002L);
    Struct actualSource = value.getStruct(FieldName.SOURCE);
    Struct expectedSource = source.lastOffsetStruct("rs0", collectionId);
    assertThat(actualSource).isEqualTo(expectedSource);
}
/**
 * Serialize the source record to document form.
 *
 * @param record the record; may not be null
 * @param keyConverter the converter for the record key's schema and payload
 * @param valueConverter the converter for the record value's schema and payload
 * @return the document form of the source record; never null
 * @throws IOException if there is an error converting the key or value
 */
private Document serializeSourceRecord(SourceRecord record, SchemaAndValueConverter keyConverter,
                                       SchemaAndValueConverter valueConverter) throws IOException {
    final String topic = record.topic();
    final Document keyAndSchema = keyConverter.serialize(topic, record.keySchema(), record.key());
    final Document valueAndSchema = valueConverter.serialize(topic, record.valueSchema(), record.value());
    final Document result = Document.create();
    result.set("sourcePartition", Document.create().putAll(record.sourcePartition()));
    result.set("sourceOffset", Document.create().putAll(record.sourceOffset()));
    result.set("topic", topic);
    result.set("kafkaPartition", record.kafkaPartition());
    // Key and value are each stored as separate schema/payload documents.
    result.set("keySchema", keyAndSchema.getDocument("schema"));
    result.set("key", keyAndSchema.getDocument("payload"));
    result.set("valueSchema", valueAndSchema.getDocument("schema"));
    result.set("value", valueAndSchema.getDocument("payload"));
    return result;
}
// Unpack the transformed record's key and value structs for the assertions that follow.
// NOTE(review): assumes 'transformed' is the SourceRecord produced earlier in this test — confirm in context.
Struct key = (Struct) transformed.key(); Struct value = (Struct) transformed.value();