We tried doing the same; however, we see the connector is detecting no data (consumer lag is 0 from the beginning) when using the filter configuration below. Do you have any further suggestions?
apmt.mvii-nlrot.cdh.uat2.oraclesink-rhel-test-v2: |
{
"name":"apmt.mvii-nlrot.cdh.uat2.oraclesink-rhel-test-v2",
"config":{
"connector.class":"io.confluent.connect.jdbc.JdbcSinkConnector",
"consumer.override.group.id":"apmt.mvii-nlrot.cdh.uat2.oraclesink-rhel-test-v2",
"tasks.max":"3",
"topics":"apmt.mvii-nlrot.n4.uat2.topic.confidential.dedicated.cdh-datastream.vsl_crane_statistics",
"connection.url":"xxxx",
"connection.user":"xxxx",
"connection.password":"${secrets:kafka-connect-connectors-passwords:oracle-uat-password}",
"transforms":"unwrap,striptopic,InsertField,FilterEarlierThan",
"transforms.unwrap.type":"io.debezium.transforms.ExtractNewRecordState",
"transforms.unwrap.drop.tombstones":"false",
"transforms.striptopic.type":"org.apache.kafka.connect.transforms.RegexRouter",
"transforms.striptopic.regex":"apmt.mvii-nlrot.n4.uat2.topic.confidential.dedicated.cdh-datastream.(.*)",
"transforms.striptopic.replacement":"$1_test",
"transforms.InsertField.type": "org.apache.kafka.connect.transforms.InsertField$Value",
"transforms.InsertField.timestamp.field": "messagets",