I hope this message finds you well.
Today I'm coming to you with a different issue: I'm in the middle of tuning an ad hoc snapshot.
I tried adding a few tables of different sizes and got stuck during the process.
Specifically, I'm trying to add a table after the initial snapshot, using the ad hoc incremental snapshot functionality. The table has almost 20M rows.
The problem is that the snapshot gets stuck, or rather appears frozen/paused, partway through. I let it run over the whole weekend and it barely added 1.5M rows.
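For context, I'm triggering the ad hoc snapshot by inserting a row into the signalling table referenced by signal.data.collection, roughly like this (schema/table names are placeholders, matching the masked values in the config, and the id is just an arbitrary unique value):

INSERT INTO XXXX.XXXX (id, type, data)
VALUES ('adhoc-1',
        'execute-snapshot',
        '{"data-collections": ["XXXX.XXXX"], "type": "incremental"}');
COMMIT;

With incremental.snapshot.chunk.size at 5000 (see the config below), the ~20M rows work out to roughly 4,000 chunks, and the ~1.5M rows copied over the weekend correspond to only about 300 of them.

Here is the connector configuration: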
{
  "name": "xxxx-source",
  "config": {
    "connector.class": "io.debezium.connector.oracle.OracleConnector",
    "database.hostname": "xxxx",
    "database.port": "xxx",
    "database.user": "xxxx",
    "database.password": "xxxx",
    "database.dbname": "xxxx",
    "database.name": "xxxx",
    "database.server.name": "xxx",
    "database.history.kafka.topic": "xxx-schema-changes",
    "database.history.kafka.bootstrap.servers": "kafka:9092",
    "schema.history.internal.kafka.topic": "xxxx-schema-changes",
    "schema.history.internal.kafka.bootstrap.servers": "kafka:9092",
    "schema.include.list": "xxxxx, xxxxx",
    "table.include.list": "xxxxxx",
    "datatype.propagate.source.type": "xxxxx",
    "schema.history.internal.store.only.captured.tables.ddl": "true",
    "schema.history.internal.store.only.captured.databases.ddl": "true",
    "signal.data.collection": "xxxx.xxxx.xxxx",
    "decimal.handling.mode": "precise",
    "snapshot.mode": "initial",
    "snapshot.max.threads": "8",
    "incremental.snapshot.chunk.size": "5000",
    "snapshot.fetch.size": "2000",
    "topic.prefix": "xxxx",
    "heartbeat.interval.ms": "5000",
    "heartbeat.action.query": "INSERT INTO xxxx.xxxx VALUES ('heartbeat')",
    "log.mining.strategy": "hybrid",
    "log.mining.transaction.retention.ms": "172800000",
    "log.mining.flush.table.name": "xxxxx",
    "log.mining.batch.size.min": "10000",
    "log.mining.batch.size.max": "20000",
    "log.mining.sleep.time.min.ms": "50",
    "log.mining.sleep.time.max.ms": "200",
    "log.mining.archive.log.only.mode": "false",
    "log.mining.database.connection.reuse.interval.ms": "60000",
    "key.converter.schemas.enable": "true",
    "value.converter.schemas.enable": "true",
    "key.converter": "io.confluent.connect.avro.AvroConverter",
    "key.converter.schema.registry.url": "http://schema-registry:8081",
    "value.converter": "io.confluent.connect.avro.AvroConverter",
    "value.converter.schema.registry.url": "http://schema-registry:8081"
  }
}
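In case it matters for any suggestions: my understanding is that if I end up changing incremental.snapshot.chunk.size, I would first stop the running ad hoc snapshot and then re-trigger it after updating the connector, with signals along these lines (again, names are placeholders):

INSERT INTO XXXX.XXXX (id, type, data)
VALUES ('adhoc-stop-1',
        'stop-snapshot',
        '{"data-collections": ["XXXX.XXXX"], "type": "incremental"}');
COMMIT;

INSERT INTO XXXX.XXXX (id, type, data)
VALUES ('adhoc-2',
        'execute-snapshot',
        '{"data-collections": ["XXXX.XXXX"], "type": "incremental"}');
COMMIT;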
Please feel free to suggest anything that might help.