Logminer performance issue

101 views
Skip to first unread message

Gergely Jahn

unread,
Feb 3, 2026, 5:12:03 PMFeb 3
to debezium
Hi Community,

We run Debezium Connector for Oracle 3.4 with Logminer with the following configuration:

{
    "schema.registry.url": "https://***************:8081,https://***************:8081",
    "schema.history.internal.kafka.topic": "schema-changes.SRC_ORA_FLX_001.DEV",
    "schema.history.internal.kafka.bootstrap.servers": "***************:9092,***************:9092",
    "schema.history.internal.producer.security.protocol": "SSL",
    "schema.history.internal.producer.ssl.keystore.location": "/app/ssl/private/kafka_connect.keystore.jks",
    "schema.history.internal.producer.ssl.keystore.password": "***************",
    "schema.history.internal.producer.ssl.truststore.location": "/app/ssl/private/kafka_connect.truststore.jks",
    "schema.history.internal.producer.ssl.truststore.password": "***************",
    "schema.history.internal.producer.ssl.key.password": "***************",
    "schema.history.internal.consumer.security.protocol": "SSL",
    "schema.history.internal.consumer.ssl.keystore.location": "/app/ssl/private/kafka_connect.keystore.jks",
    "schema.history.internal.consumer.ssl.keystore.password": "***************",
    "schema.history.internal.consumer.ssl.truststore.location": "/app/ssl/private/kafka_connect.truststore.jks",
    "schema.history.internal.consumer.ssl.truststore.password": "***************",
    "schema.history.internal.consumer.ssl.key.password": "***************",
    "signal.enabled.channels": "source,kafka",
    "signal.data.collection": "TORGFC.FCUBS.DEBEZIUM_SIGNAL",
    "signal.kafka.topic": "signaling.SRC_ORA_FLX_001.DEV",
    "signal.kafka.bootstrap.servers": "***************:9092,***************:9092",
    "signal.consumer.security.protocol": "SSL",
    "signal.consumer.ssl.keystore.location": "/app/ssl/private/kafka_connect.keystore.jks",
    "signal.consumer.ssl.keystore.password": "***************",
    "signal.consumer.ssl.truststore.location": "/app/ssl/private/kafka_connect.truststore.jks",
    "signal.consumer.ssl.truststore.password": "***************",
    "signal.consumer.ssl.key.password": "***************",
    "connector.class": "io.debezium.connector.oracle.OracleConnector",
    "decimal.handling.mode": "string",
    "key.converter": "org.apache.kafka.connect.storage.StringConverter",
    "value.converter": "io.confluent.connect.avro.AvroConverter",
    "value.converter.schema.registry.ssl.key.password": "***************",
    "value.converter.schema.registry.ssl.keystore.location": "/app/ssl/private/kafka_connect.keystore.jks",
    "value.converter.schema.registry.ssl.keystore.password": "***************",
    "value.converter.schema.registry.ssl.truststore.location": "/app/ssl/private/kafka_connect.truststore.jks",
    "value.converter.schema.registry.ssl.truststore.password": "***************",
    "value.converter.schema.registry.url": "https://***************:8081,https://***************:8081",
    "topic.prefix": "namespace.SRC_ORA_FLX_001.DEV",
    "database.hostname": "***************.local",
    "database.port": "1521",
    "database.user": "debezium",
    "database.password": "***************",
    "database.dbname": "torgfc",
    "snapshot.mode": "no_data",
    "log.mining.strategy": "hybrid",
"table.include.list":"FCUBS.ACTB_DAILY_LOG,FCUBS.GETM_FACILITY,FCUBS.ISTB_CONTRACTIS,FCUBS.STTM_CUST_ACCOUNT,FCUBS.FTTB_CONTRACT_MASTER,FCUBS.PCTB_CONTRACT_MASTER,FCUBS.MSTB_DLY_MSG_IN,FCUBS.STTM_CUSTOMER,FCUBS.GETM_LIAB,FCUBS.STTM_CUST_ACCOUNT_LINKAGES,FCUBS.STTM_ACCOUNT_CLASS,FCUBS.STTM_TRN_CODE,FCESB.MWTB_COUNTRY_CODE,FCUBS.STTM_CIF_OTHER_CUSTOM,FCUBS.ACTB_HISTORY,FCUBS.CSTM_NARRATIVE_DETAIL,FCUBS.CSTM_FUNCTION_UDF_FIELDS_MAP,FCUBS.STTM_MKB_TRN_MASTER_CUSTOM,FCUBS.CSTM_FUNCTION_USERDEF_FIELDS,FCUBS.FPTM_PRODUCT_CUSTOM,FCUBS.IFTM_ARC_MAINT,FCUBS.PCTB_GROUP_ORDER_CUSTOM,FCUBS.PCTB_CONTRACT_HISTORY,FCUBS.DETB_RTL_TELLER_CUSTOM,FCUBS.SITB_CONTRACT_MASTER,FCUBS.FPTB_INST_PAYMENT_DETAILS,FCUBS.FPTM_COM_PMT_IN,FCUBS.FPTB_INST_PAY_DET_AFR2_MKB,FCUBS.FPTB_INST_PAYMENT_DETAILS_HIST,FCUBS.FPTM_COM_PMT_IN_HIST,FCUBS.DETB_RTL_TELLER,FCUBS.CSTM_CONTRACT_USERDEF_FIELDS,FCUBS.CSTM_PRODUCT_UDF_FIELDS_MAP,FCUBS.DETB_RTL_TELLER_MKB,FCUBS.FXTB_CONTRACT_MASTER,FCESB.MWTB_HOLDING_TECH_ACCOUNTS,FCUBS.STTM_CUST_ACCOUNT_CUSTOM,FCUBS.ILTM_ACCOUNT,FCUBS.ILTM_GROUP_ACCOUNT_LINK,FCUBS.ILTM_GROUP_CODE_CUSTOM,FCUBS.STTM_ACCOUNT_CLASS_CUSTOM,FCUBS.STTM_DATES,FCUBS.CATM_AMOUNT_BLOCKS",
    "snapshot.locking.mode": "none",
    "snapshot.max.threads": "3",
    "schema.history.internal.store.only.captured.tables.ddl":"true",
    "incremental.snapshot.chunk.size": "102400",
    "principal.service.name": "_dev_confluent_kafka_connect_user",
    "principal.service.password": "***************",
    "tombstones.on.delete": "false",
    "topic.creation.default.partitions": 5,
    "topic.creation.default.replication.factor":6,
    "transforms": "Reroute",
    "transforms.Reroute.type": "io.debezium.transforms.ByLogicalTableRouter",
    "transforms.Reroute.topic.regex": "namespace.SRC_ORA_FLX_001.DEV.(.*)",
    "transforms.Reroute.topic.replacement": "PRIVATE.FLX.$1.DEV",
    "log.mining.query.filter.mode": "in",
    "log.mining.batch.size.increment":"200000",
    "log.mining.batch.size.default":"200000",
    "log.mining.batch.size.max":"1000000",
    "log.mining.batch.size.min":"200000",
    "schema.include.list": "FCUBS, FCESB, DEBEZIUM"
  }

If the source database is under significant load and generates redo logs intensively, LogMiner struggles to keep up, causing query times to increase indefinitely. Initially, the connector attempts to expand the SCN query range. However, once it reaches the maximum range, the query time for that fixed window continues to grow until it either times out or the SCN is lost because the archive logs are deleted.
We have experimented with log.mining.batch parameters and reduced the redo log size from 8GB to 600MB, but the issue persists. Based on the code, it appears Debezium only advances the mining session's startSCN (the oldest transaction start or last batch end - 1) when it falls into a new log file. This keeps the startSCN anchored at the same position for multiple cycles. Interestingly, we do not observe high PGA usage.
Do you have any insights into what might be causing this, how to optimize Debezium or LogMiner for better performance, or have we simply reached the physical limits of LogMiner?

Best regards,
Greg

Chris Cranford

unread,
Feb 3, 2026, 8:04:51 PMFeb 3
to debe...@googlegroups.com
Hi Greg, thanks for the feedback. 

So regarding the low watermark update, this was recently fixed in dbz#1560 [1]. While this fix was backported to Debezium 3.4.1.Final, unfortunately there was a small regression that broke 3.4.1.Final that we were not made aware of until after the release was published. I would suggest, if you are able, to consider using the latest snapshot build of 3.5.0.Alpha2, which will be out in the next ~2 weeks or you could always patch 3.4.0.Final with the changes found in this pull request [2].

Secondly, regarding the file size. Once you have the above fix I referenced, I would recommend shifting back to the larger redo log file sizes. This will reduce the frequency of log switches, giving the connector a much-needed reprieve for mining. However, regarding the performance degradation you're experiencing, do you recall if this began in an earlier version, and if so, do you recall in which specific version you first observed it?

Thanks,
-cc

[1]: https://github.com/debezium/dbz/issues/1560
[2]: https://github.com/debezium/debezium/pull/7029/changes
--
You received this message because you are subscribed to the Google Groups "debezium" group.
To unsubscribe from this group and stop receiving emails from it, send an email to debezium+u...@googlegroups.com.
To view this discussion visit https://groups.google.com/d/msgid/debezium/ab7d7541-f6c6-4734-857f-57d9873b5790n%40googlegroups.com.

Gergely Jahn

unread,
Feb 4, 2026, 4:27:53 AMFeb 4
to debezium
Hi Chris,

Thank you for the quick response. We are already testing the fix you proposed. We began this project with version 3.0.0, but we switched to 3.4.0 prior to starting our performance tests.

I have another question regarding the low watermark update. It appears to be tied to the oldest transaction's start SCN. This maintains a larger reading window, and long-running transactions force this window to remain wide.

Since changes are buffered in memory (or via Infinispan), why is it necessary to anchor the start SCN to the beginning of the oldest transaction?


Best,
Greg

Chris Cranford

unread,
Feb 4, 2026, 9:35:50 AMFeb 4
to debe...@googlegroups.com
Hi Greg -

This is unfortunately a quirk of LogMiner that can lead to all sorts of random data loss scenarios.  

While we were reviewing DBZ-8747 [1], we found that if the start of the transaction was not pinned, not only could random unsupported operation events be seen, but things like:
  • Certain fields like USERNAME or CLIENT_ID would be correctly populated when the START event was in the mining pass, and set to UNKNOWN when not.
    This happens because Oracle records certain attributes as part of the transaction START event only and uses the START to populate the same fields in the transaction's event stream. If the START event is not visible in the pass, the fields are populated with UNKNOWN. This leads to situations where transactions that should be included will only have their earlier portion included and the later portion excluded if its split across multiple passes. The same happens for excluded transactions by user/client, the earlier portion is excluded while the subsequent passes are included.
  • The transaction start pinning is mandatory for when LOB is enabled, and has been since the 1.x days. So if you enable LOB capture or have been using LOB capture, this has always been the case.
  • Individual events that are split across multiple mining passes due to the event's size (often for very wide tables or tables with lots of text-based columns), user reported that part of events were being truncated. This again was due to the fact that unsupported operations were being partially detected or that parts of an event were being omitted by LogMiner with no warning or indicator.
  • In some rare cases, this led to the start of the transaction being mined, but the subsequent LogMiner pass did not return the last portion of the transaction, leaving the transaction active in the buffer.

Now, not all or in some cases none of these corner cases were seen by a subset of users.

There is currently a PR in the works [2] that introduces a new windowing technique that aims to avoid long running transactions having the same impact on mining that they currently do; however, the new algorithm has the potential to reintroduce the above corner cases. So this feature is purely opt-in, and the risks are documented so that users are aware of them.

Unfortunately, this is all based on LogMiner limitations, so we're trying to provide solutions for environments that need absolute data reliability versus others that need higher performance at the risk of data reliability.

Let us know if you have any questions.
-cc

[1]: https://issues.redhat.com/browse/DBZ-8747
[2]: https://github.com/debezium/debezium/pull/7035

Gergely Jahn

unread,
Feb 4, 2026, 10:01:22 AMFeb 4
to debezium
Hi Chris,

Thanks for the detailed description; everything is clear now. After applying the patch, the connector's performance has improved, though it still feels a bit slow.
We struggled to find a precise way to define performance, as TPS or SCN/sec can represent very different volumes of data change. We eventually decided to use redo log generation per second as our load metric to test if the connector can keep up.
We are currently hitting around 13 MB/s of redo log generation, and there may be more to come. I suspect we might be hitting a bottleneck since LogMiner is single-threaded and sequential.
Does this throughput sound realistic, or are we overlooking a specific configuration? Additionally, what other metrics or settings should we validate to ensure optimal performance?

Best, Greg

Chris Cranford

unread,
Feb 4, 2026, 12:17:24 PMFeb 4
to debe...@googlegroups.com
Hi Greg -

While LogMiner is single-threaded and sequentially reads the logs, 13MB/s is approximately ~47GB/hour, which LogMiner can easily handle without blinking an eye. In our experience, when you have low redo log generation (which I would consider 47GB/hour to be), the most common latency impact comes from IO, where Debezium enters a wait pattern while the logs are read off disk.

-cc

Gergely Jahn

unread,
Feb 4, 2026, 1:23:17 PMFeb 4
to debezium
Hi Chris,

When we increase the load and the redo log volume triples, the connector starts falling behind. The majority of the time is spent on the LogMiner query itself; processing the results takes almost no time.
Our DBA mentioned that during the SQL query, they see 99% ASM IO FOR NON-BLOCKING POLL in the v$session view. I assume this means LogMiner is waiting for the logs to be read from the disk.
The queries take longer and longer, and eventually Debezium cancels the request after a 10 minute timeout. So LogMiner is idle most of the time while reading, but the SCN range grows when the redo log volume increases significantly.
It seems like a storage bottleneck but the database is running on an Exadata server, and logs are stored locally.
Do you have any experience with similar issues, or any ideas on how we could improve this?
Do you have any reference for Logminer's performance limits?

Best, Greg

Chris Cranford

unread,
Feb 4, 2026, 5:01:49 PMFeb 4
to debe...@googlegroups.com
Hi Greg, can you tell me if the archive/redo logs are stored on separate disk groups on ASM or the same disk groups as the other database dbf files?

Gergely Jahn

unread,
Feb 4, 2026, 7:11:20 PMFeb 4
to debezium

Hi Chris,

I will follow up tomorrow morning once our DBA provides more feedback.
 In the meantime, I’ve received a log analysis from my colleagues with the following definitions:

  • Setup(s): The duration of the start mining session calls.

  • Batch SCN: The size of the query window.

  • Window SCN: The total size of the mining session window.

My observations:

  1. Fetch speed: Data fetching seems quite slow, especially considering we are on optimized hardware (Exadata).

  2. Scaling issues: It is clearly visible that query times increase as the Window SCN grows, likely due to long-running transactions. When the window exceeds log.mining.batch.size.max, I see the mining session starting from the same SCN repeatedly.

You mentioned in a previous comment that you know of several very large databases (millions of changes per hour) using a single connector successfully.
Could you share some specific metrics on achievable performance with LogMiner? I’m particularly interested in TPS and redo log volume/sec.
I’m trying to understand how close we are to the architectural limits, as we will eventually need to mine significantly more data in production (approx. 100–200 MB/s).

loganalysis.txt

Chris Cranford

unread,
Feb 4, 2026, 8:40:46 PMFeb 4
to debe...@googlegroups.com
Hi Greg,

So one thing I find a bit unusual in your statistics are the points, particularly earlier on where the Fetch time randomly spikes. For example, fetch queries were mostly 20s or less until this
73  | 2026-02-04 13:17:12.692   | 0,006      | 25,479     | 25,485     | 114 784         | 129 284         | 22 661       | 2        | OK      
74  | 2026-02-04 13:17:41.206   | 0,006      | 41,865     | 41,871     | 154 088         | 283 371         | 27 177       | 7        | OK      
75  | 2026-02-04 13:18:26.106   | 0,006      | 71,925     | 71,931     | 200 000         | 483 370         | 45 077       | 2        | OK      
What I would do is take these timings, and have the DBA share the alert logs with you, if possible, and see what might have been happening at the times you see spikes. In some environments, I've seen similar scenarios where LogMiner is reading the archive logs just fine, and then all of a sudden the read of an archive log takes 2, 4, or 10x longer than it had been. In the cases we saw, this was due to materialized view refreshes, which creates a tremendous amount of contention on the Oracle redo system. Do you have something similar?

I'm afraid Oracle's Terms of Use and Licensing prohibits us from sharing any benchmarks or thresholds about LogMiner's performance. 

What I can share with you is we have multiple users who have environments generating between 250-400GB of redo/hour using LogMiner. Those who have high volume environments that are using it well have a very close communication between the connector engineers, developers, and the DBAs to make sure the system is running at peak optimal performance. This includes making sure that PL/SQL batch jobs, data loads, and applications are performing SQL operations with CDC in mind. 

We are also actively exploring the idea to introduce support for downstream mining databases for LogMiner and XStream. This would allow DBAs to ship logs from production to a downstream system whose only job is to replay changes from the logs for consumers like Debezium. This eliminates user load on the system and makes mining the logs far more efficient than doing it on production systems. Once we believe this feature is ready for user testing, we'll be sure to share a post on the Debezium blog and add a note in the release notes.

In closing, you mention 100-200MB/second. In this case you're talking anywhere between 360GB to 720GB/hour. I have no doubt that LogMiner can handle this, but as I mentioned, this would require a significant amount of cross-functional teams to work together to use the database at optimal levels to minimize unnecessary redo. If you believe you're going to be approaching the 720GB to 1TB hour thresholds, I might suggest you consider XStream due to how it integrates with LogMiner in a persistent way to provide an improved layer of efficiency over the native LogMiner adapter.

-cc

Chris Cranford

unread,
Feb 4, 2026, 8:45:04 PMFeb 4
to debe...@googlegroups.com
Greg,

I want to clarify one point that I don't feel I made clear.

Because of how the downstream mining solution works, and the elimination of all the noise on the redo system other than the replaying of logs, the speed at which LogMiner can operate and remain caught up should be much higher on a mining database than on production. So while moving to XStream would likely make the most sense for capturing the high-volume traffic you mentioned directly on a production system, sticking with LogMiner and migrating to a downstream mining solution when that's ready would also be something I'd recommend considering; especially if you don't already have an Oracle GoldenGate license.

Let us know if you have any further questions.
-cc

Gergely Jahn

unread,
Feb 5, 2026, 3:53:58 AMFeb 5
to debezium
Hi Chris,

Thank you for the detailed feedback. It sounds like while LogMiner is technically capable of handling our load, 
it requires very intentional design of the source database and its underlying processes.
Unfortunately, our use case offers almost zero flexibility for changes on the source DB side. 
Given that, would I be correct in assuming that we’ve reached the limit (or the limit will definitely be below the expected 720GB/hour) of tuning LogMiner and should instead switch to XStream (which is an option for us)?
Do you have any performance benchmarks for XStream in terms of redo log volume per hour? I’m curious to know how much of a performance gain we can realistically expect.

Best, Greg




Chris Cranford

unread,
Feb 5, 2026, 4:22:47 PMFeb 5
to debe...@googlegroups.com
Hi Greg -

I can't share any specifics of redo log volume, as that would be skirting Oracle's ToU and Licensing; so those questions are best proposed directly to Oracle. 

I've had people share mixed results, some had better performance while a select few have indicated they appeared to have worse. I genuinely think the experience heavily relies on hardware, transaction volume, and database configuration. XStream adds an extra 3-5 processes on the database server to handle the capture, coordination, transaction collection, and the server that sends data to the client. If your Oracle instance is already reaching the hardware's capacity limits, adding XStream could make things worse. And as I was discussing with Vinoth in another thread, even Oracle concurs that if your environment has long running transactions, XStream can potentially suffer by buffering such large amounts of data.

But I would reach out to Oracle; they can look at your hardware and volume and provide a much more concrete estimate than we could.

-cc

Gergely Jahn

unread,
Feb 5, 2026, 5:01:58 PMFeb 5
to debezium
Hi Chris,

Thank you. In the meantime, it has turned out that the redo and data files are on the same disk group in production.
This differs from our test system—where the logs I attached originated—as that environment uses separate disk groups.
I know that it is a good practice to use different groups, but are you aware of any performance issues related to this configuration?

Best,
Greg

Chris Cranford

unread,
Feb 6, 2026, 4:32:59 PMFeb 6
to debe...@googlegroups.com
Hi Greg -

Where it's most often seen as a problem is in time windows where you have high writes on the database. During this period you will have a high volume of writes performed by the LGWR process, which will be recording the changes to the archive/redo logs concurrently the DBWR is loading, flushing, and evicting data pages from the database cache. 

So by having both database files and log files on the same disk groups, you create a natural amount of potential IO contention between these two vital and core database processes.

Then when you add LogMiner into the mix, LogMiner will also be creating read IO overhead. When all three are fighting for IO, which is a limited resource on the same disk group, you may find higher than desired IO waits.

Hope that helps.
-cc

Gergely Jahn

unread,
Feb 16, 2026, 3:06:08 PM (7 days ago) Feb 16
to debezium
Hi Chris,

Thank you for all the help. I tried the XStream adapter and it looks a lot more responsive and can keep up with a 30-40MB/sec redo rate. It can be scaled horizontally, so I will check what is the maximum redo rate we can hit with it, once we get into an environment where there is enough load (and CPU cores in the Oracle server) for that.
I found a different performance issue with the XStream adapter.
If I start the connector while the DB is actively writing the redo logs (for example, I run a script that executes transactions in a loop before starting the connector), the connector can only hit 10 TPS.
The queue of the outbound server is constantly around 9990 (suspiciously 10K -10) and the connector only takes 10 records in every iteration. 
When I restart the connector the performance increases significantly and reaches ~40K TPS.
As I see the capture process gets restarted on Oracle side as well.
If I run the connector first and then run my script that generates the load it can easily keep up with the load and reaches ~40K TPS.

Have you seen anything similar?

Best,
Greg

Chris Cranford

unread,
Feb 16, 2026, 11:40:13 PM (6 days ago) Feb 16
to debe...@googlegroups.com
Hi Greg -

We have not been made aware of this, but the scenario you describe makes sense with how the outbound server works. It sounds as though the server on the first connection is in cold-boot. With the LCR queue likely spilled to disk, there is likely a high IO overhead on the database server to prepare the outbound server state, group LCRs by transactions, and prepare for sending the changes to Debezium. When you stop and restart the connector, the outbound server continues to retain its state on Oracle's side, and so the reconnection begins to operate in a warm-boot state with a lot of data already prepared for consumption.

Can you start the cold-boot test again and please share the column values in V$XSTREAM_OUTBOUND_SERVER for the outbound server you're connecting to? 

Thanks,
-cc

Gergely Jahn

unread,
Feb 17, 2026, 4:14:19 AM (6 days ago) Feb 17
to debezium
Hi Chris,

select * from V$XSTREAM_OUTBOUND_SERVER;
 
Column,Value (1)
SID,635
SERIAL#,1497
SPID,4555
SERVER_NAME,XOUT
STARTUP_TIME,2026-02-17 07:22:33
STATE,IDLE
XIDUSN,8
XIDSLT,33
XIDSQN,765
COMMITSCN,1607034
TOTAL_TRANSACTIONS_SENT,67
MESSAGE_SEQUENCE,21
TOTAL_MESSAGES_SENT,1407
SEND_TIME,2026-02-17 07:24:23
LAST_SENT_MESSAGE_NUMBER,1607034
LAST_SENT_MESSAGE_CREATE_TIME,2026-02-17 07:22:31
ELAPSED_SEND_TIME,0
COMMIT_POSITION,0x000000000018857A0000000100000001000000000018857A000000010000000102
LAST_SENT_POSITION,0x000000000018857A0000000100000001000000000018857A000000010000000102
BYTES_SENT,693865
COMMITTED_DATA_ONLY,YES
CON_ID,1


Column,Value (1)
SID,635
SERIAL#,1497
SPID,4555
SERVER_NAME,XOUT
STARTUP_TIME,2026-02-17 07:22:33
STATE,SEND TRANSACTION
XIDUSN,5
XIDSLT,3
XIDSQN,1080
COMMITSCN,1607203
TOTAL_TRANSACTIONS_SENT,134
MESSAGE_SEQUENCE,21
TOTAL_MESSAGES_SENT,2814
SEND_TIME,2026-02-17 07:25:37
LAST_SENT_MESSAGE_NUMBER,1607203
LAST_SENT_MESSAGE_CREATE_TIME,2026-02-17 07:22:31
ELAPSED_SEND_TIME,0
COMMIT_POSITION,0x000000000018862300000001000000010000000000188623000000010000000102
LAST_SENT_POSITION,0x000000000018862300000001000000010000000000188623000000010000000102
BYTES_SENT,1343883
COMMITTED_DATA_ONLY,YES
CON_ID,1


Column,Value (1)
SID,635
SERIAL#,1497
SPID,4555
SERVER_NAME,XOUT
STARTUP_TIME,2026-02-17 07:22:33
STATE,IDLE
XIDUSN,2
XIDSLT,31
XIDSQN,757
COMMITSCN,1607306
TOTAL_TRANSACTIONS_SENT,175
MESSAGE_SEQUENCE,21
TOTAL_MESSAGES_SENT,3675
SEND_TIME,2026-02-17 07:26:23
LAST_SENT_MESSAGE_NUMBER,1607306
LAST_SENT_MESSAGE_CREATE_TIME,2026-02-17 07:22:31
ELAPSED_SEND_TIME,1
COMMIT_POSITION,0x000000000018868A0000000100000001000000000018868A000000010000000102
LAST_SENT_POSITION,0x000000000018868A0000000100000001000000000018868A000000010000000102
BYTES_SENT,1739789
COMMITTED_DATA_ONLY,YES
CON_ID,1

SELECT * FROM V$BUFFERED_QUEUES;

Column,Value (1)
QUEUE_ID,74490
QUEUE_SCHEMA,C##CFLTADMIN
QUEUE_NAME,Q$_XOUT_2
STARTUP_TIME,2026-02-17 07:20:54
NUM_MSGS,9982
SPILL_MSGS,0
CNUM_MSGS,3414072
CSPILL_MSGS,0
EXPIRED_MSGS,0
OLDEST_MSGID,
OLDEST_MSG_ENQTM,
QUEUE_STATE,NORMAL
ELAPSED_ENQUEUE_TIME,0
ELAPSED_DEQUEUE_TIME,0
ELAPSED_TRANSFORMATION_TIME,0
ELAPSED_RULE_EVALUATION_TIME,0
ENQUEUE_CPU_TIME,0
DEQUEUE_CPU_TIME,0
AVG_MSG_AGE,0
LAST_ENQUEUE_TIME,
LAST_DEQUEUE_TIME,
QUEUE_SIZE,2976
CON_ID,1

Best,
Greg
Reply all
Reply to author
Forward
0 new messages