I see a different scenario with MaxScale 2.5.11: I have the MariaDBMon monitor, the ReadWriteSplit router, and a master and a slave, both running MySQL 5.7.33 on AWS RDS.
The issue is that I only ever see connections opened to the master, while the connection count for the replica always stays at 0.
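For context, this is roughly how I'm checking the per-server counts (a minimal sketch with maxctrl; the exact commands and output columns on my side may differ slightly):

```
# Server states plus the "Connections" column per backend
maxctrl list servers

# More detail for the replica, including its connection statistics
maxctrl show server dbreplica
```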
[cluster-monitor]
type=monitor
module=mariadbmon
user=maxmon
password=B13...
servers=dbmaster,dbreplica
monitor_interval=500
backend_connect_timeout=3s
backend_read_timeout=3s
backend_write_timeout=3s
backend_connect_attempts=1
journal_max_age=28800s
disk_space_check_interval=0ms
script_timeout=90s
events=all
failcount=5
ignore_external_masters=false
auto_failover=false
failover_timeout=90s
switchover_timeout=90s
replication_master_ssl=false
verify_master_failure=true
master_failure_timeout=10s
auto_rejoin=false
enforce_read_only_slaves=false
switchover_on_low_disk_space=false
maintenance_on_low_disk_space=true
handle_events=true
assume_unique_hostnames=true
enforce_simple_topology=false
cooperative_monitoring_locks=none
master_conditions=primary_monitor_master
slave_conditions=none
[dbmaster]
address=teste-maxscale.[...].com
extra_port=0
persistmaxtime=0ms
persistpoolmax=0
port=3306
priority=0
protocol=mariadbbackend
proxy_protocol=false
rank=primary
ssl=false
ssl_cert_verify_depth=9
ssl_verify_peer_certificate=false
ssl_verify_peer_host=false
ssl_version=MAX
type=server
[dbreplica]
address=teste-maxscale-read.[...].com
extra_port=0
persistmaxtime=0ms
persistpoolmax=0
port=3306
priority=0
protocol=mariadbbackend
proxy_protocol=false
rank=primary
ssl=false
ssl_cert_verify_depth=9
ssl_verify_peer_certificate=false
ssl_verify_peer_host=false
ssl_version=MAX
type=server
[rwsplit-listener]
type=listener
address=::
authenticator_options=
port=3306
protocol=mariadbclient
service=rwsplit-service
ssl=false
ssl_cert_verify_depth=9
ssl_verify_peer_certificate=false
ssl_verify_peer_host=false
ssl_version=MAX
[rwsplit-service]
type=service
router=readwritesplit
user=maxusr
password=FFC7...
enable_root_user=false
max_connections=0
connection_timeout=0
net_write_timeout=0
auth_all_servers=false
strip_db_esc=true
log_auth_warnings=true
session_track_trx_state=false
retain_last_statements=-1
session_trace=false
rank=primary
connection_keepalive=300s
causal_reads=false
causal_reads_timeout=10000ms
delayed_retry=false
delayed_retry_timeout=10000ms
disable_sescmd_history=false
lazy_connect=false
master_accept_reads=false
master_failure_mode=fail_on_write
master_reconnection=true
max_sescmd_history=1500
max_slave_connections=255
optimistic_trx=false
prune_sescmd_history=true
retry_failed_reads=true
slave_connections=255
slave_selection_criteria=LEAST_CURRENT_OPERATIONS
strict_multi_stmt=false
strict_sp_calls=false
transaction_replay=false
transaction_replay_attempts=5
transaction_replay_max_size=1073741824
transaction_replay_retry_on_deadlock=false
use_sql_variables_in=master
targets=dbmaster,dbreplica
Do you see anything that would prevent connections from also being opened to dbreplica? The configuration above comes from a `cat` of all the persisted configuration files, so I could collect every setting in one place. Do you think this could be a side effect of using dynamic commands to set up the replication cluster in MaxScale?
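In case it helps narrow things down, these are the kinds of checks I can run next (a sketch only; the host and application user below are placeholders, and the output will vary):

```
# Does the monitor classify dbreplica as "Slave, Running"?
maxctrl show monitor cluster-monitor
maxctrl list servers

# Did the service pick up both targets?
maxctrl show service rwsplit-service

# Run a plain SELECT through the listener, outside any transaction;
# with readwritesplit this read should be eligible for dbreplica.
mysql -h <maxscale-host> -P 3306 -u <app_user> -p -e "SELECT 1"

# Then see whether the replica's connection counter moved
maxctrl show server dbreplica
```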