[COMMIT scylla-cluster-tests master] refactor(TestConfig): convert usages to instance

Commit Bot <bot@cloudius-systems.com>
Jul 29, 2021, 9:53:13 AM
To: scylladb-dev@googlegroups.com, MaciekCisowski
From: MaciekCisowski <maciej....@scylladb.com>
Committer: Bentsi <ben...@scylladb.com>
Branch: master

refactor(TestConfig): convert usages to instance
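
This converts call sites from class-level access (TestConfig.test_id(), TestConfig.REUSE_CLUSTER) to instance access via self.test_config. For the converted code to keep working, every TestConfig() call must hand back the same shared object; otherwise flags such as REUSE_CLUSTER or the shared DECODING_QUEUE would diverge between modules. As a minimal sketch of that assumption (the real sdcm/test_config.py may implement it differently), a singleton metaclass is sufficient:

    class Singleton(type):
        # Cache one instance per class; later calls return the cached one.
        _instances = {}

        def __call__(cls, *args, **kwargs):
            if cls not in cls._instances:
                cls._instances[cls] = super().__call__(*args, **kwargs)
            return cls._instances[cls]

    class TestConfig(metaclass=Singleton):
        # Shared flags stay class attributes, so instance access
        # (self.test_config.REUSE_CLUSTER) still sees a single value.
        REUSE_CLUSTER = False
        BACKTRACE_DECODING = False

        def reuse_cluster(self, val=True):
            TestConfig.REUSE_CLUSTER = val

    assert TestConfig() is TestConfig()  # every TestConfig() is the same object

With that property, the new class attribute BackupFunctionsMixIn.test_config = TestConfig(), the per-object self.test_config assignments, and the ad-hoc TestConfig() calls in sct.py below all observe the same configuration.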

---
diff --git a/mgmt_cli_test.py b/mgmt_cli_test.py
--- a/mgmt_cli_test.py
+++ b/mgmt_cli_test.py
@@ -46,6 +46,7 @@ class BackupFunctionsMixIn:

backup_azure_blob_service = None
backup_azure_blob_sas = None
+ test_config = TestConfig()

@cached_property
def locations(self) -> list[str]:
@@ -82,8 +83,8 @@ def install_azcopy_dependencies(self, node):
tar xz -C /usr/bin --strip-components 1 --wildcards '*/azcopy'
"""))
self.backup_azure_blob_service = \
- f"https://{TestConfig.backup_azure_blob_credentials['account']}.blob.core.windows.net/"
- self.backup_azure_blob_sas = TestConfig.backup_azure_blob_credentials["download_sas"]
+ f"https://{self.test_config.backup_azure_blob_credentials['account']}.blob.core.windows.net/"
+ self.backup_azure_blob_sas = self.test_config.backup_azure_blob_credentials["download_sas"]

@staticmethod
def download_from_s3(node, source, destination):
diff --git a/sct.py b/sct.py
--- a/sct.py
+++ b/sct.py
@@ -83,7 +83,7 @@ def install_package_from_dir(ctx, _, directories):

def add_file_logger(level: int = logging.DEBUG) -> None:
cmd_path = "-".join(click.get_current_context().command_path.split()[1:])
- logdir = TestConfig.make_new_logdir(update_latest_symlink=False, postfix=f"-{cmd_path}")
+ logdir = TestConfig().make_new_logdir(update_latest_symlink=False, postfix=f"-{cmd_path}")
handler = logging.FileHandler(os.path.join(logdir, "hydra.log"))
handler.setLevel(level)
LOGGER.addHandler(handler)
@@ -159,7 +159,7 @@ def clean_resources(ctx, post_behavior, user, test_id, logdir, dry_run, backend)
params = (user_param, )
else:
if not logdir and (post_behavior or not test_id):
- logdir = TestConfig.base_logdir()
+ logdir = TestConfig().base_logdir()

if not test_id and (latest_test_id := search_test_id_in_latest(logdir)):
click.echo(f"Latest TestId in {logdir} is {latest_test_id}")
@@ -748,7 +748,7 @@ def run_test(argv, backend, config, logdir):
if logdir:
os.environ['_SCT_LOGDIR'] = logdir

- logfile = os.path.join(TestConfig.logdir(), 'output.log')
+ logfile = os.path.join(TestConfig().logdir(), 'output.log')
sys.stdout = OutputLogger(logfile, sys.stdout)
sys.stderr = OutputLogger(logfile, sys.stderr)

@@ -770,7 +770,7 @@ def run_pytest(target, backend, config, logdir):
if logdir:
os.environ['_SCT_LOGDIR'] = logdir

- logfile = os.path.join(TestConfig.logdir(), 'output.log')
+ logfile = os.path.join(TestConfig().logdir(), 'output.log')
sys.stdout = OutputLogger(logfile, sys.stdout)
sys.stderr = OutputLogger(logfile, sys.stderr)
if not target:
diff --git a/sdcm/cluster.py b/sdcm/cluster.py
--- a/sdcm/cluster.py
+++ b/sdcm/cluster.py
@@ -196,6 +196,7 @@ def __init__(self, name, parent_cluster, ssh_login_info=None, base_logdir=None,
self.name = name
self.rack = rack
self.parent_cluster = parent_cluster # reference to the Cluster object that the node belongs to
+ self.test_config = TestConfig()
self.ssh_login_info = ssh_login_info
self.logdir = os.path.join(base_logdir, self.name) if base_logdir else None
self.dc_idx = dc_idx
@@ -264,7 +265,7 @@ def init(self) -> None:
# Start task threads after ssh is up, otherwise the dense ssh attempts from task
# threads will make SCT builder to be blocked by sshguard of gce instance.
self.wait_ssh_up(verbose=True)
- if not TestConfig.REUSE_CLUSTER:
+ if not self.test_config.REUSE_CLUSTER:
self.set_hostname()

self.start_task_threads()
@@ -277,22 +278,22 @@ def _init_remoter(self, ssh_login_info):
self.log.debug(self.remoter.ssh_debug_cmd())

def _init_port_mapping(self):
- if TestConfig.IP_SSH_CONNECTIONS == 'public' or TestConfig.MULTI_REGION:
- if TestConfig.RSYSLOG_ADDRESS:
+ if self.test_config.IP_SSH_CONNECTIONS == 'public' or self.test_config.MULTI_REGION:
+ if self.test_config.RSYSLOG_ADDRESS:
try:
ContainerManager.destroy_container(self, "auto_ssh:rsyslog", ignore_keepalive=True)
except NotFound:
pass
ContainerManager.run_container(self, "auto_ssh:rsyslog",
- local_port=TestConfig.RSYSLOG_ADDRESS[1],
- remote_port=TestConfig.RSYSLOG_SSH_TUNNEL_LOCAL_PORT)
- if TestConfig.LDAP_ADDRESS and self.parent_cluster.node_type == "scylla-db":
+ local_port=self.test_config.RSYSLOG_ADDRESS[1],
+ remote_port=self.test_config.RSYSLOG_SSH_TUNNEL_LOCAL_PORT)
+ if self.test_config.LDAP_ADDRESS and self.parent_cluster.node_type == "scylla-db":
try:
ContainerManager.destroy_container(self, "auto_ssh:ldap", ignore_keepalive=True)
except NotFound:
pass
ContainerManager.run_container(self, "auto_ssh:ldap",
- local_port=TestConfig.LDAP_ADDRESS[1],
+ local_port=self.test_config.LDAP_ADDRESS[1],
remote_port=LDAP_SSH_TUNNEL_LOCAL_PORT)

@property
@@ -330,7 +331,7 @@ def _set_keep_alive(self):

def set_keep_alive(self):
node_type = None if self.parent_cluster is None else self.parent_cluster.node_type
- if TestConfig.should_keep_alive(node_type) and self._set_keep_alive():
+ if self.test_config.should_keep_alive(node_type) and self._set_keep_alive():
self.log.info("Keep this node alive")

@property
@@ -346,8 +347,8 @@ def short_hostname(self):
def system_log(self):
orig_log_path = os.path.join(self.logdir, 'system.log')

- if TestConfig.RSYSLOG_ADDRESS:
- rsys_log_path = os.path.join(TestConfig.logdir(), 'hosts', self.short_hostname, 'messages.log')
+ if self.test_config.RSYSLOG_ADDRESS:
+ rsys_log_path = os.path.join(self.test_config.logdir(), 'hosts', self.short_hostname, 'messages.log')
if os.path.exists(rsys_log_path) and (not os.path.islink(orig_log_path)):
os.symlink(os.path.relpath(rsys_log_path, self.logdir), orig_log_path)
return rsys_log_path
@@ -699,9 +700,9 @@ def _refresh_instance_state(self):

@property
def ip_address(self):
- if TestConfig.IP_SSH_CONNECTIONS == "ipv6":
+ if self.test_config.IP_SSH_CONNECTIONS == "ipv6":
return self.ipv6_ip_address
- elif TestConfig.INTRA_NODE_COMM_PUBLIC:
+ elif self.test_config.INTRA_NODE_COMM_PUBLIC:
return self.public_ip_address
else:
return self.private_ip_address
@@ -712,9 +713,9 @@ def external_address(self):
the communication address for usage between the test and the nodes
:return:
"""
- if TestConfig.IP_SSH_CONNECTIONS == "ipv6":
+ if self.test_config.IP_SSH_CONNECTIONS == "ipv6":
return self.ipv6_ip_address
- elif TestConfig.IP_SSH_CONNECTIONS == 'public' or TestConfig.INTRA_NODE_COMM_PUBLIC:
+ elif self.test_config.IP_SSH_CONNECTIONS == 'public' or self.test_config.INTRA_NODE_COMM_PUBLIC:
return self.public_ip_address
else:
return self.private_ip_address
@@ -840,7 +841,7 @@ def __str__(self):
self.name,
self.public_ip_address,
self.private_ip_address,
- " | %s" % self.ipv6_ip_address if TestConfig.IP_SSH_CONNECTIONS == "ipv6" else "",
+ " | %s" % self.ipv6_ip_address if self.test_config.IP_SSH_CONNECTIONS == "ipv6" else "",
self.is_seed)

def restart(self):
@@ -929,7 +930,7 @@ def start_task_threads(self):
elif self.node_type == 'monitor':
# TODO: start alert manager thread here when start_task_threads will be run after node setup
# self.start_alert_manager_thread()
- if TestConfig.BACKTRACE_DECODING:
+ if self.test_config.BACKTRACE_DECODING:
self.start_decode_on_monitor_node_thread()

def get_backtraces(self):
@@ -1303,7 +1304,7 @@ def _read_system_log_and_publish_events(self,
json_log = json.loads(line)
except Exception: # pylint: disable=broad-except
pass
- if not start_from_beginning and TestConfig.RSYSLOG_ADDRESS:
+ if not start_from_beginning and self.test_config.RSYSLOG_ADDRESS:
line = line.strip()
if not exclude_from_logging:
LOGGER.debug(line)
@@ -1376,7 +1377,8 @@ def filter_backtraces(backtrace):
try:
if (last_error and
backtrace['event'].line_number <= filter_backtraces.last_error.line_number + 20
- and not filter_backtraces.last_error.type == 'BACKTRACE' and backtrace['event'].type == 'BACKTRACE'):
+ and not filter_backtraces.last_error.type == 'BACKTRACE'
+ and backtrace['event'].type == 'BACKTRACE'):
last_error.raw_backtrace = "\n".join(backtrace['backtrace'])
backtrace['event'].dont_publish()
return False
@@ -1390,10 +1392,10 @@ def filter_backtraces(backtrace):
backtraces = list(filter(filter_backtraces, backtraces))

for backtrace in backtraces:
- if TestConfig.BACKTRACE_DECODING and backtrace["event"].raw_backtrace:
+ if self.test_config.BACKTRACE_DECODING and backtrace["event"].raw_backtrace:
scylla_debug_info = self.get_scylla_debuginfo_file()
self.log.debug("Debug info file %s", scylla_debug_info)
- TestConfig.DECODING_QUEUE.put({
+ self.test_config.DECODING_QUEUE.put({
"node": self,
"debug_file": scylla_debug_info,
"event": backtrace["event"],
@@ -1413,16 +1415,16 @@ def decode_backtrace(self):
event = None
obj = None
try:
- obj = TestConfig.DECODING_QUEUE.get(timeout=5)
+ obj = self.test_config.DECODING_QUEUE.get(timeout=5)
if obj is None:
- TestConfig.DECODING_QUEUE.task_done()
+ self.test_config.DECODING_QUEUE.task_done()
break
event = obj["event"]
if not scylla_debug_file:
scylla_debug_file = self.copy_scylla_debug_info(obj["node"], obj["debug_file"])
output = self.decode_raw_backtrace(scylla_debug_file, " ".join(event.raw_backtrace.split('\n')))
event.backtrace = output.stdout
- TestConfig.DECODING_QUEUE.task_done()
+ self.test_config.DECODING_QUEUE.task_done()
except queue.Empty:
pass
except Exception as details: # pylint: disable=broad-except
@@ -1431,7 +1433,7 @@ def decode_backtrace(self):
if event:
event.publish()

- if self.termination_event.isSet() and TestConfig.DECODING_QUEUE.empty():
+ if self.termination_event.isSet() and self.test_config.DECODING_QUEUE.empty():
break

def copy_scylla_debug_info(self, node, debug_file):
@@ -1549,14 +1551,13 @@ def remote_manager_yaml(self):
def remote_manager_agent_yaml(self):
return self._remote_yaml(path=SCYLLA_MANAGER_AGENT_YAML_PATH)

- @staticmethod
- def get_openldap_config():
- if TestConfig.LDAP_ADDRESS is None:
+ def get_openldap_config(self):
+ if self.test_config.LDAP_ADDRESS is None:
return {}
- ldap_server_ip = '127.0.0.1' if TestConfig.IP_SSH_CONNECTIONS == 'public' \
- or TestConfig.MULTI_REGION else TestConfig.LDAP_ADDRESS[0]
- ldap_port = LDAP_SSH_TUNNEL_LOCAL_PORT if TestConfig.IP_SSH_CONNECTIONS == 'public' or TestConfig.MULTI_REGION else \
- TestConfig.LDAP_ADDRESS[1]
+ ldap_server_ip = '127.0.0.1' if self.test_config.IP_SSH_CONNECTIONS == 'public' \
+ or self.test_config.MULTI_REGION else self.test_config.LDAP_ADDRESS[0]
+ ldap_port = LDAP_SSH_TUNNEL_LOCAL_PORT if self.test_config.IP_SSH_CONNECTIONS == 'public' \
+ or self.test_config.MULTI_REGION else self.test_config.LDAP_ADDRESS[1]
return {'role_manager': 'com.scylladb.auth.LDAPRoleManager',
'ldap_url_template': f'ldap://{ldap_server_ip}:{ldap_port}/'
f'{LDAP_BASE_OBJECT}?cn?sub?(uniqueMember='
@@ -1565,9 +1566,8 @@ def get_openldap_config():
'ldap_bind_dn': f'cn=admin,{LDAP_BASE_OBJECT}',
'ldap_bind_passwd': LDAP_PASSWORD}

- @staticmethod
- def get_ldap_ms_ad_config():
- if TestConfig.LDAP_ADDRESS is None:
+ def get_ldap_ms_ad_config(self):
+ if self.test_config.LDAP_ADDRESS is None:
return {}
ldap_ms_ad_credentials = KeyStore().get_ldap_ms_ad_credentials()
return {'ldap_attr_role': 'cn',
@@ -1578,14 +1578,13 @@ def get_ldap_ms_ad_config():
f'(member=CN={{USER}},DC=scylla-qa,DC=com)',
'role_manager': 'com.scylladb.auth.LDAPRoleManager'}

- @staticmethod
- def get_saslauthd_config():
- if TestConfig.LDAP_ADDRESS is None:
+ def get_saslauthd_config(self):
+ if self.test_config.LDAP_ADDRESS is None:
return {}
- ldap_server_ip = '127.0.0.1' if TestConfig.IP_SSH_CONNECTIONS == 'public' or TestConfig.MULTI_REGION else TestConfig.LDAP_ADDRESS[
- 0]
- ldap_port = LDAP_SSH_TUNNEL_LOCAL_PORT if TestConfig.IP_SSH_CONNECTIONS == 'public' or TestConfig.MULTI_REGION else \
- TestConfig.LDAP_ADDRESS[1]
+ ldap_server_ip = '127.0.0.1' if self.test_config.IP_SSH_CONNECTIONS == 'public' \
+ or self.test_config.MULTI_REGION else self.test_config.LDAP_ADDRESS[0]
+ ldap_port = LDAP_SSH_TUNNEL_LOCAL_PORT if self.test_config.IP_SSH_CONNECTIONS == 'public' \
+ or self.test_config.MULTI_REGION else self.test_config.LDAP_ADDRESS[1]
return {'ldap_servers': f'ldap://{ldap_server_ip}:{ldap_port}/',
'ldap_search_base': f'ou=Person,{LDAP_BASE_OBJECT}',
'ldap_bind_dn': f'cn=admin,{LDAP_BASE_OBJECT}',
@@ -1879,7 +1878,7 @@ def install_manager_agent(self, package_path: Optional[str] = None) -> None:
tls_key_file = SCYLLA_MANAGER_TLS_KEY_FILE

with self.remote_manager_agent_yaml() as manager_agent_yaml:
- manager_agent_yaml["auth_token"] = TestConfig.test_id()
+ manager_agent_yaml["auth_token"] = self.test_config.test_id()
manager_agent_yaml["tls_cert_file"] = tls_cert_file
manager_agent_yaml["tls_key_file"] = tls_key_file
manager_agent_yaml["prometheus"] = f":{self.parent_cluster.params.get('manager_prometheus_port')}"
@@ -1901,8 +1900,8 @@ def update_manager_agent_config(self, region: Optional[str] = None) -> None:
elif backup_backend == "gcs":
pass
elif backup_backend == "azure":
- backup_backend_config["account"] = TestConfig.backup_azure_blob_credentials["account"]
- backup_backend_config["key"] = TestConfig.backup_azure_blob_credentials["key"]
+ backup_backend_config["account"] = self.test_config.backup_azure_blob_credentials["account"]
+ backup_backend_config["key"] = self.test_config.backup_azure_blob_credentials["key"]
else:
raise ValueError(f"{backup_backend=} is not supported")

@@ -2823,7 +2822,7 @@ def run_startup_script(self):
startup_script_remote_path = '/tmp/sct-startup.sh'

with tempfile.NamedTemporaryFile(mode='w+', delete=False, encoding='utf-8') as tmp_file:
- tmp_file.write(TestConfig.get_startup_script())
+ tmp_file.write(self.test_config.get_startup_script())
tmp_file.flush()
self.remoter.send_files(src=tmp_file.name, dst=startup_script_remote_path) # pylint: disable=not-callable

@@ -3018,10 +3017,11 @@ class BaseCluster: # pylint: disable=too-many-instance-attributes,too-many-publ
def __init__(self, cluster_uuid=None, cluster_prefix='cluster', node_prefix='node', n_nodes=3, params=None,
region_names=None, node_type=None, extra_network_interface=False):
self.extra_network_interface = extra_network_interface
+ self.test_config = TestConfig()
if params is None:
params = {}
if cluster_uuid is None:
- self.uuid = TestConfig.test_id()
+ self.uuid = self.test_config.test_id()
else:
self.uuid = cluster_uuid
self.node_type = node_type
@@ -3045,7 +3045,7 @@ def __init__(self, cluster_uuid=None, cluster_prefix='cluster', node_prefix='nod
# default 'cassandra' password is weak password, MS AD doesn't allow to use it.
self.added_password_suffix = False

- if TestConfig.REUSE_CLUSTER:
+ if self.test_config.REUSE_CLUSTER:
# get_node_ips_param should be defined in child
self._node_public_ips = self.params.get(self.get_node_ips_param(public_ip=True)) or []
self._node_private_ips = self.params.get(self.get_node_ips_param(public_ip=False)) or []
@@ -3073,7 +3073,7 @@ def auto_bootstrap(self):
def tags(self) -> Dict[str, str]:
key = self.node_type if "db" not in self.node_type else "db"
action = self.params.get(f"post_behavior_{key}_nodes")
- return {**TestConfig.common_tags(),
+ return {**self.test_config.common_tags(),
"NodeType": str(self.node_type),
"keep_action": "terminate" if action == "destroy" else "", }

@@ -3174,7 +3174,7 @@ def terminate_node(self, node):
name=node.name,
public_ip=node.public_ip_address,
private_ip=node.private_ip_address,
- ipv6_ip=node.ipv6_ip_address if TestConfig.IP_SSH_CONNECTIONS == "ipv6" else '',
+ ipv6_ip=node.ipv6_ip_address if self.test_config.IP_SSH_CONNECTIONS == "ipv6" else '',
ip_address=node.ip_address,
shards=node.scylla_shards,
termination_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
@@ -3525,12 +3525,12 @@ def __init__(self, *args, **kwargs):
self.nemesis = []
self.nemesis_threads = []
self.nemesis_count = 0
+ self.test_config = TestConfig()
self._node_cycle = None
super().__init__(*args, **kwargs)

- @staticmethod
- def get_node_ips_param(public_ip=True):
- if TestConfig.MIXED_CLUSTER:
+ def get_node_ips_param(self, public_ip=True):
+ if self.test_config.MIXED_CLUSTER:
return 'oracle_db_nodes_public_ip' if public_ip else 'oracle_db_nodes_private_ip'
return 'db_nodes_public_ip' if public_ip else 'db_nodes_private_ip'

@@ -3557,7 +3557,7 @@ def set_seeds(self, wait_for_timeout=300, first_only=False):
node.wait_ssh_up()
seed_nodes_ips = [node.ip_address]

- elif seeds_selector == 'reflector' or TestConfig.REUSE_CLUSTER or cluster_backend == 'aws-siren':
+ elif seeds_selector == 'reflector' or self.test_config.REUSE_CLUSTER or cluster_backend == 'aws-siren':
node = self.nodes[0]
node.wait_ssh_up()
# When cluster just started, seed IP in the scylla.yaml may be like '127.0.0.1'
@@ -4127,7 +4127,7 @@ def node_setup(self, node: BaseNode, verbose: bool = False, timeout: int = 3600)
if self.params.get("use_preinstalled_scylla") and node.is_scylla_installed(raise_if_not_installed=True):
install_scylla = False

- if not TestConfig.REUSE_CLUSTER:
+ if not self.test_config.REUSE_CLUSTER:
node.disable_daily_triggered_services()
nic_devname = node.get_nic_devices()[0]
if install_scylla:
@@ -4144,10 +4144,10 @@ def node_setup(self, node: BaseNode, verbose: bool = False, timeout: int = 3600)
return

self.get_scylla_version()
- if TestConfig.BACKTRACE_DECODING:
+ if self.test_config.BACKTRACE_DECODING:
node.install_scylla_debuginfo()

- if TestConfig.MULTI_REGION:
+ if self.test_config.MULTI_REGION:
node.datacenter_setup(self.datacenter) # pylint: disable=no-member
self.node_config_setup(node, ','.join(self.seed_nodes_ips), self.get_endpoint_snitch())

@@ -4218,7 +4218,7 @@ def _reuse_cluster_setup(self, node):

def get_endpoint_snitch(self, default_multi_region="GossipingPropertyFileSnitch"):
endpoint_snitch = self.params.get('endpoint_snitch')
- if TestConfig.MULTI_REGION:
+ if self.test_config.MULTI_REGION:
if not endpoint_snitch:
endpoint_snitch = default_multi_region
return endpoint_snitch
@@ -4363,19 +4363,19 @@ def get_node_ip_list(verification_node):
LOGGER.info('Decommission %s PASS', node)
with DbEventsFilter(db_event=DatabaseLogEvent.POWER_OFF, node=node):
self.terminate_node(node) # pylint: disable=no-member
- TestConfig.tester_obj().monitors.reconfigure_scylla_monitoring()
+ self.test_config.tester_obj().monitors.reconfigure_scylla_monitoring()

def decommission(self, node):
node.run_nodetool("decommission")
self.verify_decommission(node)

@property
def scylla_manager_node(self) -> BaseNode:
- return TestConfig.tester_obj().monitors.nodes[0]
+ return self.test_config.tester_obj().monitors.nodes[0]

@property
def scylla_manager_auth_token(self) -> str:
- return TestConfig.tester_obj().monitors.mgmt_auth_token
+ return self.test_config.tester_obj().monitors.mgmt_auth_token

@property
def scylla_manager_cluster_name(self):
@@ -4466,7 +4466,7 @@ def node_setup(self, node, verbose=False, db_node_address=None, **kwargs): # py
# update repo cache and system after system is up
node.update_repo_cache()

- if TestConfig.REUSE_CLUSTER:
+ if TestConfig().REUSE_CLUSTER:
self.kill_stress_thread()
return

@@ -4746,6 +4746,7 @@ def __init__(self, targets, params):
self.phantomjs_installed = False
self.grafana_start_time = 0
self._sct_dashboard_json_file = None
+ self.test_config = TestConfig()

@staticmethod
@retrying(n=5)
@@ -4822,17 +4823,17 @@ def node_setup(self, node, **kwargs): # pylint: disable=unused-argument
self.log.info('TestConfig in BaseMonitorSet')
node.wait_ssh_up()
# add swap file
- if not TestConfig.REUSE_CLUSTER:
+ if not self.test_config.REUSE_CLUSTER:
monitor_swap_size = self.params.get("monitor_swap_size")
if not monitor_swap_size:
self.log.info("Swap file for the monitor is not configured")
else:
node.create_swap_file(monitor_swap_size)
# update repo cache and system after system is up
node.update_repo_cache()
- self.mgmt_auth_token = TestConfig.test_id() # pylint: disable=attribute-defined-outside-init
+ self.mgmt_auth_token = self.test_config.test_id() # pylint: disable=attribute-defined-outside-init

- if TestConfig.REUSE_CLUSTER:
+ if self.test_config.REUSE_CLUSTER:
self.configure_scylla_monitoring(node)
self.restart_scylla_monitoring(sct_metrics=True)
set_grafana_url(f"http://{normalize_ipv6_url(node.external_address)}:{self.grafana_port}")
@@ -5230,7 +5231,7 @@ def get_grafana_screenshot_and_snapshot(self, test_start_time=None):
extra_entities=grafana_extra_dashboards)
screenshot_files = screenshot_collector.collect(node, self.logdir)
for screenshot in screenshot_files:
- s3_path = "{test_id}/{date}".format(test_id=TestConfig.test_id(), date=date_time)
+ s3_path = "{test_id}/{date}".format(test_id=self.test_config.test_id(), date=date_time)
screenshot_links.append(S3Storage().upload_file(screenshot, s3_path))

snapshots_collector = GrafanaSnapshot(name="grafana-snapshot",
@@ -5247,7 +5248,7 @@ def upload_annotations_to_s3(self):
try:
annotations = self.get_grafana_annotations(self.nodes[0])
if annotations:
- annotations_url = S3Storage().generate_url('annotations.json', TestConfig.test_id())
+ annotations_url = S3Storage().generate_url('annotations.json', self.test_config.test_id())
self.log.info("Uploading 'annotations.json' to {s3_url}".format(
s3_url=annotations_url))
response = requests.put(annotations_url, data=annotations, headers={
@@ -5265,7 +5266,7 @@ def download_monitor_data(self) -> str:
try:
if snapshot_archive := PrometheusSnapshots(name='prometheus_snapshot').collect(self.nodes[0], self.logdir):
self.log.debug("Snapshot local path: %s", snapshot_archive)
- return upload_archive_to_s3(snapshot_archive, TestConfig.test_id())
+ return upload_archive_to_s3(snapshot_archive, self.test_config.test_id())
except Exception as details: # pylint: disable=broad-except
self.log.error("Error downloading prometheus data dir: %s", details)
return ""
diff --git a/sdcm/cluster_aws.py b/sdcm/cluster_aws.py
--- a/sdcm/cluster_aws.py
+++ b/sdcm/cluster_aws.py
@@ -44,7 +44,6 @@
from sdcm.sct_events.system import SpotTerminationEvent
from sdcm.sct_events.filters import DbEventsFilter
from sdcm.sct_events.database import DatabaseLogEvent
-from sdcm.test_config import TestConfig

LOGGER = logging.getLogger(__name__)

@@ -106,9 +105,8 @@ def __str__(self):
self._ec2_ami_id,
self._ec2_instance_type)

- @staticmethod
- def calculate_spot_duration_for_test():
- return floor(TestConfig.TEST_DURATION / 60) * 60 + 60
+ def calculate_spot_duration_for_test(self):
+ return floor(self.test_config.TEST_DURATION / 60) * 60 + 60

def _create_on_demand_instances(self, count, interfaces, ec2_user_data, dc_idx=0): # pylint: disable=too-many-arguments
ami_id = self._ec2_ami_id[dc_idx]
@@ -296,7 +294,7 @@ def _create_mixed_instances(self, count, interfaces, ec2_user_data, dc_idx): #

def _get_instances(self, dc_idx):

- test_id = cluster.TestConfig.test_id()
+ test_id = self.test_config.test_id()
if not test_id:
raise ValueError("test_id should be configured for using reuse_cluster")

@@ -412,7 +410,7 @@ def configure_eth1_script():
# pylint: disable=too-many-arguments

def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False):
- post_boot_script = cluster.TestConfig.get_startup_script()
+ post_boot_script = self.test_config.get_startup_script()
if self.extra_network_interface:
post_boot_script += self.configure_eth1_script()

@@ -430,7 +428,7 @@ def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_boots
else:
ec2_user_data = post_boot_script

- if cluster.TestConfig.REUSE_CLUSTER:
+ if self.test_config.REUSE_CLUSTER:
instances = self._get_instances(dc_idx)
else:
instances = self._create_instances(count, ec2_user_data, dc_idx)
@@ -490,7 +488,7 @@ def init(self):
LOGGER.debug("Waiting until instance {0._instance} starts running...".format(self))
self._instance_wait_safe(self._instance.wait_until_running)

- if not cluster.TestConfig.REUSE_CLUSTER:
+ if not self.test_config.REUSE_CLUSTER:
resources_to_tag = [self._instance.id, ]
if len(self._instance.network_interfaces) == 2:
# first we need to configure the both networks so we'll have public ip
@@ -572,7 +570,7 @@ def external_address(self):
"""
if self.parent_cluster.params.get("ip_ssh_connections") == "ipv6":
return self.ipv6_ip_address
- elif TestConfig.IP_SSH_CONNECTIONS == 'public' or cluster.TestConfig.INTRA_NODE_COMM_PUBLIC:
+ elif self.test_config.IP_SSH_CONNECTIONS == 'public' or self.test_config.INTRA_NODE_COMM_PUBLIC:
return self.public_ip_address
else:
return self._instance.private_ip_address
@@ -833,7 +831,7 @@ def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint:
params=None):
# pylint: disable=too-many-locals
# We have to pass the cluster name in advance in user_data
- cluster_uuid = cluster.TestConfig.test_id()
+ cluster_uuid = self.test_config.test_id()
cluster_prefix = cluster.prepend_user_prefix(user_prefix, 'db-cluster')
node_prefix = cluster.prepend_user_prefix(user_prefix, 'db-node')

@@ -923,7 +921,7 @@ def node_config_setup(self, node, seed_address=None, endpoint_snitch=None,
ldap=self.params.get('use_ldap_authorization'),
ms_ad_ldap=self.params.get('use_ms_ad_ldap'),
)
- if cluster.TestConfig.INTRA_NODE_COMM_PUBLIC:
+ if self.test_config.INTRA_NODE_COMM_PUBLIC:
setup_params.update(dict(
broadcast=node.public_ip_address,
))
@@ -1033,7 +1031,7 @@ def node_setup(self, node, verbose=False, timeout=3600):
node.wait_ssh_up(verbose=verbose)
node.wait_db_up(verbose=verbose)

- if cluster.TestConfig.REUSE_CLUSTER:
+ if self.test_config.REUSE_CLUSTER:
# for reconfigure rsyslog
node.run_startup_script()
return
diff --git a/sdcm/cluster_docker.py b/sdcm/cluster_docker.py
--- a/sdcm/cluster_docker.py
+++ b/sdcm/cluster_docker.py
@@ -150,7 +150,7 @@ def __init__(self,
n_nodes: Union[list, int] = 3,
params: dict = None) -> None:
self.source_image = f"{docker_image}:{docker_image_tag}"
- self.node_container_image_tag = f"scylla-sct:{node_type}-{str(cluster.TestConfig.test_id())[:8]}"
+ self.node_container_image_tag = f"scylla-sct:{node_type}-{str(self.test_config.test_id())[:8]}"
self.node_container_key_file = node_key_file

super().__init__(cluster_prefix=cluster_prefix,
@@ -204,7 +204,7 @@ def _get_nodes(self):
return self.nodes

def add_nodes(self, count, ec2_user_data="", dc_idx=0, rack=0, enable_auto_bootstrap=False):
- return self._get_nodes() if cluster.TestConfig.REUSE_CLUSTER else self._create_nodes(count, enable_auto_bootstrap)
+ return self._get_nodes() if self.test_config.REUSE_CLUSTER else self._create_nodes(count, enable_auto_bootstrap)


class ScyllaDockerCluster(cluster.BaseScyllaCluster, DockerCluster): # pylint: disable=abstract-method
@@ -236,7 +236,7 @@ def node_setup(self, node, verbose=False, timeout=3600):

self.check_aio_max_nr(node)

- if cluster.TestConfig.BACKTRACE_DECODING:
+ if self.test_config.BACKTRACE_DECODING:
node.install_scylla_debuginfo()

self.node_config_setup(node, seed_address, endpoint_snitch)
diff --git a/sdcm/cluster_gce.py b/sdcm/cluster_gce.py
--- a/sdcm/cluster_gce.py
+++ b/sdcm/cluster_gce.py
@@ -316,7 +316,7 @@ def _create_instance(self, node_index, dc_idx, spot=False):
# Name must start with a lowercase letter followed by up to 63
# lowercase letters, numbers, or hyphens, and cannot end with a hyphen
assert len(name) <= 63, "Max length of instance name is 63"
- startup_script = cluster.TestConfig.get_startup_script()
+ startup_script = self.test_config.get_startup_script()

if self.params.get("scylla_linux_distro") in ("ubuntu-bionic", "ubuntu-xenial", "ubuntu-focal",):
# we need to disable sshguard to prevent blocking connections from the builder
@@ -387,7 +387,7 @@ def _get_instances_by_name(self, name: str, dc_idx: int = 0):
return found[0] if found else None

def _get_instances(self, dc_idx):
- test_id = cluster.TestConfig.test_id()
+ test_id = self.test_config.test_id()
if not test_id:
raise ValueError("test_id should be configured for using reuse_cluster")
instances_by_nodetype = list_instances_gce(tags_dict={'TestId': test_id, 'NodeType': self.node_type})
@@ -431,10 +431,10 @@ def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_boots
return []
self.log.info("Adding nodes to cluster")
nodes = []
- if cluster.TestConfig.REUSE_CLUSTER:
+ if self.test_config.REUSE_CLUSTER:
instances = self._get_instances(dc_idx)
if not instances:
- raise RuntimeError("No nodes found for testId %s " % (cluster.TestConfig.test_id(),))
+ raise RuntimeError("No nodes found for testId %s " % (self.test_config.test_id(),))
else:
instances = self._create_instances(count, dc_idx)

diff --git a/sdcm/cluster_k8s/__init__.py b/sdcm/cluster_k8s/__init__.py
--- a/sdcm/cluster_k8s/__init__.py
+++ b/sdcm/cluster_k8s/__init__.py
@@ -274,9 +274,10 @@ class KubernetesCluster(metaclass=abc.ABCMeta): # pylint: disable=too-many-publ
pools: Dict[str, CloudK8sNodePool]

def __init__(self, params: dict, user_prefix: str = '', region_name: str = None, cluster_uuid: str = None):
+ self.test_config = TestConfig()
self.pools = {}
if cluster_uuid is None:
- self.uuid = TestConfig.test_id()
+ self.uuid = self.test_config.test_id()
else:
self.uuid = cluster_uuid
self.region_name = region_name
@@ -352,19 +353,19 @@ def kubectl_multi_cmd(self, *command, namespace=None, timeout=KUBECTL_TIMEOUT, r
def helm(self):
if self.api_call_rate_limiter:
self.api_call_rate_limiter.wait()
- return partial(cluster.TestConfig.tester_obj().localhost.helm, self)
+ return partial(self.test_config.tester_obj().localhost.helm, self)

@property
def helm_install(self):
if self.api_call_rate_limiter:
self.api_call_rate_limiter.wait()
- return partial(cluster.TestConfig.tester_obj().localhost.helm_install, self)
+ return partial(self.test_config.tester_obj().localhost.helm_install, self)

@property
def helm_upgrade(self):
if self.api_call_rate_limiter:
self.api_call_rate_limiter.wait()
- return partial(cluster.TestConfig.tester_obj().localhost.helm_upgrade, self)
+ return partial(self.test_config.tester_obj().localhost.helm_upgrade, self)

@cached_property
def kubectl_token_path(self): # pylint: disable=no-self-use
@@ -2002,10 +2003,10 @@ def install_scylla_manager(self, node):

def node_setup(self, node: BaseScyllaPodContainer, verbose: bool = False, timeout: int = 3600):
self.get_scylla_version()
- if TestConfig.BACKTRACE_DECODING:
+ if self.test_config.BACKTRACE_DECODING:
node.install_scylla_debuginfo()

- if TestConfig.MULTI_REGION:
+ if self.test_config.MULTI_REGION:
node.datacenter_setup(self.datacenter) # pylint: disable=no-member
try:
# NOTE: case of seedful scylla (operator v1.3.0-)
@@ -2126,7 +2127,7 @@ def terminate_node(self, node: BasePodContainer, scylla_shards=""): # pylint: d
name=node.name,
public_ip=node.public_ip_address,
private_ip=node.private_ip_address,
- ipv6_ip=node.ipv6_ip_address if TestConfig.IP_SSH_CONNECTIONS == "ipv6" else '',
+ ipv6_ip=node.ipv6_ip_address if self.test_config.IP_SSH_CONNECTIONS == "ipv6" else '',
ip_address=node.ip_address,
shards=scylla_shards or node.scylla_shards,
termination_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
@@ -2156,7 +2157,7 @@ def decommission(self, node):
# if current_members - 1 == 0:
# self.delete_rack(rack)

- if monitors := cluster.TestConfig.tester_obj().monitors:
+ if monitors := self.test_config.tester_obj().monitors:
monitors.reconfigure_scylla_monitoring()

def upgrade_scylla_cluster(self, new_version: str) -> None:
@@ -2329,4 +2330,4 @@ def get_tags_from_params(params: dict) -> Dict[str, str]:
post_behavior_idx = behaviors.index(params.get(f"post_behavior_{node_type}_nodes").lower())
picked_behavior_idx = min(post_behavior_idx, picked_behavior_idx)
picked_behavior = behaviors[picked_behavior_idx]
- return {**TestConfig.common_tags(), "keep_action": "terminate" if picked_behavior == "destroy" else "", }
+ return {**TestConfig().common_tags(), "keep_action": "terminate" if picked_behavior == "destroy" else "", }
diff --git a/sdcm/cluster_k8s/gke.py b/sdcm/cluster_k8s/gke.py
--- a/sdcm/cluster_k8s/gke.py
+++ b/sdcm/cluster_k8s/gke.py
@@ -27,7 +27,6 @@

from sdcm.cluster_k8s.iptables import IptablesPodIpRedirectMixin, IptablesClusterOpsMixin
from sdcm.cluster_gce import MonitorSetGCE
-from sdcm.test_config import TestConfig

GKE_API_CALL_RATE_LIMIT = 5 # ops/s
GKE_API_CALL_QUEUE_SIZE = 1000 # ops
@@ -234,7 +233,7 @@ def deploy(self):

@cached_property
def gcloud(self) -> GcloudContextManager: # pylint: disable=no-self-use
- return cluster.TestConfig.tester_obj().localhost.gcloud
+ return self.test_config.tester_obj().localhost.gcloud

def deploy_node_pool(self, pool: GkeNodePool, wait_till_ready=True) -> None:
self._add_pool(pool)
@@ -305,13 +304,13 @@ def gce_node_ips(self):

@cached_property
def hydra_dest_ip(self) -> str:
- if TestConfig.IP_SSH_CONNECTIONS == "public" or TestConfig.INTRA_NODE_COMM_PUBLIC:
+ if self.test_config.IP_SSH_CONNECTIONS == "public" or self.test_config.INTRA_NODE_COMM_PUBLIC:
return self.gce_node_ips[0][0]
return self.gce_node_ips[1][0]

@cached_property
def nodes_dest_ip(self) -> str:
- if cluster.TestConfig.INTRA_NODE_COMM_PUBLIC:
+ if self.test_config.INTRA_NODE_COMM_PUBLIC:
return self.gce_node_ips[0][0]
return self.gce_node_ips[1][0]

diff --git a/sdcm/cluster_k8s/iptables.py b/sdcm/cluster_k8s/iptables.py
--- a/sdcm/cluster_k8s/iptables.py
+++ b/sdcm/cluster_k8s/iptables.py
@@ -16,9 +16,8 @@
from itertools import chain
from typing import Literal, List, Optional

-from sdcm import cluster
from sdcm.remote import LOCALRUNNER, shell_script_cmd
-
+from sdcm.test_config import TestConfig

IPTABLES_BIN = "iptables"
IPTABLES_LEGACY_BIN = "iptables-legacy"
@@ -85,7 +84,7 @@ def update_nodes_iptables_redirect_rules(self,
loaders: bool = True,
monitors: bool = True) -> None:
nodes_to_update = []
- if tester := cluster.TestConfig.tester_obj():
+ if tester := TestConfig().tester_obj():
if loaders and tester.loaders:
nodes_to_update.extend(tester.loaders.nodes)
if monitors and tester.monitors:
diff --git a/sdcm/cluster_k8s/mini_k8s.py b/sdcm/cluster_k8s/mini_k8s.py
--- a/sdcm/cluster_k8s/mini_k8s.py
+++ b/sdcm/cluster_k8s/mini_k8s.py
@@ -437,10 +437,10 @@ def deploy(self):
if not self.is_k8s_software_installed:
self.setup_k8s_software()
if not self.is_k8s_software_running:
- if cluster.TestConfig.REUSE_CLUSTER:
+ if self.test_config.REUSE_CLUSTER:
raise RuntimeError("SCT_REUSE_CLUSTER is set, but target host is not ready")
self.start_k8s_software()
- elif not cluster.TestConfig.REUSE_CLUSTER:
+ elif not self.test_config.REUSE_CLUSTER:
self.stop_k8s_software()
self.start_k8s_software()
self.create_kubectl_config()
diff --git a/sdcm/db_stats.py b/sdcm/db_stats.py
--- a/sdcm/db_stats.py
+++ b/sdcm/db_stats.py
@@ -29,6 +29,7 @@
import requests

from sdcm.es import ES
+from sdcm.test_config import TestConfig
from sdcm.utils.common import get_job_name, normalize_ipv6_url
from sdcm.utils.decorators import retrying
from sdcm.sct_events.system import ElasticsearchEvent
@@ -348,6 +349,7 @@ def __init__(self, *args, **kwargs):
self._test_id = kwargs.get("test_id")
self._es_doc_type = "test_stats"
self._stats = {}
+ self.test_config = TestConfig()

# For using this class as a base for TestStatsMixin.
if not self._test_id:
@@ -427,8 +429,7 @@ class TestStatsMixin(Stats):
STRESS_STATS = ('op rate', 'latency mean', 'latency 99th percentile')
STRESS_STATS_TOTAL = ('op rate', 'Total errors')

- @staticmethod
- def _create_test_id(doc_id_with_timestamp=False):
+ def _create_test_id(self, doc_id_with_timestamp=False):
"""Return doc_id equal unified test-id

Generate doc_id for ES document as unified global test-id
@@ -441,8 +442,7 @@ def _create_test_id(doc_id_with_timestamp=False):
:rtype: {str}
"""
# avoid cyclic-decencies between cluster and db_stats
- from sdcm.cluster import TestConfig # pylint: disable=import-outside-toplevel
- doc_id = TestConfig.test_id()
+ doc_id = self.test_config.test_id()
if doc_id_with_timestamp:
doc_id += "_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f"))
return doc_id
@@ -511,9 +511,6 @@ def get_setup_details(self):
return setup_details

def get_test_details(self):
- # avoid cyclic-decencies between cluster and db_stats
- from sdcm.cluster import TestConfig # pylint: disable=import-outside-toplevel
-
test_details = {}
test_details['sct_git_commit'] = subprocess.check_output(['git', 'rev-parse', 'HEAD'], text=True).strip()
test_details['job_name'] = get_job_name()
@@ -525,7 +522,7 @@ def get_test_details(self):
test_details['grafana_screenshots'] = []
test_details['grafana_annotations'] = []
test_details['prometheus_data'] = ""
- test_details['test_id'] = TestConfig.test_id()
+ test_details['test_id'] = self.test_config.test_id()
test_details['log_files'] = {}
return test_details

diff --git a/sdcm/sct_events/events_analyzer.py b/sdcm/sct_events/events_analyzer.py
--- a/sdcm/sct_events/events_analyzer.py
+++ b/sdcm/sct_events/events_analyzer.py
@@ -53,7 +53,7 @@ def run(self) -> None:

def kill_test(self, backtrace_with_reason) -> None:
self.terminate()
- if tester := TestConfig.tester_obj():
+ if tester := TestConfig().tester_obj():
tester.kill_test(backtrace_with_reason)
else:
LOGGER.error("No test was registered using `TestConfig.set_tester_obj()', do not kill")
diff --git a/sdcm/test_config.py b/sdcm/test_config.py
--- a/sdcm/test_config.py
+++ b/sdcm/test_config.py
@@ -208,7 +208,7 @@ def get_startup_script(cls):
''')
if cls.RSYSLOG_ADDRESS:

- if cls.IP_SSH_CONNECTIONS == 'public' or TestConfig.MULTI_REGION:
+ if cls.IP_SSH_CONNECTIONS == 'public' or cls.MULTI_REGION:
post_boot_script += dedent('''
sudo echo 'action(type="omfwd" Target="{0}" Port="{1}" Protocol="tcp")'>> /etc/rsyslog.conf
sudo systemctl restart rsyslog
diff --git a/sdcm/tester.py b/sdcm/tester.py
--- a/sdcm/tester.py
+++ b/sdcm/tester.py
@@ -93,7 +93,7 @@
except ImportError:
cluster_cloud = None

-configure_logging(exception_handler=handle_exception, variables={'log_dir': TestConfig.logdir()})
+configure_logging(exception_handler=handle_exception, variables={'log_dir': TestConfig().logdir()})

try:
from botocore.vendored.requests.packages.urllib3.contrib.pyopenssl import extract_from_urllib3
@@ -241,13 +241,13 @@ def __init__(self, *args): # pylint: disable=too-many-statements,too-many-local
self._init_params()
reuse_cluster_id = self.params.get('reuse_cluster')
if reuse_cluster_id:
- TestConfig.reuse_cluster(True)
- TestConfig.set_test_id(reuse_cluster_id)
+ self.test_config.reuse_cluster(True)
+ self.test_config.set_test_id(reuse_cluster_id)
else:
# Test id is set by Hydra or generated if running without Hydra
- TestConfig.set_test_id(self.params.get('test_id') or uuid4())
- TestConfig.set_test_name(self.id())
- TestConfig.set_tester_obj(self)
+ self.test_config.set_test_id(self.params.get('test_id') or uuid4())
+ self.test_config.set_test_name(self.id())
+ self.test_config.set_tester_obj(self)
self._init_logging()
RemoteCmdRunnerBase.set_default_ssh_transport(self.params.get('ssh_transport'))

@@ -259,32 +259,32 @@ def __init__(self, *args): # pylint: disable=too-many-statements,too-many-local
ip_ssh_connections = self.params.get(key='ip_ssh_connections')
self.log.debug("IP used for SSH connections is '%s'",
ip_ssh_connections)
- TestConfig.set_ip_ssh_connections(ip_ssh_connections)
+ self.test_config.set_ip_ssh_connections(ip_ssh_connections)
self._duration = self.params.get(key='test_duration')
post_behavior_db_nodes = self.params.get('post_behavior_db_nodes')
self.log.debug('Post behavior for db nodes %s', post_behavior_db_nodes)
- TestConfig.keep_cluster(node_type='db_nodes', val=post_behavior_db_nodes)
+ self.test_config.keep_cluster(node_type='db_nodes', val=post_behavior_db_nodes)
post_behavior_monitor_nodes = self.params.get('post_behavior_monitor_nodes')
self.log.debug('Post behavior for loader nodes %s', post_behavior_monitor_nodes)
- TestConfig.keep_cluster(node_type='monitor_nodes', val=post_behavior_monitor_nodes)
+ self.test_config.keep_cluster(node_type='monitor_nodes', val=post_behavior_monitor_nodes)
post_behavior_loader_nodes = self.params.get('post_behavior_loader_nodes')
self.log.debug('Post behavior for loader nodes %s', post_behavior_loader_nodes)
- TestConfig.keep_cluster(node_type='loader_nodes', val=post_behavior_loader_nodes)
- TestConfig.set_duration(self._duration)
+ self.test_config.keep_cluster(node_type='loader_nodes', val=post_behavior_loader_nodes)
+ self.test_config.set_duration(self._duration)
cluster_backend = self.params.get('cluster_backend')
if cluster_backend == 'aws':
- TestConfig.set_multi_region(len(self.params.get('region_name').split()) > 1)
+ self.test_config.set_multi_region(len(self.params.get('region_name').split()) > 1)
elif cluster_backend == 'gce':
- TestConfig.set_multi_region(len(self.params.get('gce_datacenter').split()) > 1)
+ self.test_config.set_multi_region(len(self.params.get('gce_datacenter').split()) > 1)

if self.params.get("backup_bucket_backend") == "azure":
- TestConfig.set_backup_azure_blob_credentials()
+ self.test_config.set_backup_azure_blob_credentials()

- TestConfig.BACKTRACE_DECODING = self.params.get('backtrace_decoding')
- if TestConfig.BACKTRACE_DECODING:
- TestConfig.set_decoding_queue()
- TestConfig.set_intra_node_comm_public(self.params.get(
- 'intra_node_comm_public') or TestConfig.MULTI_REGION)
+ self.test_config.BACKTRACE_DECODING = self.params.get('backtrace_decoding')
+ if self.test_config.BACKTRACE_DECODING:
+ self.test_config.set_decoding_queue()
+ self.test_config.set_intra_node_comm_public(self.params.get(
+ 'intra_node_comm_public') or self.test_config.MULTI_REGION)

# for saving test details in DB
self.create_stats = self.params.get(key='store_perf_results')
@@ -300,12 +300,12 @@ def __init__(self, *args): # pylint: disable=too-many-statements,too-many-local
self._move_kubectl_config()
self.localhost = self._init_localhost()
if self.params.get("logs_transport") == 'rsyslog':
- TestConfig.configure_rsyslog(self.localhost, enable_ngrok=False)
+ self.test_config.configure_rsyslog(self.localhost, enable_ngrok=False)

self.alternator: alternator.api.Alternator = alternator.api.Alternator(sct_params=self.params)
if self.params.get("use_ms_ad_ldap"):
ldap_ms_ad_credentials = KeyStore().get_ldap_ms_ad_credentials()
- TestConfig.LDAP_ADDRESS = ldap_ms_ad_credentials["server_address"]
+ self.test_config.LDAP_ADDRESS = ldap_ms_ad_credentials["server_address"]
elif self.params.get("use_ldap_authorization") or self.params.get("prepare_saslauthd") or self.params.get(
"use_saslauthd_authenticator"):
self.configure_ldap(node=self.localhost, use_ssl=False)
@@ -315,7 +315,7 @@ def __init__(self, *args): # pylint: disable=too-many-statements,too-many-local
self.params['are_ldap_users_on_scylla'] = False
ldap_role = LDAP_ROLE
ldap_users = LDAP_USERS.copy()
- ldap_address = list(TestConfig.LDAP_ADDRESS).copy()
+ ldap_address = list(self.test_config.LDAP_ADDRESS).copy()
unique_members_list = [f'uid={user},ou=Person,{LDAP_BASE_OBJECT}' for user in ldap_users]
user_password = LDAP_PASSWORD # not in use not for authorization, but must be in the config
ldap_entry = [f'cn={ldap_role},{LDAP_BASE_OBJECT}',
@@ -326,7 +326,7 @@ def __init__(self, *args): # pylint: disable=too-many-statements,too-many-local
if (self.params.get("prepare_saslauthd")
or self.params.get("use_saslauthd_authenticator")) and not self.params.get("use_ms_ad_ldap"):
ldap_users = LDAP_USERS.copy()
- ldap_address = list(TestConfig.LDAP_ADDRESS).copy()
+ ldap_address = list(self.test_config.LDAP_ADDRESS).copy()
ldap_entry = [f'ou=Person,{LDAP_BASE_OBJECT}',
['organizationalUnit', 'top'],
{'ou': 'Person'}]
@@ -343,13 +343,13 @@ def __init__(self, *args): # pylint: disable=too-many-statements,too-many-local
self.alternator = alternator.api.Alternator(sct_params=self.params)
start_events_device(log_dir=self.logdir, _registry=self.events_processes_registry)
time.sleep(0.5)
- InfoEvent(message=f"TEST_START test_id={TestConfig.test_id()}").publish()
+ InfoEvent(message=f"TEST_START test_id={self.test_config.test_id()}").publish()

def configure_ldap(self, node, use_ssl=False):
- TestConfig.configure_ldap(node=node, use_ssl=use_ssl)
+ self.test_config.configure_ldap(node=node, use_ssl=use_ssl)
ldap_role = LDAP_ROLE
ldap_users = LDAP_USERS.copy()
- ldap_address = list(TestConfig.LDAP_ADDRESS).copy()
+ ldap_address = list(self.test_config.LDAP_ADDRESS).copy()
unique_members_list = [f'uid={user},ou=Person,{LDAP_BASE_OBJECT}' for user in ldap_users]
ldap_username = f'cn=admin,{LDAP_BASE_OBJECT}'
user_password = LDAP_PASSWORD # not in use not for authorization, but must be in the config
@@ -374,7 +374,7 @@ def kill_the_test():
return thread

def _init_localhost(self):
- return LocalHost(user_prefix=self.params.get("user_prefix"), test_id=TestConfig.test_id())
+ return LocalHost(user_prefix=self.params.get("user_prefix"), test_id=self.test_config.test_id())

def _move_kubectl_config(self):
secure_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
@@ -404,7 +404,7 @@ def _init_params(self):

def _init_logging(self):
self.log = logging.getLogger(self.__class__.__name__)
- self.logdir = TestConfig.logdir()
+ self.logdir = self.test_config.logdir()

def run(self, result=None):
self.result = self.defaultTestResult() if result is None else result
@@ -479,7 +479,7 @@ def get_test_status(self) -> str:

def kill_test(self, backtrace_with_reason):
test_pid = os.getpid()
- self.result.addFailure(TestConfig.tester_obj(), backtrace_with_reason)
+ self.result.addFailure(self.test_config.tester_obj(), backtrace_with_reason)
os.kill(test_pid, signal.SIGUSR2)

def download_db_packages(self):
@@ -555,7 +555,7 @@ def setUp(self):
self.monitors.wait_for_init()

# cancel reuse cluster - for new nodes added during the test
- TestConfig.reuse_cluster(False)
+ self.test_config.reuse_cluster(False)
if self.monitors and self.monitors.nodes:
self.prometheus_db = PrometheusDBStats(host=self.monitors.nodes[0].public_ip_address)
self.start_time = time.time()
@@ -573,7 +573,7 @@ def set_system_auth_rf(self):
return
# change RF of system_auth
system_auth_rf = self.params.get('system_auth_rf')
- if system_auth_rf > 1 and not TestConfig.REUSE_CLUSTER:
+ if system_auth_rf > 1 and not self.test_config.REUSE_CLUSTER:
self.log.info('change RF of system_auth to %s', system_auth_rf)
node = self.db_cluster.nodes[0]
credentials = self.db_cluster.get_db_auth()
@@ -813,7 +813,7 @@ def create_cluster(db_type='scylla'):
ec2_ami_username=self.params.get('ami_db_cassandra_user'),
**cl_params)
elif db_type == 'mixed_scylla':
- TestConfig.mixed_cluster(True)
+ self.test_config.mixed_cluster(True)
n_test_oracle_db_nodes = self.params.get('n_test_oracle_db_nodes')
cl_params.update(dict(ec2_instance_type=self.params.get('instance_type_db_oracle'),
user_prefix=user_prefix + '-oracle',
@@ -1013,8 +1013,9 @@ def get_cluster_k8s_gce_minikube(self):
params=self.params)

self.log.debug("Update startup script with iptables rules")
- startup_script = "\n".join((TestConfig.get_startup_script(), *self.db_cluster.nodes_iptables_redirect_rules(),))
- TestConfig.get_startup_script = lambda: startup_script
+ startup_script = "\n".join((self.test_config.get_startup_script(),
+ *self.db_cluster.nodes_iptables_redirect_rules(),))
+ self.test_config.get_startup_script = lambda: startup_script

self.loaders = LoaderSetGCE(gce_image=self.params.get("gce_image"),
gce_image_type=self.params.get("gce_root_disk_type_loader"),
@@ -1134,9 +1135,9 @@ def get_cluster_k8s_gke(self):

if self.params.get("n_monitor_nodes") > 0:
self.log.debug("Update startup script with iptables rules")
- startup_script = "\n".join((TestConfig.get_startup_script(), *
+ startup_script = "\n".join((self.test_config.get_startup_script(), *
self.db_cluster.nodes_iptables_redirect_rules(),))
- TestConfig.get_startup_script = lambda: startup_script
+ self.test_config.get_startup_script = lambda: startup_script

self.monitors = gke.MonitorSetGKE(
gce_image=self.params.get("gce_image_monitor"),
@@ -1272,9 +1273,9 @@ def get_cluster_k8s_eks(self):

if monitor_info['n_nodes']:
self.log.debug("Update startup script with iptables rules")
- startup_script = "\n".join((TestConfig.get_startup_script(), *
+ startup_script = "\n".join((self.test_config.get_startup_script(), *
self.db_cluster.nodes_iptables_redirect_rules(),))
- TestConfig.get_startup_script = lambda: startup_script
+ self.test_config.get_startup_script = lambda: startup_script

self.monitors = MonitorSetEKS(
ec2_ami_id=self.params.get('ami_id_monitor').split(),
@@ -1329,7 +1330,7 @@ def init_resources(self, loader_info=None, db_info=None,

def _cs_add_node_flag(self, stress_cmd):
if '-node' not in stress_cmd:
- if TestConfig.INTRA_NODE_COMM_PUBLIC:
+ if self.test_config.INTRA_NODE_COMM_PUBLIC:
ip = ','.join(self.db_cluster.get_node_public_ips())
else:
ip = self.db_cluster.get_node_private_ips()[0]
@@ -2323,7 +2324,7 @@ def clean_resources(self):
return

actions_per_cluster_type = get_post_behavior_actions(self.params)
- critical_events = get_testrun_status(TestConfig.test_id(), self.logdir, only_critical=True)
+ critical_events = get_testrun_status(self.test_config.test_id(), self.logdir, only_critical=True)
if self.db_cluster is not None:
action = actions_per_cluster_type['db_nodes']['action']
self.log.info("Action for db nodes is %s", action)
@@ -2334,7 +2335,7 @@ def clean_resources(self):
self.destroy_cluster(self.cs_db_cluster)
elif action == 'keep-on-failure' and critical_events:
self.log.info('Critical errors found. Set keep flag for db nodes')
- TestConfig.keep_cluster(node_type='db_nodes', val='keep')
+ self.test_config.keep_cluster(node_type='db_nodes', val='keep')
self.set_keep_alive_on_failure(self.db_cluster)
if self.cs_db_cluster:
self.set_keep_alive_on_failure(self.cs_db_cluster)
@@ -2347,7 +2348,7 @@ def clean_resources(self):
self.loaders = None
elif action == 'keep-on-failure' and critical_events:
self.log.info('Critical errors found. Set keep flag for loader nodes')
- TestConfig.keep_cluster(node_type='loader_nodes', val='keep')
+ self.test_config.keep_cluster(node_type='loader_nodes', val='keep')
self.set_keep_alive_on_failure(self.loaders)

if self.monitors is not None:
@@ -2358,7 +2359,7 @@ def clean_resources(self):
self.monitors = None
elif action == 'keep-on-failure' and critical_events:
self.log.info('Critical errors found. Set keep flag for monitor nodes')
- TestConfig.keep_cluster(node_type='monitor_nodes', val='keep')
+ self.test_config.keep_cluster(node_type='monitor_nodes', val='keep')
self.set_keep_alive_on_failure(self.monitors)

self.destroy_credentials()
@@ -2391,7 +2392,7 @@ def tearDown(self):
if self.params.get('collect_logs'):
self.collect_sct_logs()
self.finalize_teardown()
- self.log.info('Test ID: {}'.format(TestConfig.test_id()))
+ self.log.info('Test ID: {}'.format(self.test_config.test_id()))
self._check_alive_routines_and_report_them()

def _check_alive_routines_and_report_them(self):
@@ -2442,7 +2443,7 @@ def stop_timeout_thread(self):
@silence()
def collect_sct_logs(self):
s3_link = SCTLogCollector(
- [], TestConfig.test_id(), os.path.join(self.logdir, "collected_logs"), params=self.params
+ [], self.test_config.test_id(), os.path.join(self.logdir, "collected_logs"), params=self.params
).collect_logs(self.logdir)
if s3_link:
self.log.info(s3_link)
@@ -2637,7 +2638,7 @@ def check_regression_with_baseline(self, subtest_baseline):
is_gce = bool(self.params.get('cluster_backend') == 'gce')
try:
results_analyzer.check_regression_with_subtest_baseline(self._test_id,
- base_test_id=TestConfig.test_id(),
+ base_test_id=self.test_config.test_id(),
subtest_baseline=subtest_baseline,
is_gce=is_gce)
except Exception as ex: # pylint: disable=broad-except
@@ -2765,7 +2766,7 @@ def collect_logs(self) -> None:
if not cluster["nodes"]:
continue
with silence(parent=self, name=f"Collect and publish {cluster['name']} logs"):
- collector = cluster["collector"](cluster["nodes"], TestConfig.test_id(), storage_dir, self.params)
+ collector = cluster["collector"](cluster["nodes"], self.test_config.test_id(), storage_dir, self.params)
if s3_link := collector.collect_logs(self.logdir):
self.log.info(s3_link)
logs_dict[cluster["logname"]] = s3_link
@@ -2776,7 +2777,8 @@ def collect_logs(self) -> None:
with silence(parent=self, name="Publish log links"):
self.update({"test_details": {"log_files": logs_dict, }, })

- self.log.info("Logs collected. Run command `hydra investigate show-logs %s' to get links", TestConfig.test_id())
+ self.log.info("Logs collected. Run command `hydra investigate show-logs %s' to get links",
+ self.test_config.test_id())

@silence()
def get_test_failures(self):
diff --git a/unit_tests/test_decode_backtrace.py b/unit_tests/test_decode_backtrace.py
--- a/unit_tests/test_decode_backtrace.py
+++ b/unit_tests/test_decode_backtrace.py
@@ -43,6 +43,7 @@ def setUpClass(cls):
cls.monitor_node = DecodeDummyNode(name='test_monitor_node', parent_cluster=None,
base_logdir=cls.temp_dir, ssh_login_info=dict(key_file='~/.ssh/scylla-test'))
cls.monitor_node.remoter = DummyRemote()
+ cls.test_config = TestConfig()

@classmethod
def tearDownClass(cls):
@@ -52,8 +53,8 @@ def setUp(self):
self.node.system_log = os.path.join(os.path.dirname(__file__), 'test_data', 'system.log')

def test_01_reactor_stall_is_not_decoded_if_disabled(self):
- TestConfig.DECODING_QUEUE = queue.Queue()
- TestConfig.BACKTRACE_DECODING = False
+ self.test_config.DECODING_QUEUE = queue.Queue()
+ self.test_config.BACKTRACE_DECODING = False

self.monitor_node.start_decode_on_monitor_node_thread()
self.node._read_system_log_and_publish_events() # pylint: disable=protected-access
@@ -71,9 +72,9 @@ def test_01_reactor_stall_is_not_decoded_if_disabled(self):
self.assertIsNone(event['backtrace'])

def test_02_reactor_stalls_is_decoded_if_enabled(self):
- TestConfig.BACKTRACE_DECODING = True
+ self.test_config.BACKTRACE_DECODING = True

- TestConfig.DECODING_QUEUE = queue.Queue()
+ self.test_config.DECODING_QUEUE = queue.Queue()

self.monitor_node.start_decode_on_monitor_node_thread()
self.node._read_system_log_and_publish_events() # pylint: disable=protected-access
@@ -94,8 +95,8 @@ def test_02_reactor_stalls_is_decoded_if_enabled(self):

def test_03_decode_interlace_reactor_stall(self): # pylint: disable=invalid-name

- TestConfig.DECODING_QUEUE = queue.Queue()
- TestConfig.BACKTRACE_DECODING = True
+ self.test_config.DECODING_QUEUE = queue.Queue()
+ self.test_config.BACKTRACE_DECODING = True

self.monitor_node.start_decode_on_monitor_node_thread()
self.node.system_log = os.path.join(os.path.dirname(__file__), 'test_data', 'system_interlace_stall.log')
@@ -118,8 +119,8 @@ def test_03_decode_interlace_reactor_stall(self): # pylint: disable=invalid-nam

def test_04_decode_backtraces_core(self):

- TestConfig.DECODING_QUEUE = queue.Queue()
- TestConfig.BACKTRACE_DECODING = True
+ self.test_config.DECODING_QUEUE = queue.Queue()
+ self.test_config.BACKTRACE_DECODING = True

self.monitor_node.start_decode_on_monitor_node_thread()
self.node.system_log = os.path.join(os.path.dirname(__file__), 'test_data', 'system_core.log')
diff --git a/unit_tests/test_seed_selector.py b/unit_tests/test_seed_selector.py
--- a/unit_tests/test_seed_selector.py
+++ b/unit_tests/test_seed_selector.py
@@ -7,6 +7,7 @@
from tenacity import RetryError

import sdcm.cluster
+from sdcm.test_config import TestConfig
from unit_tests.dummy_remote import DummyRemote


@@ -90,7 +91,7 @@ def test_reuse_cluster_seed(self):
self.setup_cluster(nodes_number=3)
self.cluster.set_test_params(seeds_selector='first', seeds_num=2, db_type='scylla')
sdcm.cluster.SCYLLA_YAML_PATH = os.path.join(os.path.dirname(__file__), 'test_data', 'scylla.yaml')
- sdcm.cluster.TestConfig.reuse_cluster(True)
+ TestConfig().reuse_cluster(True)
self.cluster.set_seeds()
self.assertTrue(self.cluster.seed_nodes == [self.cluster.nodes[1]])
self.assertTrue(self.cluster.non_seed_nodes == [self.cluster.nodes[0], self.cluster.nodes[2]])