[QUEUED scylla next] move cloud related code from scylla repository to scylla-machine-image

34 views
Skip to first unread message

Commit Bot

<bot@cloudius-systems.com>
unread,
Feb 1, 2022, 4:27:10 AM2/1/22
to scylladb-dev@googlegroups.com, Takuya ASADA
From: Takuya ASADA <sy...@scylladb.com>
Committer: Avi Kivity <a...@scylladb.com>
Branch: next

move cloud related code from scylla repository to scylla-machine-image

Currently, cloud-related code has cross-dependencies between
scylla and scylla-machine-image.
This is not a good way to implement it, since a single change can
break both packages.

To resolve the issue, we need to move all cloud-related code to
scylla-machine-image, and remove it from the scylla repository.

Change list:
- move cloud part of scylla_util.py to scylla-machine-image
- move cloud part of scylla_io_setup to scylla-machine-image
- move scylla_ec2_check to scylla-machine-image
- move cloud part of scylla_bootparam_setup to scylla-machine-image

Closes #9957

---
diff --git a/dist/common/scripts/scylla_bootparam_setup b/dist/common/scripts/scylla_bootparam_setup
--- a/dist/common/scripts/scylla_bootparam_setup
+++ b/dist/common/scripts/scylla_bootparam_setup
@@ -8,52 +8,14 @@
# SPDX-License-Identifier: AGPL-3.0-or-later

import os
-import re
import sys
import argparse
-from scylla_util import *
-from subprocess import run
+
+# keep this script just for compatibility.

if __name__ == '__main__':
- if os.getuid() > 0:
- print('Requires root permission.')
- sys.exit(1)
parser = argparse.ArgumentParser(description='Optimize boot parameter settings for Scylla.')
parser.add_argument('--ami', action='store_true', default=False,
help='setup AMI instance')
args = parser.parse_args()
-
- if not args.ami:
- sys.exit(0)
- if not os.path.exists('/etc/default/grub') and not os.path.exists('/boot/grub/menu.lst'):
- print('Unsupported bootloader')
- sys.exit(1)
-
- if os.path.exists('/etc/default/grub'):
- cfg = sysconfig_parser('/etc/default/grub')
- for k in ['GRUB_CMDLINE_LINUX', 'GRUB_CMDLINE_LINUX_DEFAULT']:
- if cfg.has_option(k):
- grub_key = k
- break
- if not grub_key:
- print('GRUB_CMDLINE_LINUX does not found in /etc/default/grub')
- sys.exit(1)
-
- cmdline_linux = cfg.get(grub_key)
- if len(re.findall(r'.*clocksource', cmdline_linux)) == 0:
- cmdline_linux += ' clocksource=tsc tsc=reliable'
- cfg.set(grub_key, cmdline_linux)
- cfg.commit()
- if is_debian_variant():
- run('update-grub', shell=True, check=True)
- else:
- run('grub2-mkconfig -o /boot/grub2/grub.cfg', shell=True, check=True)
-
-# if is_ec2() and os.path.exists('/boot/grub/menu.lst'):
- if os.path.exists('/boot/grub/menu.lst'):
- with open('/boot/grub/menu.lst') as f:
- cur = f.read()
- if len(re.findall(r'^\s*kernel.*clocksource', cur, flags=re.MULTILINE)) == 0:
- new = re.sub(r'(^\s*kernel.*)', r'\1 clocksource=tsc tsc=reliable ', cur, flags=re.MULTILINE)
- with open('/boot/grub/menu.lst', 'w') as f:
- f.write(new)
+ sys.exit(0)
diff --git a/dist/common/scripts/scylla_ec2_check b/dist/common/scripts/scylla_ec2_check
--- a/dist/common/scripts/scylla_ec2_check
+++ b/dist/common/scripts/scylla_ec2_check
@@ -1,50 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-#
-# Copyright 2018-present ScyllaDB
-#
-
-#
-# SPDX-License-Identifier: AGPL-3.0-or-later
-
-import os
-import sys
-import argparse
-from scylla_util import *
-from subprocess import run
-
-if __name__ == '__main__':
- if not is_ec2():
- sys.exit(0)
- parser = argparse.ArgumentParser(description='Verify EC2 configuration is optimized.')
- parser.add_argument('--nic', default='eth0',
- help='specify NIC')
- args = parser.parse_args()
-
- if not is_valid_nic(args.nic):
- print('NIC {} doesn\'t exist.'.format(args.nic))
- sys.exit(1)
-
- aws = aws_instance()
- instance_class = aws.instance_class()
- en = aws.get_en_interface_type()
- match = re.search(r'^driver: (\S+)$', run('ethtool -i {}'.format(args.nic), shell=True, check=True, capture_output=True, encoding='utf-8').stdout.strip(), flags=re.MULTILINE)
- driver = match.group(1)
-
- if not en:
- colorprint('{red}{instance_class} doesn\'t support enhanced networking!{nocolor}', instance_class=instance_class)
- print('''To enable enhanced networking, please use the instance type which supports it.
-More documentation available at:
-http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking''')
- sys.exit(1)
- elif not aws.is_vpc_enabled(args.nic):
- colorprint('{red}VPC is not enabled!{nocolor}')
- print('To enable enhanced networking, please enable VPC.')
- sys.exit(1)
- elif driver != en:
- colorprint('{red}Enhanced networking is disabled!{nocolor}')
- print('''More documentation available at:
-http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html''')
- sys.exit(1)
-
- colorprint('{green}This EC2 instance is optimized for Scylla.{nocolor}')
diff --git a/dist/common/scripts/scylla_io_setup b/dist/common/scripts/scylla_io_setup
--- a/dist/common/scripts/scylla_io_setup
+++ b/dist/common/scripts/scylla_io_setup
@@ -30,12 +30,6 @@ _smp = r"(?:\s*--smp" + _scyllaeq + r"(?P<smp>\d+))"
def _reopt(s):
return s + r"?"

-
-def is_developer_mode():
- f = open(etcdir() + "/scylla.d/dev-mode.conf", "r")
- pattern = re.compile(_nocomment + r".*developer-mode" + _scyllaeq + "(1|true)")
- return len([x for x in f if pattern.match(x)]) >= 1
-
class scylla_cpuinfo:
"""Class containing information about how Scylla sees CPUs in this machine.
Information that can be probed include in which hyperthreads Scylla is configured
@@ -167,278 +161,13 @@ if __name__ == "__main__":
print('Requires root permission.')
sys.exit(1)
parser = argparse.ArgumentParser(description='IO Setup script for Scylla.')
+ # keep --ami just for compatibility
parser.add_argument('--ami', dest='ami', action='store_true',
help='configure AWS AMI')
args = parser.parse_args()

cpudata = scylla_cpuinfo()
if not is_developer_mode():
- if args.ami:
- idata = aws_instance()
-
- if not idata.is_supported_instance_class():
- logging.error('{} is not supported instance type, run "scylla_io_setup" again without --ami option.'.format(idata.instance()))
- sys.exit(1)
- disk_properties = {}
- disk_properties["mountpoint"] = datadir()
- nr_disks = len(idata.ephemeral_disks())
- ## both i3 and i2 can run with 1 I/O Queue per shard
- if idata.instance() == "i3.large":
- disk_properties["read_iops"] = 111000
- disk_properties["read_bandwidth"] = 653925080
- disk_properties["write_iops"] = 36800
- disk_properties["write_bandwidth"] = 215066473
- elif idata.instance() == "i3.xlarge":
- disk_properties["read_iops"] = 200800
- disk_properties["read_bandwidth"] = 1185106376
- disk_properties["write_iops"] = 53180
- disk_properties["write_bandwidth"] = 423621267
- elif idata.instance_class() == "i3":
- disk_properties["read_iops"] = 411200 * nr_disks
- disk_properties["read_bandwidth"] = 2015342735 * nr_disks
- disk_properties["write_iops"] = 181500 * nr_disks
- disk_properties["write_bandwidth"] = 808775652 * nr_disks
- elif idata.instance_class() == "i3en":
- if idata.instance() == "i3en.large":
- disk_properties["read_iops"] = 43315
- disk_properties["read_bandwidth"] = 330301440
- disk_properties["write_iops"] = 33177
- disk_properties["write_bandwidth"] = 165675008
- elif idata.instance() in ("i3en.xlarge", "i3en.2xlarge"):
- disk_properties["read_iops"] = 84480 * nr_disks
- disk_properties["read_bandwidth"] = 666894336 * nr_disks
- disk_properties["write_iops"] = 66969 * nr_disks
- disk_properties["write_bandwidth"] = 333447168 * nr_disks
- else:
- disk_properties["read_iops"] = 257024 * nr_disks
- disk_properties["read_bandwidth"] = 2043674624 * nr_disks
- disk_properties["write_iops"] = 174080 * nr_disks
- disk_properties["write_bandwidth"] = 1024458752 * nr_disks
- elif idata.instance_class() == "i2":
- disk_properties["read_iops"] = 64000 * nr_disks
- disk_properties["read_bandwidth"] = 507338935 * nr_disks
- disk_properties["write_iops"] = 57100 * nr_disks
- disk_properties["write_bandwidth"] = 483141731 * nr_disks
- elif idata.instance_class() in ("c6gd", "m6gd", "r6gd", "x2gd"):
- if idata.instance_size() == "medium":
- disk_properties["read_iops"] = 14808
- disk_properties["read_bandwidth"] = 77869147
- disk_properties["write_iops"] = 5972
- disk_properties["write_bandwidth"] = 32820302
- elif idata.instance_size() == "large":
- disk_properties["read_iops"] = 29690
- disk_properties["read_bandwidth"] = 157712240
- disk_properties["write_iops"] = 12148
- disk_properties["write_bandwidth"] = 65978069
- elif idata.instance_size() == "xlarge":
- disk_properties["read_iops"] = 59688
- disk_properties["read_bandwidth"] = 318762880
- disk_properties["write_iops"] = 24449
- disk_properties["write_bandwidth"] = 133311808
- elif idata.instance_size() == "2xlarge":
- disk_properties["read_iops"] = 119353
- disk_properties["read_bandwidth"] = 634795733
- disk_properties["write_iops"] = 49069
- disk_properties["write_bandwidth"] = 266841680
- elif idata.instance_size() == "4xlarge":
- disk_properties["read_iops"] = 237196
- disk_properties["read_bandwidth"] = 1262309504
- disk_properties["write_iops"] = 98884
- disk_properties["write_bandwidth"] = 533938080
- elif idata.instance_size() == "8xlarge":
- disk_properties["read_iops"] = 442945
- disk_properties["read_bandwidth"] = 2522688939
- disk_properties["write_iops"] = 166021
- disk_properties["write_bandwidth"] = 1063041152
- elif idata.instance_size() == "12xlarge":
- disk_properties["read_iops"] = 353691 * nr_disks
- disk_properties["read_bandwidth"] = 1908192256 * nr_disks
- disk_properties["write_iops"] = 146732 * nr_disks
- disk_properties["write_bandwidth"] = 806399360 * nr_disks
- elif idata.instance_size() == "16xlarge":
- disk_properties["read_iops"] = 426893 * nr_disks
- disk_properties["read_bandwidth"] = 2525781589 * nr_disks
- disk_properties["write_iops"] = 161740 * nr_disks
- disk_properties["write_bandwidth"] = 1063389952 * nr_disks
- elif idata.instance_size() == "metal":
- disk_properties["read_iops"] = 416257 * nr_disks
- disk_properties["read_bandwidth"] = 2527296683 * nr_disks
- disk_properties["write_iops"] = 156326 * nr_disks
- disk_properties["write_bandwidth"] = 1063657088 * nr_disks
- elif idata.instance() == "im4gn.large":
- disk_properties["read_iops"] = 33943
- disk_properties["read_bandwidth"] = 288433525
- disk_properties["write_iops"] = 27877
- disk_properties["write_bandwidth"] = 126864680
- elif idata.instance() == "im4gn.xlarge":
- disk_properties["read_iops"] = 68122
- disk_properties["read_bandwidth"] = 576603520
- disk_properties["write_iops"] = 55246
- disk_properties["write_bandwidth"] = 254534954
- elif idata.instance() == "im4gn.2xlarge":
- disk_properties["read_iops"] = 136422
- disk_properties["read_bandwidth"] = 1152663765
- disk_properties["write_iops"] = 92184
- disk_properties["write_bandwidth"] = 508926453
- elif idata.instance() == "im4gn.4xlarge":
- disk_properties["read_iops"] = 273050
- disk_properties["read_bandwidth"] = 1638427264
- disk_properties["write_iops"] = 92173
- disk_properties["write_bandwidth"] = 1027966826
- elif idata.instance() == "im4gn.8xlarge":
- disk_properties["read_iops"] = 250241 * nr_disks
- disk_properties["read_bandwidth"] = 1163130709 * nr_disks
- disk_properties["write_iops"] = 86374 * nr_disks
- disk_properties["write_bandwidth"] = 977617664 * nr_disks
- elif idata.instance() == "im4gn.16xlarge":
- disk_properties["read_iops"] = 273030 * nr_disks
- disk_properties["read_bandwidth"] = 1638211413 * nr_disks
- disk_properties["write_iops"] = 92607 * nr_disks
- disk_properties["write_bandwidth"] = 1028340266 * nr_disks
- elif idata.instance() == "is4gen.medium":
- disk_properties["read_iops"] = 33965
- disk_properties["read_bandwidth"] = 288462506
- disk_properties["write_iops"] = 27876
- disk_properties["write_bandwidth"] = 126954200
- elif idata.instance() == "is4gen.large":
- disk_properties["read_iops"] = 68131
- disk_properties["read_bandwidth"] = 576654869
- disk_properties["write_iops"] = 55257
- disk_properties["write_bandwidth"] = 254551002
- elif idata.instance() == "is4gen.xlarge":
- disk_properties["read_iops"] = 136413
- disk_properties["read_bandwidth"] = 1152747904
- disk_properties["write_iops"] = 92180
- disk_properties["write_bandwidth"] = 508889546
- elif idata.instance() == "is4gen.2xlarge":
- disk_properties["read_iops"] = 273038
- disk_properties["read_bandwidth"] = 1628982613
- disk_properties["write_iops"] = 92182
- disk_properties["write_bandwidth"] = 1027983530
- elif idata.instance() == "is4gen.4xlarge":
- disk_properties["read_iops"] = 260493 * nr_disks
- disk_properties["read_bandwidth"] = 1217396928 * nr_disks
- disk_properties["write_iops"] = 83169 * nr_disks
- disk_properties["write_bandwidth"] = 1000390784 * nr_disks
- elif idata.instance() == "is4gen.8xlarge":
- disk_properties["read_iops"] = 273021 * nr_disks
- disk_properties["read_bandwidth"] = 1656354602 * nr_disks
- disk_properties["write_iops"] = 92233 * nr_disks
- disk_properties["write_bandwidth"] = 1028010325 * nr_disks
- if "read_iops" in disk_properties:
- properties_file = open(etcdir() + "/scylla.d/io_properties.yaml", "w")
- yaml.dump({ "disks": [ disk_properties ] }, properties_file, default_flow_style=False)
- ioconf = open(etcdir() + "/scylla.d/io.conf", "w")
- ioconf.write("SEASTAR_IO=\"--io-properties-file={}\"\n".format(properties_file.name))
- else:
- logging.info('This is a supported AWS instance type but there are no preconfigured IO scheduler parameters for it. Running manual iotune.')
- run_iotune()
- elif gcp_instance().is_gce_instance():
- idata = gcp_instance()
-
- if idata.is_recommended_instance():
- disk_properties = {}
- disk_properties["mountpoint"] = datadir()
- nr_disks = idata.nvmeDiskCount
- # below is based on https://cloud.google.com/compute/docs/disks/local-ssd#performance
- # and https://cloud.google.com/compute/docs/disks/local-ssd#nvme
- # note that scylla iotune might measure more, this is GCP recommended
- mbs=1024*1024
- if nr_disks >= 1 and nr_disks < 4:
- disk_properties["read_iops"] = 170000 * nr_disks
- disk_properties["read_bandwidth"] = 660 * mbs * nr_disks
- disk_properties["write_iops"] = 90000 * nr_disks
- disk_properties["write_bandwidth"] = 350 * mbs * nr_disks
- elif nr_disks >= 4 and nr_disks <= 8:
- disk_properties["read_iops"] = 680000
- disk_properties["read_bandwidth"] = 2650 * mbs
- disk_properties["write_iops"] = 360000
- disk_properties["write_bandwidth"] = 1400 * mbs
- elif nr_disks == 16:
- disk_properties["read_iops"] = 1600000
- disk_properties["read_bandwidth"] = 4521251328
- #below is google, above is our measured
- #disk_properties["read_bandwidth"] = 6240 * mbs
- disk_properties["write_iops"] = 800000
- disk_properties["write_bandwidth"] = 2759452672
- #below is google, above is our measured
- #disk_properties["write_bandwidth"] = 3120 * mbs
- elif nr_disks == 24:
- disk_properties["read_iops"] = 2400000
- disk_properties["read_bandwidth"] = 5921532416
- #below is google, above is our measured
- #disk_properties["read_bandwidth"] = 9360 * mbs
- disk_properties["write_iops"] = 1200000
- disk_properties["write_bandwidth"] = 4663037952
- #below is google, above is our measured
- #disk_properties["write_bandwidth"] = 4680 * mbs
- if "read_iops" in disk_properties:
- properties_file = open(etcdir() + "/scylla.d/io_properties.yaml", "w")
- yaml.dump({"disks": [disk_properties]}, properties_file, default_flow_style=False)
- ioconf = open(etcdir() + "/scylla.d/io.conf", "w")
- ioconf.write("SEASTAR_IO=\"--io-properties-file={}\"\n".format(properties_file.name))
- else:
- logging.error(
- 'Did not detect number of disks in Google Cloud instance setup for auto local disk tuning, running manual iotune.')
- run_iotune()
- else:
- logging.error(
- 'This is not a recommended Google Cloud instance setup for auto local disk tuning, running manual iotune.')
- run_iotune()
- elif azure_instance().is_azure_instance():
- idata = azure_instance()
- if idata.is_recommended_instance():
- disk_properties = {}
- disk_properties["mountpoint"] = datadir()
- nr_disks = idata.nvmeDiskCount
- # below is based on https://docs.microsoft.com/en-us/azure/virtual-machines/lsv2-series
- # note that scylla iotune might measure more, this is Azure recommended
- # since write properties are not defined, they come from our iotune tests
- mbs = 1024*1024
- if nr_disks == 1:
- disk_properties["read_iops"] = 400000
- disk_properties["read_bandwidth"] = 2000 * mbs
- disk_properties["write_iops"] = 271696
- disk_properties["write_bandwidth"] = 1314 * mbs
- elif nr_disks == 2:
- disk_properties["read_iops"] = 800000
- disk_properties["read_bandwidth"] = 4000 * mbs
- disk_properties["write_iops"] = 552434
- disk_properties["write_bandwidth"] = 2478 * mbs
- elif nr_disks == 4:
- disk_properties["read_iops"] = 1500000
- disk_properties["read_bandwidth"] = 8000 * mbs
- disk_properties["write_iops"] = 1105063
- disk_properties["write_bandwidth"] = 4948 * mbs
- elif nr_disks == 6:
- disk_properties["read_iops"] = 2200000
- disk_properties["read_bandwidth"] = 14000 * mbs
- disk_properties["write_iops"] = 1616847
- disk_properties["write_bandwidth"] = 7892 * mbs
- elif nr_disks == 8:
- disk_properties["read_iops"] = 2900000
- disk_properties["read_bandwidth"] = 16000 * mbs
- disk_properties["write_iops"] = 2208081
- disk_properties["write_bandwidth"] = 9694 * mbs
- elif nr_disks == 10:
- disk_properties["read_iops"] = 3800000
- disk_properties["read_bandwidth"] = 20000 * mbs
- disk_properties["write_iops"] = 2546511
- disk_properties["write_bandwidth"] = 11998 * mbs
- if "read_iops" in disk_properties:
- properties_file = open(etcdir() + "/scylla.d/io_properties.yaml", "w")
- yaml.dump({"disks": [disk_properties]}, properties_file, default_flow_style=False)
- ioconf = open(etcdir() + "/scylla.d/io.conf", "w")
- ioconf.write("SEASTAR_IO=\"--io-properties-file={}\"\n".format(properties_file.name))
- else:
- logging.error(
- 'Did not detect number of disks in Azure Cloud instance setup for auto local disk tuning, running manual iotune.')
- run_iotune()
- else:
- logging.error(
- 'This is not a recommended Azure Cloud instance setup for auto local disk tuning, running manual iotune.')
- run_iotune()
- else:
- run_iotune()
+ run_iotune()
os.chmod(etcdir() + '/scylla.d/io_properties.yaml', 0o644)
os.chmod(etcdir() + '/scylla.d/io.conf', 0o644)
diff --git a/dist/common/scripts/scylla_setup b/dist/common/scripts/scylla_setup
--- a/dist/common/scripts/scylla_setup
+++ b/dist/common/scripts/scylla_setup
@@ -281,7 +281,7 @@ if __name__ == '__main__':
args = parser.parse_args()

if not interactive:
- if not args.no_sysconfig_setup or (is_ec2() and not args.no_ec2_check):
+ if not args.no_sysconfig_setup:
if args.nic:
if not is_valid_nic(args.nic):
print('NIC {} doesn\'t exist.'.format(args.nic))
@@ -304,7 +304,6 @@ if __name__ == '__main__':
enable_service = not args.no_enable_service
if is_redhat_variant():
selinux_setup = not args.no_selinux_setup
- bootparam_setup = not args.no_bootparam_setup
ntp_setup = not args.no_ntp_setup
raid_setup = not args.no_raid_setup
raid_level_5 = args.raid_level_5
@@ -329,14 +328,6 @@ if __name__ == '__main__':
def interactive_ask_service(msg1, msg2, default = None):
return when_interactive_ask_service(interactive, msg1, msg2, default)

- if is_ec2():
- ec2_check = interactive_ask_service('Do you want to run Amazon EC2 configuration check?', 'Yes - runs a script to verify that this instance is optimized for running Scylla. No - skips the configuration check.', ec2_check)
- args.no_ec2_check = not ec2_check
- if ec2_check:
- if interactive:
- args.nic = interactive_choose_nic()
- run('{}/scylla_ec2_check --nic {}'.format(scriptsdir(), args.nic), shell=True, check=True)
-
if not is_nonroot():
kernel_check = interactive_ask_service('Do you want to run check your kernel version?', 'Yes - runs a script to verify that the kernel for this instance qualifies to run Scylla. No - skips the kernel check.', kernel_check)
args.no_kernel_check = not kernel_check
@@ -403,12 +394,6 @@ if __name__ == '__main__':
if res != 0:
selinux_reboot_required=True

- if args.ami:
- bootparam_setup = interactive_ask_service('Do you want set clock source as bootloader option?', 'Yes - set clock source at boot time. No - skips this step.', bootparam_setup)
- args.no_bootparam_setup = not bootparam_setup
- if bootparam_setup:
- run_setup_script('boot parameter', 'scylla_bootparam_setup --ami')
-
ntp_setup = interactive_ask_service('Do you want to setup Network Time Protocol(NTP) to auto-synchronize the current time on the node?', 'Yes - enables time-synchronization. This keeps the correct time on the node. No - skips this step.', ntp_setup)
args.no_ntp_setup = not ntp_setup
if ntp_setup:
diff --git a/dist/common/scripts/scylla_util.py b/dist/common/scripts/scylla_util.py
--- a/dist/common/scripts/scylla_util.py
+++ b/dist/common/scripts/scylla_util.py
@@ -5,20 +5,12 @@
import configparser
import glob
import io
-import json
-import logging
import os
import re
import shlex
import shutil
import subprocess
-import time
-import urllib.error
-import urllib.parse
-import urllib.request
import yaml
-import psutil
-import socket
import sys
from pathlib import Path, PurePath
from subprocess import run, DEVNULL
@@ -90,651 +82,6 @@ def scyllabindir():
def sysconfdir():
return str(sysconfdir_p())

-# @param headers dict of k:v
-def curl(url, headers=None, byte=False, timeout=3, max_retries=5, retry_interval=5):
- retries = 0
- while True:
- try:
- req = urllib.request.Request(url, headers=headers or {})
- with urllib.request.urlopen(req, timeout=timeout) as res:
- if byte:
- return res.read()
- else:
- return res.read().decode('utf-8')
- except urllib.error.URLError:
- time.sleep(retry_interval)
- retries += 1
- if retries >= max_retries:
- raise
-
-
-class gcp_instance:
- """Describe several aspects of the current GCP instance"""
-
- EPHEMERAL = "ephemeral"
- PERSISTENT = "persistent"
- ROOT = "root"
- GETTING_STARTED_URL = "http://www.scylladb.com/doc/getting-started-google/"
- META_DATA_BASE_URL = "http://metadata.google.internal/computeMetadata/v1/instance/"
- ENDPOINT_SNITCH = "GoogleCloudSnitch"
-
- def __init__(self):
- self.__type = None
- self.__cpu = None
- self.__memoryGB = None
- self.__nvmeDiskCount = None
- self.__firstNvmeSize = None
- self.__osDisks = None
-
- @staticmethod
- def is_gce_instance():
- """Check if it's GCE instance via DNS lookup to metadata server."""
- try:
- addrlist = socket.getaddrinfo('metadata.google.internal', 80)
- except socket.gaierror:
- return False
- for res in addrlist:
- af, socktype, proto, canonname, sa = res
- if af == socket.AF_INET:
- addr, port = sa
- if addr == "169.254.169.254":
- # Make sure it is not on GKE
- try:
- gcp_instance().__instance_metadata("machine-type")
- except urllib.error.HTTPError:
- return False
- return True
- return False
-
- def __instance_metadata(self, path, recursive=False):
- return curl(self.META_DATA_BASE_URL + path + "?recursive=%s" % str(recursive).lower(),
- headers={"Metadata-Flavor": "Google"})
-
- def is_in_root_devs(self, x, root_devs):
- for root_dev in root_devs:
- if root_dev.startswith(os.path.join("/dev/", x)):
- return True
- return False
-
- def _non_root_nvmes(self):
- """get list of nvme disks from os, filter away if one of them is root"""
- nvme_re = re.compile(r"nvme\d+n\d+$")
-
- root_dev_candidates = [x for x in psutil.disk_partitions() if x.mountpoint == "/"]
-
- root_devs = [x.device for x in root_dev_candidates]
-
- nvmes_present = list(filter(nvme_re.match, os.listdir("/dev")))
- return {self.ROOT: root_devs, self.EPHEMERAL: [x for x in nvmes_present if not self.is_in_root_devs(x, root_devs)]}
-
- def _non_root_disks(self):
- """get list of disks from os, filter away if one of them is root"""
- disk_re = re.compile(r"/dev/sd[b-z]+$")
-
- root_dev_candidates = [x for x in psutil.disk_partitions() if x.mountpoint == "/"]
-
- root_devs = [x.device for x in root_dev_candidates]
-
- disks_present = list(filter(disk_re.match, glob.glob("/dev/sd*")))
- return {self.PERSISTENT: [x.lstrip('/dev/') for x in disks_present if not self.is_in_root_devs(x.lstrip('/dev/'), root_devs)]}
-
- @property
- def os_disks(self):
- """populate disks from /dev/ and root mountpoint"""
- if self.__osDisks is None:
- __osDisks = {}
- nvmes_present = self._non_root_nvmes()
- for k, v in nvmes_present.items():
- __osDisks[k] = v
- disks_present = self._non_root_disks()
- for k, v in disks_present.items():
- __osDisks[k] = v
- self.__osDisks = __osDisks
- return self.__osDisks
-
- def getEphemeralOsDisks(self):
- """return just transient disks"""
- return self.os_disks[self.EPHEMERAL]
-
- def getPersistentOsDisks(self):
- """return just persistent disks"""
- return self.os_disks[self.PERSISTENT]
-
- @staticmethod
- def isNVME(gcpdiskobj):
- """check if disk from GCP metadata is a NVME disk"""
- if gcpdiskobj["interface"]=="NVME":
- return True
- return False
-
- def __get_nvme_disks_from_metadata(self):
- """get list of nvme disks from metadata server"""
- try:
- disksREST=self.__instance_metadata("disks", True)
- disksobj=json.loads(disksREST)
- nvmedisks=list(filter(self.isNVME, disksobj))
- except Exception as e:
- print ("Problem when parsing disks from metadata:")
- print (e)
- nvmedisks={}
- return nvmedisks
-
- @property
- def nvmeDiskCount(self):
- """get # of nvme disks available for scylla raid"""
- if self.__nvmeDiskCount is None:
- try:
- ephemeral_disks = self.getEphemeralOsDisks()
- count_os_disks=len(ephemeral_disks)
- except Exception as e:
- print ("Problem when parsing disks from OS:")
- print (e)
- count_os_disks=0
- nvme_metadata_disks = self.__get_nvme_disks_from_metadata()
- count_metadata_nvme_disks=len(nvme_metadata_disks)
- self.__nvmeDiskCount = count_os_disks if count_os_disks<count_metadata_nvme_disks else count_metadata_nvme_disks
- return self.__nvmeDiskCount
-
- @property
- def instancetype(self):
- """return the type of this instance, e.g. n2-standard-2"""
- if self.__type is None:
- self.__type = self.__instance_metadata("machine-type").split("/")[-1]
- return self.__type
-
- @property
- def cpu(self):
- """return the # of cpus of this instance"""
- if self.__cpu is None:
- self.__cpu = psutil.cpu_count()
- return self.__cpu
-
- @property
- def memoryGB(self):
- """return the size of memory in GB of this instance"""
- if self.__memoryGB is None:
- self.__memoryGB = psutil.virtual_memory().total/1024/1024/1024
- return self.__memoryGB
-
- def instance_size(self):
- """Returns the size of the instance we are running in. i.e.: 2"""
- instancetypesplit = self.instancetype.split("-")
- return instancetypesplit[2] if len(instancetypesplit)>2 else 0
-
- def instance_class(self):
- """Returns the class of the instance we are running in. i.e.: n2"""
- return self.instancetype.split("-")[0]
-
- def instance_purpose(self):
- """Returns the purpose of the instance we are running in. i.e.: standard"""
- return self.instancetype.split("-")[1]
-
- m1supported="m1-megamem-96" #this is the only exception of supported m1 as per https://cloud.google.com/compute/docs/machine-types#m1_machine_types
-
- def is_unsupported_instance_class(self):
- """Returns if this instance type belongs to unsupported ones for nvmes"""
- if self.instancetype == self.m1supported:
- return False
- if self.instance_class() in ['e2', 'f1', 'g1', 'm2', 'm1']:
- return True
- return False
-
- def is_supported_instance_class(self):
- """Returns if this instance type belongs to supported ones for nvmes"""
- if self.instancetype == self.m1supported:
- return True
- if self.instance_class() in ['n1', 'n2', 'n2d' ,'c2']:
- return True
- return False
-
- def is_recommended_instance_size(self):
- """if this instance has at least 2 cpus, it has a recommended size"""
- if int(self.instance_size()) > 1:
- return True
- return False
-
- @staticmethod
- def get_file_size_by_seek(filename):
- "Get the file size by seeking at end"
- fd= os.open(filename, os.O_RDONLY)
- try:
- return os.lseek(fd, 0, os.SEEK_END)
- finally:
- os.close(fd)
-
- # note that GCP has 3TB physical devices actually, which they break into smaller 375GB disks and share the same mem with multiple machines
- # this is a reference value, disk size shouldn't be lower than that
- GCP_NVME_DISK_SIZE_2020=375
-
- @property
- def firstNvmeSize(self):
- """return the size of first non root NVME disk in GB"""
- if self.__firstNvmeSize is None:
- ephemeral_disks = self.getEphemeralOsDisks()
- if len(ephemeral_disks) > 0:
- firstDisk = ephemeral_disks[0]
- firstDiskSize = self.get_file_size_by_seek(os.path.join("/dev/", firstDisk))
- firstDiskSizeGB = firstDiskSize/1024/1024/1024
- if firstDiskSizeGB >= self.GCP_NVME_DISK_SIZE_2020:
- self.__firstNvmeSize = firstDiskSizeGB
- else:
- self.__firstNvmeSize = 0
- logging.warning("First nvme is smaller than lowest expected size. ".format(firstDisk))
- else:
- self.__firstNvmeSize = 0
- return self.__firstNvmeSize
-
- def is_recommended_instance(self):
- if not self.is_unsupported_instance_class() and self.is_supported_instance_class() and self.is_recommended_instance_size():
- # at least 1:2GB cpu:ram ratio , GCP is at 1:4, so this should be fine
- if self.cpu/self.memoryGB < 0.5:
- diskCount = self.nvmeDiskCount
- # to reach max performance for > 16 disks we mandate 32 or more vcpus
- # https://cloud.google.com/compute/docs/disks/local-ssd#performance
- if diskCount >= 16 and self.cpu < 32:
- logging.warning(
- "This machine doesn't have enough CPUs for allocated number of NVMEs (at least 32 cpus for >=16 disks). Performance will suffer.")
- return False
- if diskCount < 1:
- logging.warning("No ephemeral disks were found.")
- return False
- diskSize = self.firstNvmeSize
- max_disktoramratio = 105
- # 30:1 Disk/RAM ratio must be kept at least(AWS), we relax this a little bit
- # on GCP we are OK with {max_disktoramratio}:1 , n1-standard-2 can cope with 1 disk, not more
- disktoramratio = (diskCount * diskSize) / self.memoryGB
- if (disktoramratio > max_disktoramratio):
- logging.warning(
- f"Instance disk-to-RAM ratio is {disktoramratio}, which is higher than the recommended ratio {max_disktoramratio}. Performance may suffer.")
- return False
- return True
- else:
- logging.warning("At least 2G of RAM per CPU is needed. Performance will suffer.")
- return False
-
- def private_ipv4(self):
- return self.__instance_metadata("network-interfaces/0/ip")
-
- @staticmethod
- def check():
- pass
-
- @staticmethod
- def io_setup():
- return run('/opt/scylladb/scripts/scylla_io_setup', shell=True, check=True)
-
- @property
- def user_data(self):
- try:
- return self.__instance_metadata("attributes/user-data")
- except urllib.error.HTTPError: # empty user-data
- return ""
-
-
class azure_instance:
    """Describe several aspects of the current Azure instance.

    Data is collected lazily from the local OS (psutil, /dev) and from the
    Azure Instance Metadata Service (IMDS) at 169.254.169.254.
    """

    # Keys of the os_disks mapping.
    EPHEMERAL = "ephemeral"
    PERSISTENT = "persistent"
    ROOT = "root"
    GETTING_STARTED_URL = "http://www.scylladb.com/doc/getting-started-azure/"
    ENDPOINT_SNITCH = "AzureSnitch"
    META_DATA_BASE_URL = "http://169.254.169.254/metadata/instance"

    def __init__(self):
        # All attributes are populated lazily by their properties;
        # None means "not fetched yet".
        self.__type = None
        self.__cpu = None
        self.__location = None
        self.__zone = None
        self.__memoryGB = None
        self.__nvmeDiskCount = None
        self.__firstNvmeSize = None
        self.__osDisks = None

    @staticmethod
    def is_azure_instance():
        """Check if it's Azure instance via DNS lookup to metadata server."""
        try:
            socket.getaddrinfo('metadata.azure.internal', 80)
        except socket.gaierror:
            return False
        return True

# as per https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service?tabs=windows#supported-api-versions
    API_VERSION = "?api-version=2021-01-01"

    def __instance_metadata(self, path):
        """Query the Azure metadata server and return the raw text response.

        *path* is relative to META_DATA_BASE_URL, e.g. '/compute/vmSize'.
        """
        # BUG FIX: META_DATA_BASE_URL has no trailing slash, so paths that did
        # not begin with '/' produced broken URLs such as
        # '.../instancelocation'.  Normalize the separator here.
        if not path.startswith("/"):
            path = "/" + path
        return curl(self.META_DATA_BASE_URL + path + self.API_VERSION + "&format=text",
                    headers={"Metadata": "True"})

    def is_in_root_devs(self, x, root_devs):
        """Return True when device name *x* (e.g. 'sda') underlies one of the
        root device paths in *root_devs* (e.g. '/dev/sda1')."""
        for root_dev in root_devs:
            if root_dev.startswith(os.path.join("/dev/", x)):
                return True
        return False

    def _non_root_nvmes(self):
        """get list of nvme disks from os, filter away if one of them is root"""
        nvme_re = re.compile(r"nvme\d+n\d+$")

        root_dev_candidates = [x for x in psutil.disk_partitions() if x.mountpoint == "/"]
        if len(root_dev_candidates) != 1:
            # BUG FIX: the old message used str.format without a placeholder,
            # so the offending partition list was never shown.
            raise Exception(f"expected exactly one disk mounted at root, found: {root_dev_candidates}")

        root_devs = [x.device for x in root_dev_candidates]

        nvmes_present = list(filter(nvme_re.match, os.listdir("/dev")))
        return {self.ROOT: root_devs,
                self.EPHEMERAL: [x for x in nvmes_present if not self.is_in_root_devs(x, root_devs)]}

    def _non_root_disks(self):
        """get list of disks from os, filter away if one of them is root"""
        disk_re = re.compile(r"/dev/sd[b-z]+$")

        root_dev_candidates = [x for x in psutil.disk_partitions() if x.mountpoint == "/"]

        root_devs = [x.device for x in root_dev_candidates]

        disks_present = list(filter(disk_re.match, glob.glob("/dev/sd*")))
        # BUG FIX: lstrip('/dev/') strips a *character set*, not a prefix,
        # and could eat leading letters of the device name; use basename.
        return {self.PERSISTENT: [os.path.basename(x) for x in disks_present
                                  if not self.is_in_root_devs(os.path.basename(x), root_devs)]}

    @property
    def os_disks(self):
        """populate disks from /dev/ and root mountpoint"""
        if self.__osDisks is None:
            osdisks = {}
            for k, v in self._non_root_nvmes().items():
                osdisks[k] = v
            for k, v in self._non_root_disks().items():
                osdisks[k] = v
            self.__osDisks = osdisks
        return self.__osDisks

    def getEphemeralOsDisks(self):
        """return just transient disks"""
        return self.os_disks[self.EPHEMERAL]

    def getPersistentOsDisks(self):
        """return just persistent disks"""
        return self.os_disks[self.PERSISTENT]

    @property
    def nvmeDiskCount(self):
        """get # of nvme disks available for scylla raid"""
        if self.__nvmeDiskCount is None:
            try:
                count_os_disks = len(self.getEphemeralOsDisks())
            except Exception as e:
                print("Problem when parsing disks from OS:")
                print(e)
                count_os_disks = 0
            count_metadata_nvme_disks = self.__get_nvme_disks_count_from_metadata()
            # Trust whichever source reports fewer disks.
            self.__nvmeDiskCount = min(count_os_disks, count_metadata_nvme_disks)
        return self.__nvmeDiskCount

    # Number of local NVMe disks per Lsv2 instance class.
    instanceToDiskCount = {
        "L8s": 1,
        "L16s": 2,
        "L32s": 4,
        "L48s": 6,
        "L64s": 8,
        "L80s": 10
    }

    def __get_nvme_disks_count_from_metadata(self):
        #storageProfile in VM metadata lacks the number of NVMEs, it's hardcoded based on VM type
        return self.instanceToDiskCount.get(self.instance_class(), 0)

    @property
    def instancelocation(self):
        """return the location of this instance, e.g. eastus"""
        if self.__location is None:
            # BUG FIX: IMDS exposes location under /compute/ (there is no
            # top-level 'location' endpoint).
            self.__location = self.__instance_metadata("/compute/location")
        return self.__location

    @property
    def instancezone(self):
        """return the zone of this instance, e.g. 1"""
        if self.__zone is None:
            # BUG FIX: IMDS exposes zone under /compute/ as well.
            self.__zone = self.__instance_metadata("/compute/zone")
        return self.__zone

    @property
    def instancetype(self):
        """return the type of this instance, e.g. Standard_L8s_v2"""
        if self.__type is None:
            self.__type = self.__instance_metadata("/compute/vmSize")
        return self.__type

    @property
    def cpu(self):
        """return the # of cpus of this instance"""
        if self.__cpu is None:
            self.__cpu = psutil.cpu_count()
        return self.__cpu

    @property
    def memoryGB(self):
        """return the size of memory in GB of this instance"""
        if self.__memoryGB is None:
            self.__memoryGB = psutil.virtual_memory().total/1024/1024/1024
        return self.__memoryGB

    def instance_purpose(self):
        """Returns the purpose of the instance we are running in. i.e.: Standard"""
        return self.instancetype.split("_")[0]

    def instance_class(self):
        """Returns the class of the instance we are running in. i.e.: L8s"""
        return self.instancetype.split("_")[1]

    def instance_size(self):
        """Returns the numeric size of the instance class. i.e.: '8' for L8s.

        BUG FIX: is_recommended_instance_size() called this method but it was
        never defined, raising AttributeError.
        """
        return self.instance_class()[1:-1]

    def is_unsupported_instance_class(self):
        """Returns if this instance type belongs to unsupported ones for nvmes"""
        return False

    def is_supported_instance_class(self):
        """Returns if this instance type belongs to supported ones for nvmes"""
        return self.instance_class() in self.instanceToDiskCount

    def is_recommended_instance_size(self):
        """if this instance has at least 2 cpus, it has a recommended size"""
        return int(self.instance_size()) > 1

    def is_recommended_instance(self):
        """Return True when the instance class is supported and not blacklisted.

        BUG FIX: the old check required is_unsupported_instance_class() to be
        True *and* is_supported_instance_class() to be True, which is
        contradictory and always evaluated to False.
        """
        return self.is_supported_instance_class() and not self.is_unsupported_instance_class()

    def private_ipv4(self):
        """Returns the private IPv4 address of this instance."""
        return self.__instance_metadata("/network/interface/0/ipv4/ipAddress/0/privateIpAddress")

    @staticmethod
    def check():
        # Azure has no extra environment sanity check (cf. aws_instance.check).
        pass

    @staticmethod
    def io_setup():
        """Run the generic io_setup script for this machine."""
        return run('/opt/scylladb/scripts/scylla_io_setup', shell=True, check=True)
-
class aws_instance:
    """Describe several aspects of the current AWS instance.

    Disk topology is collected from the EC2 instance metadata service and
    from /dev; instance type information comes from the metadata service.
    """
    GETTING_STARTED_URL = "http://www.scylladb.com/doc/getting-started-amazon/"
    META_DATA_BASE_URL = "http://169.254.169.254/latest/"
    ENDPOINT_SNITCH = "Ec2Snitch"

    def __disk_name(self, dev):
        """Strip '/dev/' prefix and partition number, e.g. '/dev/xvda1' -> 'xvda'."""
        name = re.compile(r"(?:/dev/)?(?P<devname>[a-zA-Z]+)\d*")
        return name.search(dev).group("devname")

    def __instance_metadata(self, path):
        """Fetch *path* from the EC2 instance metadata service."""
        return curl(self.META_DATA_BASE_URL + "meta-data/" + path)

    def __device_exists(self, dev):
        """Return True when the device node exists; accepts bare names ('xvda')."""
        if not dev.startswith("/dev"):
            dev = "/dev/%s" % dev
        return os.path.exists(dev)

    def __xenify(self, devname):
        # The metadata registry reports 'sdX' names even when the kernel
        # exposes them as 'xvdX' (Xen); translate accordingly.
        dev = self.__instance_metadata('block-device-mapping/' + devname)
        return dev.replace("sd", "xvd")

    def __filter_nvmes(self, dev, dev_type):
        """Return True when *dev* is an NVMe namespace of the requested kind
        ('ephemeral' = instance store, anything else = EBS)."""
        nvme_re = re.compile(r"(nvme\d+)n\d+$")
        match = nvme_re.match(dev)
        if not match:
            return False
        nvme_name = match.group(1)
        # The controller model string distinguishes EBS from instance store.
        with open(f'/sys/class/nvme/{nvme_name}/model') as f:
            model = f.read().strip()
        if dev_type == 'ephemeral':
            return model != 'Amazon Elastic Block Store'
        else:
            return model == 'Amazon Elastic Block Store'

    def _non_root_nvmes(self):
        """Classify NVMe devices into root / ephemeral / ebs groups."""
        root_dev_candidates = [x for x in psutil.disk_partitions() if x.mountpoint == "/"]
        if len(root_dev_candidates) != 1:
            # BUG FIX: the old message used str.format without a placeholder
            # (and contained a stray quote), losing the diagnostic detail.
            raise Exception(f"expected exactly one disk mounted at root, found: {root_dev_candidates}")

        root_dev = root_dev_candidates[0].device
        if root_dev == '/dev/root':
            # /dev/root is a virtual alias; resolve the real backing device.
            root_dev = run('findmnt -n -o SOURCE /', shell=True, check=True,
                           capture_output=True, encoding='utf-8').stdout.strip()
        ephemeral_present = [x for x in os.listdir("/dev") if self.__filter_nvmes(x, 'ephemeral')]
        ebs_present = [x for x in os.listdir("/dev") if self.__filter_nvmes(x, 'ebs')]
        return {"root": [root_dev],
                "ephemeral": ephemeral_present,
                "ebs": [x for x in ebs_present if not root_dev.startswith(os.path.join("/dev/", x))]}

    def __populate_disks(self):
        """Build self._disks from NVMe probing plus the metadata registry."""
        devmap = self.__instance_metadata("block-device-mapping")
        self._disks = {}
        # BUG FIX: raw string for the regex (avoids invalid-escape warning).
        devname = re.compile(r"^\D+")
        nvmes_present = self._non_root_nvmes()
        for k, v in nvmes_present.items():
            self._disks[k] = v

        for dev in devmap.splitlines():
            t = devname.match(dev).group()
            # NVMe probing already found the instance-store disks.
            if t == "ephemeral" and nvmes_present:
                continue
            if t not in self._disks:
                self._disks[t] = []
            if not self.__device_exists(self.__xenify(dev)):
                continue
            self._disks[t] += [self.__xenify(dev)]
        if 'ebs' not in self._disks:
            self._disks['ebs'] = []

    def __mac_address(self, nic='eth0'):
        """Read the MAC address of *nic* from sysfs."""
        with open('/sys/class/net/{}/address'.format(nic)) as f:
            return f.read().strip()

    def __init__(self):
        self._type = self.__instance_metadata("instance-type")
        self.__populate_disks()

    @classmethod
    def is_aws_instance(cls):
        """Check if it's AWS instance via query to metadata server."""
        try:
            curl(cls.META_DATA_BASE_URL, max_retries=2, retry_interval=1)
            return True
        except (urllib.error.URLError, urllib.error.HTTPError):
            return False

    def instance(self):
        """Returns which instance we are running in. i.e.: i3.16xlarge"""
        return self._type

    def instance_size(self):
        """Returns the size of the instance we are running in. i.e.: 16xlarge"""
        return self._type.split(".")[1]

    def instance_class(self):
        """Returns the class of the instance we are running in. i.e.: i3"""
        return self._type.split(".")[0]

    def is_supported_instance_class(self):
        """Return True for instance classes with local NVMe storage we support."""
        return self.instance_class() in ['i2', 'i3', 'i3en', 'c5d', 'm5d', 'm5ad', 'r5d', 'z1d', 'c6gd', 'm6gd', 'r6gd', 'x2gd', 'im4gn', 'is4gen']

    def get_en_interface_type(self):
        """Return the enhanced-networking driver for this instance type,
        or None when enhanced networking is unavailable."""
        instance_class = self.instance_class()
        instance_size = self.instance_size()
        if instance_class in ['c3', 'c4', 'd2', 'i2', 'r3']:
            return 'ixgbevf'
        if instance_class in ['a1', 'c5', 'c5a', 'c5d', 'c5n', 'c6g', 'c6gd', 'f1', 'g3', 'g4', 'h1', 'i3', 'i3en', 'inf1', 'm5', 'm5a', 'm5ad', 'm5d', 'm5dn', 'm5n', 'm6g', 'm6gd', 'p2', 'p3', 'r4', 'r5', 'r5a', 'r5ad', 'r5b', 'r5d', 'r5dn', 'r5n', 't3', 't3a', 'u-6tb1', 'u-9tb1', 'u-12tb1', 'u-18tn1', 'u-24tb1', 'x1', 'x1e', 'z1d', 'c6g', 'c6gd', 'm6g', 'm6gd', 't4g', 'r6g', 'r6gd', 'x2gd', 'im4gn', 'is4gen']:
            return 'ena'
        if instance_class == 'm4':
            # m4 only supports ENA on the largest size.
            if instance_size == '16xlarge':
                return 'ena'
            else:
                return 'ixgbevf'
        return None

    def disks(self):
        """Returns all disks in the system, as visible from the AWS registry"""
        disks = set()
        for v in list(self._disks.values()):
            disks = disks.union([self.__disk_name(x) for x in v])
        return disks

    def root_device(self):
        """Returns the device being used for root data. Unlike root_disk(),
        which will return a device name (i.e. xvda), this function will return
        the full path to the root partition as returned by the AWS instance
        metadata registry"""
        return set(self._disks["root"])

    def root_disk(self):
        """Returns the disk used for the root partition"""
        return self.__disk_name(self._disks["root"][0])

    def non_root_disks(self):
        """Returns all attached disks but root. Include ephemeral and EBS devices"""
        return set(self._disks["ephemeral"] + self._disks["ebs"])

    def ephemeral_disks(self):
        """Returns all ephemeral disks. Include standard SSDs and NVMe"""
        return set(self._disks["ephemeral"])

    def ebs_disks(self):
        """Returns all EBS disks"""
        return set(self._disks["ebs"])

    def public_ipv4(self):
        """Returns the public IPv4 address of this instance"""
        return self.__instance_metadata("public-ipv4")

    def private_ipv4(self):
        """Returns the private IPv4 address of this instance"""
        return self.__instance_metadata("local-ipv4")

    def is_vpc_enabled(self, nic='eth0'):
        """Return True when *nic* is attached to a VPC (metadata lists vpc-id)."""
        mac = self.__mac_address(nic)
        mac_stat = self.__instance_metadata('network/interfaces/macs/{}'.format(mac))
        return bool(re.search(r'^vpc-id$', mac_stat, flags=re.MULTILINE))

    @staticmethod
    def check():
        """Run the EC2 environment sanity-check script."""
        return run('/opt/scylladb/scripts/scylla_ec2_check --nic eth0', shell=True)

    @staticmethod
    def io_setup():
        """Run io_setup in AMI mode."""
        return run('/opt/scylladb/scripts/scylla_io_setup --ami', shell=True, check=True)

    @property
    def user_data(self):
        """Return the raw user-data blob for this instance."""
        return curl(self.META_DATA_BASE_URL + "user-data")
-
-
def get_id_like():
like = distro.like()
if not like:
@@ -762,6 +109,15 @@ def is_suse_variant():
d = get_id_like() if get_id_like() else distro.id()
return ('suse' in d)

def is_developer_mode():
    """Return True when developer mode is enabled in scylla.d/dev-mode.conf.

    The option counts as enabled when the file contains a non-commented
    line matching 'developer-mode=1' / 'developer-mode true' (and similar).
    """
    # non-advancing comment matcher
    _nocomment = r"^\s*(?!#)"
    # non-capturing grouping: value may follow whitespace or '='
    _scyllaeq = r"(?:\s*|=)"
    pattern = re.compile(_nocomment + r".*developer-mode" + _scyllaeq + r"(1|true)")
    # BUG FIX: the file handle was previously never closed; use a context
    # manager, and short-circuit with any() instead of building a list.
    with open(etcdir() + "/scylla.d/dev-mode.conf", "r") as f:
        return any(pattern.match(line) for line in f)
+
def get_text_from_path(fpath):
board_vendor_path = Path(fpath)
if board_vendor_path.exists():
@@ -774,26 +130,6 @@ def match_patterns_in_files(list_of_patterns_files):
return True
return False

-
def is_ec2():
    """Return True when running on an AWS EC2 instance."""
    return aws_instance.is_aws_instance()

def is_gce():
    """Return True when running on a Google Compute Engine instance."""
    return gcp_instance.is_gce_instance()

def is_azure():
    """Return True when running on a Microsoft Azure instance."""
    return azure_instance.is_azure_instance()

def get_cloud_instance():
    """Instantiate the provider-specific instance wrapper for this machine.

    Raises:
        Exception: when no supported cloud provider is detected.
    """
    if is_ec2():
        return aws_instance()
    if is_gce():
        return gcp_instance()
    if is_azure():
        return azure_instance()
    raise Exception("Unknown cloud provider! Only AWS/GCP/Azure supported.")
-
def hex2list(hex_str):
hex_str2 = hex_str.replace("0x", "").replace(",", "")
hex_int = int(hex_str2, 16)
diff --git a/install.sh b/install.sh
--- a/install.sh
+++ b/install.sh
@@ -398,7 +398,7 @@ if $supervisor; then
install -m755 dist/common/supervisor/* -Dt "$rprefix"/supervisor
fi

-SBINFILES=$(cd dist/common/scripts/; ls scylla_*setup node_health_check scylla_ec2_check scylla_kernel_check)
+SBINFILES=$(cd dist/common/scripts/; ls scylla_*setup node_health_check scylla_kernel_check)
SBINFILES+=" $(cd seastar/scripts; ls seastar-cpu-map.sh)"

cat << EOS > "$rprefix"/scripts/scylla_product.py

Commit Bot

<bot@cloudius-systems.com>
unread,
Feb 1, 2022, 9:06:19 AM2/1/22
to scylladb-dev@googlegroups.com, Takuya ASADA
From: Takuya ASADA <sy...@scylladb.com>
Committer: Avi Kivity <a...@scylladb.com>
Branch: master
Reply all
Reply to author
Forward
0 new messages