I'm facing a problem where Taurus runs out of memory. The problem occurs after about 15 minutes: the Task Manager shows that Taurus consumes more and more memory over time. Running the same test standalone with JMeter works fine. In the bzt.log I found the following entry.
[2019-09-19 08:28:15,678 ERROR root] MemoryError: Unable to allocate array with shape (6144,) and data type int64
File "C:\Program Files\Taurus\pkgs\bzt\cli.py", line 259, in perform
self.engine.run()
File "C:\Program Files\Taurus\pkgs\bzt\engine\engine.py", line 248, in run
reraise(exc_info, exc_value)
File "C:\Program Files\Taurus\pkgs\bzt\six\py3.py", line 86, in reraise
raise exc
File "C:\Program Files\Taurus\pkgs\bzt\engine\engine.py", line 227, in run
self._wait()
File "C:\Program Files\Taurus\pkgs\bzt\engine\engine.py", line 269, in _wait
while not self._check_modules_list():
File "C:\Program Files\Taurus\pkgs\bzt\engine\engine.py", line 256, in _check_modules_list
finished = bool(module.check())
File "C:\Program Files\Taurus\pkgs\bzt\modules\aggregator.py", line 840, in check
for point in self.datapoints():
File "C:\Program Files\Taurus\pkgs\bzt\modules\aggregator.py", line 598, in datapoints
datapoint[DataPoint.CUMULATIVE] = copy.deepcopy(self.cumulative) # FIXME: this line eats RAM like hell!
File "copy.py", line 150, in deepcopy
File "copy.py", line 240, in _deepcopy_dict
File "copy.py", line 161, in deepcopy
File "C:\Program Files\Taurus\pkgs\bzt\modules\aggregator.py", line 217, in __deepcopy__
mycopy = KPISet(self.perc_levels, self[KPISet.RESP_TIMES].high)
File "C:\Program Files\Taurus\pkgs\bzt\modules\aggregator.py", line 212, in __init__
self[KPISet.RESP_TIMES] = RespTimesCounter(1, hist_max_rt, 3, perc_levels)
File "C:\Program Files\Taurus\pkgs\bzt\modules\aggregator.py", line 103, in __init__
self.histogram = HdrHistogram(low, high, sign_figures)
File "C:\Program Files\Taurus\pkgs\hdrpy\__init__.py", line 269, in __init__
self.counts = numpy.zeros(self.counts_len, dtype=numpy.int64)
I'm running bzt-1.13.8.