From e84cb75c26acdc93fa0d508ad2f784c931b6bc2b Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sat, 16 Jun 2018 22:06:18 +0800 Subject: [PATCH 01/25] Convert Configurator error logging to user output - handle all sys.exit in Configurator - introduce UIError, which is used for generating output - added tests Signed-off-by: Stefan Marr --- rebench/configurator.py | 33 +++++++++++++------------ rebench/rebench.py | 7 ++++++ rebench/tests/broken-schema.conf | 40 ++++++++++++++++++++++++++++++ rebench/tests/broken-yaml.conf | 42 ++++++++++++++++++++++++++++++++ rebench/tests/executor_test.py | 34 +++++++------------------- rebench/tests/ui_test.py | 25 +++++++++++++++++++ rebench/ui.py | 35 ++++++++++++++++++++++++++ 7 files changed, 176 insertions(+), 40 deletions(-) create mode 100644 rebench/tests/broken-schema.conf create mode 100644 rebench/tests/broken-yaml.conf create mode 100644 rebench/tests/ui_test.py create mode 100644 rebench/ui.py diff --git a/rebench/configurator.py b/rebench/configurator.py index 1874c544..1a7843a3 100644 --- a/rebench/configurator.py +++ b/rebench/configurator.py @@ -28,6 +28,7 @@ from .model.exp_variables import ExpVariables from .model.reporting import Reporting from .model.virtual_machine import VirtualMachine +from .ui import UIError class _VMFilter(object): @@ -126,22 +127,24 @@ def load_config(file_name): validator = Core( source_data=data, schema_files=[dirname(__file__) + "/rebench-schema.yml"]) - validator.validate(raise_exception=False) - if validator.validation_errors and validator.validation_errors: - logging.error( - "Validation of " + file_name + " failed. " + - (" ".join(validator.validation_errors))) - sys.exit(-1) + try: + validator.validate(raise_exception=True) + except SchemaError as err: + indent = "\n " + raise UIError( + "Validation of " + file_name + " failed." + indent + + (indent.join(validator.validation_errors)), err) return data - except IOError: - logging.error("An error occurred on opening the config file (%s)." - % file_name) - logging.error(traceback.format_exc(0)) - sys.exit(-1) - except yaml.YAMLError: - logging.error("Failed parsing the config file (%s)." % file_name) - logging.error(traceback.format_exc(0)) - sys.exit(-1) + except IOError as err: + if err.errno == 2: + assert err.strerror == "No such file or directory" + raise UIError("The requested config file (%s) could not be opened. %s." + % (file_name, err.strerror), err) + else: + raise UIError(str(err), err) + except yaml.YAMLError as err: + raise UIError("Parsing of the config file " + + file_name + " failed.\nError " + str(err), err) class Configurator(object): diff --git a/rebench/rebench.py b/rebench/rebench.py index 22e75487..5a219b3f 100755 --- a/rebench/rebench.py +++ b/rebench/rebench.py @@ -29,14 +29,18 @@ # IN THE SOFTWARE. 
import logging import sys + from argparse import ArgumentParser, RawDescriptionHelpFormatter, SUPPRESS +from humanfriendly.terminal import warning + from .executor import Executor, BatchScheduler, RoundRobinScheduler, \ RandomScheduler from .persistence import DataStore from .reporter import CliReporter from .configurator import Configurator, load_config from .configuration_error import ConfigurationError +from .ui import UIError class ReBench(object): @@ -218,6 +222,9 @@ def main_func(): except KeyboardInterrupt: logging.info("Aborted by user request") return -1 + except UIError as err: + warning(err.message) + return -1 if __name__ == "__main__": diff --git a/rebench/tests/broken-schema.conf b/rebench/tests/broken-schema.conf new file mode 100644 index 00000000..dfb78de4 --- /dev/null +++ b/rebench/tests/broken-schema.conf @@ -0,0 +1,40 @@ +# Config file for ReBench +# Config format is YAML (see http://yaml.org/ for detailed spec) + +# this run definition will be chosen if no parameters are given to rebench.py +standard_experiment: Test +standard_data_file: 'tests/small.data' + +# general configuration for runs +runs: + invocations: 10 + +non_existing_attribute: foo bar + +benchmark_suites: + Suite: + gauge_adapter: TestVM + command: TestBenchMarks %(benchmark)s %(input)s %(variable)s + input_sizes: [2, 10] + variable_values: + - val1 + benchmarks: + - Bench1 + - Bench2 + +virtual_machines: + TestRunner1: + path: tests + binary: test-vm1.py %(cores)s + cores: [1] + TestRunner2: + path: tests + binary: test-vm2.py + +experiments: + Test: + benchmark: + - Suite + executions: + - TestRunner1 + - TestRunner2 diff --git a/rebench/tests/broken-yaml.conf b/rebench/tests/broken-yaml.conf new file mode 100644 index 00000000..03487171 --- /dev/null +++ b/rebench/tests/broken-yaml.conf @@ -0,0 +1,42 @@ +# Config file for ReBench +# Config format is YAML (see http://yaml.org/ for detailed spec) + +# this run definition will be chosen if no parameters are given to rebench.py +standard_experiment: Test +standard_data_file: 'tests/small.data' + +# general configuration for runs +runs: + invocations: 10 + +non_existing_attribute: foo bar + +- fsdfsd + +benchmark_suites: + Suite: + gauge_adapter: TestVM + command: TestBenchMarks %(benchmark)s %(input)s %(variable)s + input_sizes: [2, 10] + variable_values: + - val1 + benchmarks: + - Bench1 + - Bench2 + +virtual_machines: + TestRunner1: + path: tests + binary: test-vm1.py %(cores)s + cores: [1] + TestRunner2: + path: tests + binary: test-vm2.py + +experiments: + Test: + benchmark: + - Suite + executions: + - TestRunner1 + - TestRunner2 diff --git a/rebench/tests/executor_test.py b/rebench/tests/executor_test.py index f01b8ae3..16dc4c25 100644 --- a/rebench/tests/executor_test.py +++ b/rebench/tests/executor_test.py @@ -20,14 +20,14 @@ import unittest import subprocess import os -import sys -from ..executor import Executor, BenchmarkThreadExceptions +from .rebench_test_case import ReBenchTestCase +from .. import ReBench +from ..executor import Executor from ..configurator import Configurator, load_config from ..model.measurement import Measurement from ..persistence import DataStore -from .. 
import ReBench -from .rebench_test_case import ReBenchTestCase +from ..ui import UIError class ExecutorTest(ReBenchTestCase): @@ -61,13 +61,8 @@ def test_setup_and_run_benchmark(self): # self.assertAlmostEqual(45871.453514433859, i_high) # self.assertAlmostEqual(0.00450904792104, interval_percentage) - def test_broken_command_format(self): - def test_exit(val): - self.assertEqual(-1, val, "got the correct error code") - raise RuntimeError("TEST-PASSED") - sys.exit = test_exit - - try: + def test_broken_command_format_with_ValueError(self): + with self.assertRaises(UIError) as err: options = ReBench().shell_options().parse_args(['dummy']) cnf = Configurator(load_config(self._path + '/test.conf'), DataStore(), options, @@ -75,18 +70,10 @@ def test_exit(val): data_file=self._tmp_file) ex = Executor(cnf.get_runs(), cnf.use_nice, cnf.do_builds) ex.execute() - except RuntimeError as err: - self.assertEqual("TEST-PASSED", str(err)) - except BenchmarkThreadExceptions as err: - self.assertEqual("TEST-PASSED", str(err.exceptions[0])) + self.assertIsInstance(err.exception.source_exception, ValueError) def test_broken_command_format_with_TypeError(self): - def test_exit(val): - self.assertEqual(-1, val, "got the correct error code") - raise RuntimeError("TEST-PASSED") - sys.exit = test_exit - - try: + with self.assertRaises(UIError) as err: options = ReBench().shell_options().parse_args(['dummy']) cnf = Configurator(load_config(self._path + '/test.conf'), DataStore(), options, @@ -94,10 +81,7 @@ def test_exit(val): data_file=self._tmp_file) ex = Executor(cnf.get_runs(), cnf.use_nice, cnf.do_builds) ex.execute() - except RuntimeError as err: - self.assertEqual("TEST-PASSED", str(err)) - except BenchmarkThreadExceptions as err: - self.assertEqual("TEST-PASSED", str(err.exceptions[0])) + self.assertIsInstance(err.exception.source_exception, TypeError) def _basic_execution(self, cnf): runs = cnf.get_runs() diff --git a/rebench/tests/ui_test.py b/rebench/tests/ui_test.py new file mode 100644 index 00000000..bdd53959 --- /dev/null +++ b/rebench/tests/ui_test.py @@ -0,0 +1,25 @@ +from pykwalify.errors import SchemaError +from yaml import YAMLError + +from ..configurator import load_config +from ..ui import UIError + +from .rebench_test_case import ReBenchTestCase + + +class UITest(ReBenchTestCase): + + def test_missing_file(self): + with self.assertRaises(UIError) as err: + load_config("--file-that-does-not-exist") + self.assertIsInstance(err.exception.source_exception, IOError) + + def test_config_not_validating(self): + with self.assertRaises(UIError) as err: + load_config(self._path + '/broken-schema.conf') + self.assertIsInstance(err.exception.source_exception, SchemaError) + + def test_config_not_proper_yaml(self): + with self.assertRaises(UIError) as err: + load_config(self._path + '/broken-yaml.conf') + self.assertIsInstance(err.exception.source_exception, YAMLError) diff --git a/rebench/ui.py b/rebench/ui.py new file mode 100644 index 00000000..b36100ed --- /dev/null +++ b/rebench/ui.py @@ -0,0 +1,35 @@ +# Copyright (c) 2018 Stefan Marr +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above 
copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class UIError(Exception): + + def __init__(self, message, exception): + super(UIError, self).__init__() + self._message = message + self._exception = exception + + @property + def message(self): + return self._message + + @property + def source_exception(self): + return self._exception From 2276b0407524fabccf3b9757071d17557e99bccd Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sat, 16 Jun 2018 22:37:55 +0800 Subject: [PATCH 02/25] Handle ConfigurationErrors Signed-off-by: Stefan Marr --- rebench/rebench.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rebench/rebench.py b/rebench/rebench.py index 5a219b3f..05f2bf53 100755 --- a/rebench/rebench.py +++ b/rebench/rebench.py @@ -189,8 +189,8 @@ def run(self, argv=None): cli_reporter, exp_name, args.data_file, args.build_log, exp_filter) except ConfigurationError as exc: - logging.error(exc.message) - sys.exit(-1) + raise UIError(exc.message, exc) + data_store.load_data() return self.execute_experiment() From 440895d2e002e24b1505c61c2a2f7e9ba63997bf Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sat, 16 Jun 2018 22:39:32 +0800 Subject: [PATCH 03/25] Handle subprocess errors gracefully MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - catch them, and rethrow in joining thread - in executor, handle them so that ReBench doesn’t quit Signed-off-by: Stefan Marr --- rebench/executor.py | 27 +++++++++++++++++++++----- rebench/subprocess_with_timeout.py | 31 +++++++++++++++++++++--------- 2 files changed, 44 insertions(+), 14 deletions(-) diff --git a/rebench/executor.py b/rebench/executor.py index 93043528..d133cc03 100644 --- a/rebench/executor.py +++ b/rebench/executor.py @@ -31,6 +31,8 @@ import sys from threading import Thread, RLock +from humanfriendly.terminal import warning + from . import subprocess_with_timeout as subprocess_timeout from .statistics import StatisticProperties from .interop.adapter import ExecutionDeliveredNoResults @@ -387,11 +389,26 @@ def _generate_data_point(self, cmdline, gauge_adapter, run_id, print(cmdline) # execute the external program here run_id.indicate_invocation_start() - (return_code, output, _) = subprocess_timeout.run( - cmdline, cwd=run_id.location, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, shell=True, verbose=self._verbose, - timeout=run_id.max_invocation_time) - output = output.decode('utf-8') + + try: + (return_code, output, _) = subprocess_timeout.run( + cmdline, cwd=run_id.location, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, shell=True, verbose=self._verbose, + timeout=run_id.max_invocation_time) + output = output.decode('utf-8') + except OSError as err: + run_id.fail_immediately() + if err.errno == 2: + warning( + ("Failed executing a run." + "\n It failed with: %s." 
+ "\n File: %s") % (err.strerror, err.filename)) + else: + warning(str(err)) + warning( + ("\n Cmd: %s" + "\n Cwd: %s") % (cmdline, run_id.location)) + return True if return_code != 0 and not self._include_faulty: run_id.indicate_failed_execution() diff --git a/rebench/subprocess_with_timeout.py b/rebench/subprocess_with_timeout.py index 24725ce3..04e1c26a 100644 --- a/rebench/subprocess_with_timeout.py +++ b/rebench/subprocess_with_timeout.py @@ -27,17 +27,25 @@ def __init__(self, binary_name, args, shell, cwd, verbose, stdout, stderr): self.stdout_result = None self.stderr_result = None self.returncode = None + self._exception = None - def run(self): - self._started_cv.acquire() - proc = Popen(self._args, shell=self._shell, cwd=self._cwd, - stdout=self._stdout, stderr=self._stderr) - self._pid = proc.pid - self._started_cv.notify() - self._started_cv.release() + @property + def exception(self): + return self._exception - self.process_output(proc) - self.returncode = proc.returncode + def run(self): + try: + self._started_cv.acquire() + proc = Popen(self._args, shell=self._shell, cwd=self._cwd, + stdout=self._stdout, stderr=self._stderr) + self._pid = proc.pid + self._started_cv.notify() + self._started_cv.release() + + self.process_output(proc) + self.returncode = proc.returncode + except Exception as err: # pylint: disable=broad-except + self._exception = err def get_pid(self): self._started_cv.acquire() @@ -111,6 +119,11 @@ def run(args, cwd=None, shell=False, kill_tree=True, timeout=-1, assert thread.get_pid() is not None return kill_process(thread.get_pid(), kill_tree, thread) + if not thread.is_alive(): + exp = thread.exception + if exp: + raise exp # pylint: disable=raising-bad-type + return thread.returncode, thread.stdout_result, thread.stderr_result From 410e9f4970e445644c76e733f67520f2f7bd449e Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sun, 17 Jun 2018 01:14:42 +0800 Subject: [PATCH 04/25] Use DETAIL_INDENT constant To make sure output is consistent Signed-off-by: Stefan Marr --- rebench/configurator.py | 10 ++++------ rebench/executor.py | 13 +++++++------ rebench/ui.py | 3 +++ 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/rebench/configurator.py b/rebench/configurator.py index 1a7843a3..9f7a350a 100644 --- a/rebench/configurator.py +++ b/rebench/configurator.py @@ -17,10 +17,8 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. -import sys import logging import subprocess -import traceback from os.path import dirname from .model.experiment import Experiment @@ -28,7 +26,7 @@ from .model.exp_variables import ExpVariables from .model.reporting import Reporting from .model.virtual_machine import VirtualMachine -from .ui import UIError +from .ui import UIError, DETAIL_INDENT class _VMFilter(object): @@ -117,6 +115,7 @@ def load_config(file_name): """ import yaml from pykwalify.core import Core + from pykwalify.errors import SchemaError # Disable most logging for pykwalify logging.getLogger('pykwalify').setLevel(logging.ERROR) @@ -130,10 +129,9 @@ def load_config(file_name): try: validator.validate(raise_exception=True) except SchemaError as err: - indent = "\n " raise UIError( - "Validation of " + file_name + " failed." + indent + - (indent.join(validator.validation_errors)), err) + "Validation of " + file_name + " failed." 
+ DETAIL_INDENT + + (DETAIL_INDENT.join(validator.validation_errors)), err) return data except IOError as err: if err.errno == 2: diff --git a/rebench/executor.py b/rebench/executor.py index d133cc03..30337af1 100644 --- a/rebench/executor.py +++ b/rebench/executor.py @@ -34,8 +34,9 @@ from humanfriendly.terminal import warning from . import subprocess_with_timeout as subprocess_timeout -from .statistics import StatisticProperties from .interop.adapter import ExecutionDeliveredNoResults +from .statistics import StatisticProperties +from .ui import DETAIL_INDENT class FailedBuilding(Exception): @@ -400,14 +401,14 @@ def _generate_data_point(self, cmdline, gauge_adapter, run_id, run_id.fail_immediately() if err.errno == 2: warning( - ("Failed executing a run." - "\n It failed with: %s." - "\n File: %s") % (err.strerror, err.filename)) + ("Failed executing a run." + + DETAIL_INDENT + "It failed with: %s." + + DETAIL_INDENT + "File: %s") % (err.strerror, err.filename)) else: warning(str(err)) warning( - ("\n Cmd: %s" - "\n Cwd: %s") % (cmdline, run_id.location)) + (DETAIL_INDENT + "Cmd: %s" + + DETAIL_INDENT + "Cwd: %s") % (cmdline, run_id.location)) return True if return_code != 0 and not self._include_faulty: diff --git a/rebench/ui.py b/rebench/ui.py index b36100ed..7e96c417 100644 --- a/rebench/ui.py +++ b/rebench/ui.py @@ -19,6 +19,9 @@ # IN THE SOFTWARE. +DETAIL_INDENT = "\n " + + class UIError(Exception): def __init__(self, message, exception): From ca4b2652e10fac76a6e66bca549be03297991033 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sun, 17 Jun 2018 01:15:34 +0800 Subject: [PATCH 05/25] Make format string error reporting more robust - also adapt new way of handling it Signed-off-by: Stefan Marr --- rebench/model/run_id.py | 52 ++++++++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 22 deletions(-) diff --git a/rebench/model/run_id.py b/rebench/model/run_id.py index 4f75a6f4..382fb283 100644 --- a/rebench/model/run_id.py +++ b/rebench/model/run_id.py @@ -19,10 +19,10 @@ # IN THE SOFTWARE. import logging import re -import sys from .benchmark import Benchmark from .termination_check import TerminationCheck +from ..ui import UIError, DETAIL_INDENT class RunId(object): @@ -188,11 +188,23 @@ def as_simple_string(self): self._cores, self._input_size, self._var_value) def _expand_vars(self, string): - return string % {'benchmark' : self._benchmark.command, - 'input' : self._input_size, - 'variable' : self._var_value, - 'cores' : self._cores, - 'warmup' : self._benchmark.run_details.warmup} + try: + return string % {'benchmark': self._benchmark.command, + 'input': self._input_size, + 'variable': self._var_value, + 'cores': self._cores, + 'warmup': self._benchmark.run_details.warmup} + except ValueError as err: + self._report_format_issue_and_exit(string, err) + except TypeError as err: + self._report_format_issue_and_exit(string, err) + except KeyError as err: + msg = ("The configuration of %s contains improper Python format strings." + % self._benchmark.name) + msg += DETAIL_INDENT + ("The command line configured is: %s" % string) + msg += DETAIL_INDENT + ("%s is not supported as key." % err.message) + msg += DETAIL_INDENT + "Only benchmark, input, variable, cores, and warmup are supported." 
+ raise UIError(msg, err) def cmdline(self): if self._cmdline: @@ -210,12 +222,7 @@ def cmdline(self): if self._benchmark.extra_args is not None: cmdline += " %s" % self._benchmark.extra_args - try: - cmdline = self._expand_vars(cmdline) - except ValueError: - self._report_cmdline_format_issue_and_exit(cmdline) - except TypeError: - self._report_cmdline_format_issue_and_exit(cmdline) + cmdline = self._expand_vars(cmdline) self._cmdline = cmdline.strip() return self._cmdline @@ -227,20 +234,21 @@ def __eq__(self, other): def __ne__(self, other): return not self.__eq__(other) - def _report_cmdline_format_issue_and_exit(self, cmdline): - logging.critical("The configuration of %s contains improper " - "Python format strings.", self._benchmark.name) + def _report_format_issue_and_exit(self, cmdline, err): + msg = ("The configuration of the benchmark %s contains an improper Python format string." + % self._benchmark.name) # figure out which format misses a conversion type + msg += DETAIL_INDENT + ("The command line configured is: %s" % cmdline) + msg += DETAIL_INDENT + ("Error: %s" % err.message) without_conversion_type = re.findall( r"%\(.*?\)(?![diouxXeEfFgGcrs%])", cmdline) - logging.error("The command line configured is: %s", cmdline) - logging.error("The following elements do not have conversion types: \"%s\"", - '", "'.join(without_conversion_type)) - logging.error("This can be fixed by replacing for instance %s with %ss", - without_conversion_type[0], - without_conversion_type[0]) - sys.exit(-1) + if without_conversion_type: + msg += DETAIL_INDENT + ("The following elements do not have conversion types: \"%s\"" + % '", "'.join(without_conversion_type)) + msg += DETAIL_INDENT + ("This can be fixed by replacing for instance %s with %ss" + % (without_conversion_type[0], without_conversion_type[0])) + raise UIError(msg, err) def as_str_list(self): result = self._benchmark.as_str_list() From 551e4518f5120fe048e3388bdc474afa5edcf1e8 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sun, 17 Jun 2018 01:23:54 +0800 Subject: [PATCH 06/25] Use warning() when failing to set nice Signed-off-by: Stefan Marr --- rebench/configurator.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/rebench/configurator.py b/rebench/configurator.py index 9f7a350a..f659ebfa 100644 --- a/rebench/configurator.py +++ b/rebench/configurator.py @@ -21,6 +21,8 @@ import subprocess from os.path import dirname +from humanfriendly.terminal import warning + from .model.experiment import Experiment from .model.exp_run_details import ExpRunDetails from .model.exp_variables import ExpVariables @@ -198,13 +200,12 @@ def _process_cli_options(self): logging.basicConfig(level=logging.ERROR) logging.getLogger().setLevel(logging.ERROR) - if self._options.use_nice: - if not can_set_niceness(): - logging.error("Process niceness cannot be set currently. " - "To execute benchmarks with highest priority, " - "you might need root/admin rights.") - logging.error("Deactivated usage of nice command.") - self._options.use_nice = False + if self._options.use_nice and not can_set_niceness(): + warning("Error: Process niceness could not be set. 
" + "To execute benchmarks with highest priority, " + "you might need root/admin rights.") + warning("Deactivated usage of nice command.") + self._options.use_nice = False @property def use_nice(self): From 8ae0af7092377ba780d60c48d096d2a5b18e4009 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sun, 17 Jun 2018 01:50:40 +0800 Subject: [PATCH 07/25] =?UTF-8?q?Make=20sure=20measurements=20always=20hav?= =?UTF-8?q?e=20a=20unit,=20that=E2=80=99s=20important?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With out a unit, misinterpretation is likely Signed-off-by: Stefan Marr --- rebench/interop/test_adapter.py | 2 +- rebench/model/measurement.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/rebench/interop/test_adapter.py b/rebench/interop/test_adapter.py index 17ca0cc0..f070c26e 100644 --- a/rebench/interop/test_adapter.py +++ b/rebench/interop/test_adapter.py @@ -34,7 +34,7 @@ class TestAdapter(GaugeAdapter): def parse_data(self, data, run_id): point = DataPoint(run_id) - point.add_measurement(Measurement(self.test_data[self.index], None, + point.add_measurement(Measurement(self.test_data[self.index], "ms", run_id)) self.index = (self.index + 1) % len(self.test_data) return [point] diff --git a/rebench/model/measurement.py b/rebench/model/measurement.py index 5c95af5a..550f6bc5 100644 --- a/rebench/model/measurement.py +++ b/rebench/model/measurement.py @@ -29,6 +29,7 @@ def __init__(self, value, unit, run_id, criterion='total', self._criterion = criterion self._value = value self._unit = unit + assert unit is not None self._timestamp = timestamp or datetime.now() self._line_number = line_number self._filename = filename From df8ac1d7722e4f5528589490f941cff30cc1dfd7 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sun, 17 Jun 2018 02:34:39 +0800 Subject: [PATCH 08/25] Use spinner to indicate progress more nicely This is optimized for non-debug use of rebench. Not sure yet that this is the way forward. Want a more verbose version with more detailed info on measurements, too. Signed-off-by: Stefan Marr --- rebench/executor.py | 35 ++++++++++++++++++++++++++++++++--- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/rebench/executor.py b/rebench/executor.py index 30337af1..4f2a6dd1 100644 --- a/rebench/executor.py +++ b/rebench/executor.py @@ -31,11 +31,12 @@ import sys from threading import Thread, RLock +from humanfriendly import Spinner from humanfriendly.terminal import warning from . 
import subprocess_with_timeout as subprocess_timeout from .interop.adapter import ExecutionDeliveredNoResults -from .statistics import StatisticProperties +from .statistics import StatisticProperties, mean from .ui import DETAIL_INDENT @@ -51,6 +52,8 @@ class RunScheduler(object): def __init__(self, executor): self._executor = executor + self._progress = 0 + self._progress_spinner = None @staticmethod def _filter_out_completed_runs(runs): @@ -64,9 +67,32 @@ def _process_remaining_runs(self, runs): """Abstract, to be implemented""" pass + def _indicate_progress(self, completed_task, run): + if not self._progress_spinner: + return + + totals = run.get_total_values() + if completed_task: + self._progress += 1 + + if totals: + art_mean = mean(run.get_total_values()) + else: + art_mean = 0 + label = "Running Benchmarks: %70s\tmean: %10.1f" \ + % (run.as_simple_string(), art_mean) + self._progress_spinner.step(self._progress, label) + def execute(self): + total_num_runs = len(self._executor.runs) runs = self._filter_out_completed_runs(self._executor.runs) - self._process_remaining_runs(runs) + completed_runs = total_num_runs - len(runs) + self._progress = completed_runs + + with Spinner(label="Running Benchmarks", total=total_num_runs) as spinner: + self._progress_spinner = spinner + spinner.step(completed_runs) + self._process_remaining_runs(runs) class BatchScheduler(RunScheduler): @@ -77,6 +103,7 @@ def _process_remaining_runs(self, runs): completed = False while not completed: completed = self._executor.execute_run(run_id) + self._indicate_progress(completed, run_id) except FailedBuilding: pass @@ -90,6 +117,7 @@ def _process_remaining_runs(self, runs): try: run = task_list.popleft() completed = self._executor.execute_run(run) + self._indicate_progress(completed, run) if not completed: task_list.append(run) except FailedBuilding: @@ -102,9 +130,10 @@ def _process_remaining_runs(self, runs): task_list = list(runs) while task_list: + run = random.choice(task_list) try: - run = random.choice(task_list) completed = self._executor.execute_run(run) + self._indicate_progress(completed, run) if completed: task_list.remove(run) except FailedBuilding: From 4519fcb5916ffd7bd7485542e8ba00be1af6c7a7 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sun, 17 Jun 2018 06:14:52 +1000 Subject: [PATCH 09/25] Added humanfriendly to .travis.yml and setup.py Signed-off-by: Stefan Marr --- .travis.yml | 3 +-- setup.py | 7 ++++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 681d1ff4..c5420fe5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,9 +24,8 @@ addons: - time install: + - pip install coveralls humanfriendly pylint - pip install . - - pip install coveralls - - pip install pylint # command to run tests script: diff --git a/setup.py b/setup.py index f9b592be..7bef5b06 100644 --- a/setup.py +++ b/setup.py @@ -8,10 +8,10 @@ # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: -# +# # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. -# +# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE @@ -34,7 +34,8 @@ packages=find_packages(exclude=['*.tests']), install_requires=[ 'PyYAML>=3.12', - 'pykwalify>=1.6.1' + 'pykwalify>=1.6.1', + 'humanfriendly>=4.12' ], entry_points = { 'console_scripts' : ['rebench = rebench:main_func'] From 72bf9b7c4bc4fbeceebf2a094e25c002381e4d81 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sun, 17 Jun 2018 06:43:30 +1000 Subject: [PATCH 10/25] Disable logging for pykwalify Signed-off-by: Stefan Marr --- rebench/configurator.py | 2 +- rebench/tests/rebench_test_case.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/rebench/configurator.py b/rebench/configurator.py index f659ebfa..5e6815d0 100644 --- a/rebench/configurator.py +++ b/rebench/configurator.py @@ -120,7 +120,7 @@ def load_config(file_name): from pykwalify.errors import SchemaError # Disable most logging for pykwalify - logging.getLogger('pykwalify').setLevel(logging.ERROR) + logging.getLogger('pykwalify').addHandler(logging.NullHandler()) try: with open(file_name, 'r') as conf_file: diff --git a/rebench/tests/rebench_test_case.py b/rebench/tests/rebench_test_case.py index ecc9fd14..604decc7 100644 --- a/rebench/tests/rebench_test_case.py +++ b/rebench/tests/rebench_test_case.py @@ -17,6 +17,7 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. +import logging import os from os.path import dirname, realpath @@ -32,6 +33,8 @@ def _set_path(self, path): os.chdir(self._path) def setUp(self): + logging.getLogger('pykwalify').addHandler(logging.NullHandler()) + self._set_path(__file__) self._tmp_file = mkstemp()[1] # just use the file name From 91eb473df7161e4469ec5d184aa2ff090edab4c2 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sun, 17 Jun 2018 07:37:16 +1000 Subject: [PATCH 11/25] Added warning_low_priority based on humanfriendly This one uses magenta as default color Signed-off-by: Stefan Marr --- rebench/ui.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/rebench/ui.py b/rebench/ui.py index 7e96c417..73e02fae 100644 --- a/rebench/ui.py +++ b/rebench/ui.py @@ -17,11 +17,21 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
+import sys +from humanfriendly.compat import coerce_string +from humanfriendly.terminal import terminal_supports_colors, ansi_wrap, auto_encode DETAIL_INDENT = "\n " +def warning_low_priority(text, *args, **kw): + text = coerce_string(text) + if terminal_supports_colors(sys.stderr): + text = ansi_wrap(text, color='magenta') + auto_encode(sys.stderr, text + '\n', *args, **kw) + + class UIError(Exception): def __init__(self, message, exception): From 19e3089bba479cb079b1701bf030d6dbb572981a Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sun, 17 Jun 2018 07:38:59 +1000 Subject: [PATCH 12/25] Fix reporting of runs below min_iteration_time Signed-off-by: Stefan Marr --- rebench/reporter.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/rebench/reporter.py b/rebench/reporter.py index 3064f78b..c252b489 100644 --- a/rebench/reporter.py +++ b/rebench/reporter.py @@ -26,6 +26,8 @@ import json import re +from .ui import warning_low_priority + try: from http.client import HTTPException from urllib.request import urlopen @@ -118,9 +120,6 @@ def __init__(self, executes_verbose): self._runs_remaining = 0 self._executes_verbose = executes_verbose - # TODO: re-add support, think, we need that based on the proper config, i.e., the run id -# self._min_iteration_time = configurator.statistics.min_iteration_time - def run_failed(self, run_id, cmdline, return_code, output): # Additional information in debug mode result = "[%s] Run failed: %s\n" % ( @@ -155,14 +154,12 @@ def run_completed(self, run_id, statistics, cmdline): self._runs_completed += 1 self._runs_remaining -= 1 - if run_id.run_config.min_iteration_time: - if statistics.mean < run_id.run_config.min_iteration_time: - print(("WARNING: measured mean is lower than min_iteration_time (%s) " - "\t mean: %.1f\trun id: %s") - % (run_id.run_config.min_iteration_time, - statistics.mean, - run_id.as_simple_string())) - print("Cmd: %s" % cmdline) + if run_id.min_iteration_time and statistics.mean < run_id.min_iteration_time: + warning_low_priority( + ("WARNING: measured mean is lower than min_iteration_time (%s) " + "\t mean: %.1f\trun id: %s") + % (run_id.min_iteration_time, statistics.mean, run_id.as_simple_string())) + warning_low_priority("Cmd: %s" % cmdline) def report_job_completed(self, run_ids): print("[%s] Job completed" % datetime.now()) From 9ab367a82a331e601ef7ae59cad94faddfee56d6 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sun, 17 Jun 2018 07:58:41 +1000 Subject: [PATCH 13/25] Fix cli_reporter registration Signed-off-by: Stefan Marr --- rebench/configurator.py | 4 +--- rebench/model/reporting.py | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/rebench/configurator.py b/rebench/configurator.py index 5e6815d0..fa5962a3 100644 --- a/rebench/configurator.py +++ b/rebench/configurator.py @@ -160,13 +160,11 @@ def __init__(self, raw_config, data_store, cli_options=None, cli_reporter=None, self._root_run_details = ExpRunDetails.compile( raw_config.get('runs', {}), ExpRunDetails.default()) self._root_reporting = Reporting.compile( - raw_config.get('reporting', {}), Reporting.empty(), cli_options) + raw_config.get('reporting', {}), Reporting.empty(cli_reporter), cli_options) self._options = cli_options self._process_cli_options() - self._cli_reporter = cli_reporter - self._data_store = data_store self._build_commands = dict() diff --git a/rebench/model/reporting.py b/rebench/model/reporting.py index c03ea93a..5b9d5ecd 100644 --- a/rebench/model/reporting.py +++ 
b/rebench/model/reporting.py @@ -34,8 +34,8 @@ def compile(cls, reporting, root_reporting, options): return Reporting(codespeed, cli_reporter) @classmethod - def empty(cls): - return Reporting(None, None) + def empty(cls, cli_reporter): + return Reporting(None, cli_reporter) def __init__(self, codespeed_reporter, cli_reporter): self._codespeed_reporter = codespeed_reporter From b5f97b51bea10ec741bfc3300800d1c1b30ecfd3 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sun, 17 Jun 2018 07:59:00 +1000 Subject: [PATCH 14/25] Fix remaining time calculation Signed-off-by: Stefan Marr --- rebench/model/run_id.py | 11 ++++++++++- rebench/reporter.py | 17 +++++------------ 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/rebench/model/run_id.py b/rebench/model/run_id.py index 382fb283..225135f0 100644 --- a/rebench/model/run_id.py +++ b/rebench/model/run_id.py @@ -53,6 +53,14 @@ def min_iteration_time(self): def max_invocation_time(self): return self._benchmark.run_details.max_invocation_time + @property + def iterations(self): + return self._benchmark.run_details.iterations + + @property + def invocations(self): + return self._benchmark.run_details.invocations + @property def execute_exclusively(self): return self._benchmark.run_details.execute_exclusively @@ -203,7 +211,8 @@ def _expand_vars(self, string): % self._benchmark.name) msg += DETAIL_INDENT + ("The command line configured is: %s" % string) msg += DETAIL_INDENT + ("%s is not supported as key." % err.message) - msg += DETAIL_INDENT + "Only benchmark, input, variable, cores, and warmup are supported." + msg += DETAIL_INDENT + msg += "Only benchmark, input, variable, cores, and warmup are supported." raise UIError(msg, err) def cmdline(self): diff --git a/rebench/reporter.py b/rebench/reporter.py index c252b489..743448c3 100644 --- a/rebench/reporter.py +++ b/rebench/reporter.py @@ -137,8 +137,8 @@ def run_failed(self, run_id, cmdline, return_code, output): print("Cmd: %s\n" % cmdline) - if run_id.benchmark.suite.has_max_invocation_time(): - logging.debug("max_invocation_time: %s" % run_id.benchmark.suite.max_invocation_time) + if run_id.max_invocation_time: + logging.debug("max_invocation_time: %s" % run_id.max_invocation_time) logging.debug("cwd: %s" % run_id.benchmark.suite.location) if not self._executes_verbose and output and output.strip(): @@ -173,16 +173,9 @@ def set_total_number_of_runs(self, num_runs): def start_run(self, run_id): if self._runs_completed > 0: current = time() - data_points_per_run = run_id.run_config.number_of_data_points - data_points_completed = (self._runs_completed * - data_points_per_run + - len(run_id.get_data_points())) - data_points_remaining = (self._runs_remaining * - data_points_per_run - - len(run_id.get_data_points())) - time_per_data_point = ((current - self._start_time) / - data_points_completed) - etl = time_per_data_point * data_points_remaining + + time_per_invocation = ((current - self._start_time) / self._runs_completed) + etl = time_per_invocation * self._runs_remaining sec = etl % 60 minute = (etl - sec) / 60 % 60 hour = (etl - sec - minute) / 60 / 60 From bda87a77b38ac6085bafa3a0948ffa257d38d59a Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sun, 17 Jun 2018 08:23:06 +1000 Subject: [PATCH 15/25] Add custom wrapper for humanfriendly, to use it for errors Signed-off-by: Stefan Marr --- rebench/configurator.py | 12 +++++------ rebench/executor.py | 11 +++++----- rebench/rebench.py | 6 ++---- rebench/reporter.py | 6 +++--- rebench/ui.py | 48 
+++++++++++++++++++++++++++++++++++++++-- 5 files changed, 61 insertions(+), 22 deletions(-) diff --git a/rebench/configurator.py b/rebench/configurator.py index fa5962a3..26c29438 100644 --- a/rebench/configurator.py +++ b/rebench/configurator.py @@ -21,14 +21,12 @@ import subprocess from os.path import dirname -from humanfriendly.terminal import warning - from .model.experiment import Experiment from .model.exp_run_details import ExpRunDetails from .model.exp_variables import ExpVariables from .model.reporting import Reporting from .model.virtual_machine import VirtualMachine -from .ui import UIError, DETAIL_INDENT +from .ui import UIError, DETAIL_INDENT, error class _VMFilter(object): @@ -199,10 +197,10 @@ def _process_cli_options(self): logging.getLogger().setLevel(logging.ERROR) if self._options.use_nice and not can_set_niceness(): - warning("Error: Process niceness could not be set. " - "To execute benchmarks with highest priority, " - "you might need root/admin rights.") - warning("Deactivated usage of nice command.") + error("Error: Process niceness could not be set. " + "To execute benchmarks with highest priority, " + "you might need root/admin rights.") + error("Deactivated usage of nice command.") self._options.use_nice = False @property diff --git a/rebench/executor.py b/rebench/executor.py index 4f2a6dd1..72004ee1 100644 --- a/rebench/executor.py +++ b/rebench/executor.py @@ -32,12 +32,11 @@ from threading import Thread, RLock from humanfriendly import Spinner -from humanfriendly.terminal import warning from . import subprocess_with_timeout as subprocess_timeout from .interop.adapter import ExecutionDeliveredNoResults from .statistics import StatisticProperties, mean -from .ui import DETAIL_INDENT +from .ui import DETAIL_INDENT, error class FailedBuilding(Exception): @@ -70,7 +69,7 @@ def _process_remaining_runs(self, runs): def _indicate_progress(self, completed_task, run): if not self._progress_spinner: return - + totals = run.get_total_values() if completed_task: self._progress += 1 @@ -429,13 +428,13 @@ def _generate_data_point(self, cmdline, gauge_adapter, run_id, except OSError as err: run_id.fail_immediately() if err.errno == 2: - warning( + error( ("Failed executing a run." + DETAIL_INDENT + "It failed with: %s." 
+ DETAIL_INDENT + "File: %s") % (err.strerror, err.filename)) else: - warning(str(err)) - warning( + error(str(err)) + error( (DETAIL_INDENT + "Cmd: %s" + DETAIL_INDENT + "Cwd: %s") % (cmdline, run_id.location)) return True diff --git a/rebench/rebench.py b/rebench/rebench.py index 05f2bf53..c40e5a5e 100755 --- a/rebench/rebench.py +++ b/rebench/rebench.py @@ -32,15 +32,13 @@ from argparse import ArgumentParser, RawDescriptionHelpFormatter, SUPPRESS -from humanfriendly.terminal import warning - from .executor import Executor, BatchScheduler, RoundRobinScheduler, \ RandomScheduler from .persistence import DataStore from .reporter import CliReporter from .configurator import Configurator, load_config from .configuration_error import ConfigurationError -from .ui import UIError +from .ui import UIError, error class ReBench(object): @@ -223,7 +221,7 @@ def main_func(): logging.info("Aborted by user request") return -1 except UIError as err: - warning(err.message) + error(err.message) return -1 diff --git a/rebench/reporter.py b/rebench/reporter.py index 743448c3..153193da 100644 --- a/rebench/reporter.py +++ b/rebench/reporter.py @@ -26,7 +26,7 @@ import json import re -from .ui import warning_low_priority +from .ui import warning try: from http.client import HTTPException @@ -155,11 +155,11 @@ def run_completed(self, run_id, statistics, cmdline): self._runs_remaining -= 1 if run_id.min_iteration_time and statistics.mean < run_id.min_iteration_time: - warning_low_priority( + warning( ("WARNING: measured mean is lower than min_iteration_time (%s) " "\t mean: %.1f\trun id: %s") % (run_id.min_iteration_time, statistics.mean, run_id.as_simple_string())) - warning_low_priority("Cmd: %s" % cmdline) + warning("Cmd: %s" % cmdline) def report_job_completed(self, run_ids): print("[%s] Job completed" % datetime.now()) diff --git a/rebench/ui.py b/rebench/ui.py index 73e02fae..2c3e8490 100644 --- a/rebench/ui.py +++ b/rebench/ui.py @@ -20,18 +20,62 @@ import sys from humanfriendly.compat import coerce_string -from humanfriendly.terminal import terminal_supports_colors, ansi_wrap, auto_encode +from humanfriendly.terminal import terminal_supports_colors, ansi_wrap,\ + auto_encode, warning as hf_warning, output as hf_output DETAIL_INDENT = "\n " +DETAIL_INDENT_WON = " " -def warning_low_priority(text, *args, **kw): +_verbose_output = False +_debug_output = False + + +def set_verbose_debug_mode(verbose, debug): + global _verbose_output, _debug_output + _verbose_output = verbose + _debug_output = debug + + +def output(text, *args, **kw): + hf_output(text, *args, **kw) + + +def warning(text, *args, **kw): text = coerce_string(text) if terminal_supports_colors(sys.stderr): text = ansi_wrap(text, color='magenta') auto_encode(sys.stderr, text + '\n', *args, **kw) +def error(text, *args, **kw): + return hf_warning(text, *args, **kw) + + +def verbose_output_info(text, *args, **kw): + if _verbose_output: + text = coerce_string(text) + auto_encode(sys.stdout, text + '\n', *args, **kw) + + +def verbose_error_info(text, *args, **kw): + if _verbose_output: + text = coerce_string(text) + auto_encode(sys.stderr, text + '\n', *args, **kw) + + +def debug_output_info(text, *args, **kw): + if _debug_output: + text = coerce_string(text) + auto_encode(sys.stdout, text + '\n', *args, **kw) + + +def debug_error_info(text, *args, **kw): + if _debug_output: + text = coerce_string(text) + auto_encode(sys.stderr, text + '\n', *args, **kw) + + class UIError(Exception): def __init__(self, message, exception): From 
31af765b133c1aef5f20af308687adb5a2510aeb Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Mon, 18 Jun 2018 08:35:15 +1200 Subject: [PATCH 16/25] Removed unused setup code in configurator_test Signed-off-by: Stefan Marr --- rebench/tests/configurator_test.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/rebench/tests/configurator_test.py b/rebench/tests/configurator_test.py index 01b8c558..6d3bf561 100644 --- a/rebench/tests/configurator_test.py +++ b/rebench/tests/configurator_test.py @@ -18,7 +18,6 @@ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import unittest -import logging from ..configurator import Configurator, load_config from ..persistence import DataStore @@ -27,14 +26,6 @@ class ConfiguratorTest(ReBenchTestCase): - def setUp(self): - super(ConfiguratorTest, self).setUp() - self._logging_error = logging.error - - def tearDown(self): - super(ConfiguratorTest, self).tearDown() - logging.error = self._logging_error # restore normal logging - def test_experiment_name_from_cli(self): cnf = Configurator(load_config(self._path + '/test.conf'), DataStore(), None, 'Test') From 32ee54f8146fa798817dd7a294c29198a198ab8d Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Mon, 18 Jun 2018 08:41:11 +1200 Subject: [PATCH 17/25] Fix semantics of warmup setting - ensure that data is kept in data file TODO: need to add a test for this Signed-off-by: Stefan Marr --- rebench/executor.py | 3 ++- rebench/model/run_id.py | 5 +++-- rebench/rebench-schema.yml | 5 ++--- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/rebench/executor.py b/rebench/executor.py index 72004ee1..5ff903dd 100644 --- a/rebench/executor.py +++ b/rebench/executor.py @@ -465,8 +465,9 @@ def _eval_output(self, output, run_id, gauge_adapter, cmdline): for data_point in data_points: if warmup is not None and warmup > 0: warmup -= 1 + run_id.add_data_point(data_point, True) else: - run_id.add_data_point(data_point) + run_id.add_data_point(data_point, False) # only log the last num_points_to_show results if i >= num_points - num_points_to_show: logging.debug("Run %s:%s result=%s" % ( diff --git a/rebench/model/run_id.py b/rebench/model/run_id.py index 225135f0..582717d8 100644 --- a/rebench/model/run_id.py +++ b/rebench/model/run_id.py @@ -156,8 +156,9 @@ def add_persistence(self, persistence): def loaded_data_point(self, data_point): self._data_points.append(data_point) - def add_data_point(self, data_point): - self._data_points.append(data_point) + def add_data_point(self, data_point, warmup): + if not warmup: + self._data_points.append(data_point) for persistence in self._persistence: persistence.persist_data_point(data_point) diff --git a/rebench/rebench-schema.yml b/rebench/rebench-schema.yml index 067f9299..092a5b37 100644 --- a/rebench/rebench-schema.yml +++ b/rebench/rebench-schema.yml @@ -20,9 +20,8 @@ schema;runs_type: warmup: type: int desc: | - Consider the first N iterations as warmup. - This is used by reporters, for instance to discard these - N measurements before calculating statistics. + Consider the first N iterations as warmup and ignore them in summary + statistics. Note they are still persistet in the data file. 
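
[Editor's aside -- illustration only, not part of the patch series.] The revised warmup semantics of PATCH 17, stated in isolation: every data point is still written to the data file, but the first N (warmup) iterations are excluded from the values that feed the summary statistics. The helper below is a standalone sketch of that rule, not ReBench code.

    def split_warmup(measured_values, warmup):
        # everything is persisted; only the post-warmup values feed the statistics
        for_statistics = measured_values[warmup:] if warmup else list(measured_values)
        return for_statistics, list(measured_values)

    for_stats, persisted = split_warmup([312.0, 208.5, 205.1, 206.9], warmup=1)
    assert for_stats == [208.5, 205.1, 206.9]          # first iteration ignored in statistics
    assert persisted == [312.0, 208.5, 205.1, 206.9]   # but still kept in the data file
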
min_iteration_time: type: int From 940cb4a0bc904486a603ccddd0441df94196aa0a Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Mon, 18 Jun 2018 08:43:26 +1200 Subject: [PATCH 18/25] Move run time estimation to executor Signed-off-by: Stefan Marr --- rebench/executor.py | 38 ++++++++++++++++++++++++++++---------- rebench/reporter.py | 19 ------------------- 2 files changed, 28 insertions(+), 29 deletions(-) diff --git a/rebench/executor.py b/rebench/executor.py index 5ff903dd..2e6bfb63 100644 --- a/rebench/executor.py +++ b/rebench/executor.py @@ -30,13 +30,14 @@ import subprocess import sys from threading import Thread, RLock +from time import time from humanfriendly import Spinner from . import subprocess_with_timeout as subprocess_timeout from .interop.adapter import ExecutionDeliveredNoResults from .statistics import StatisticProperties, mean -from .ui import DETAIL_INDENT, error +from .ui import DETAIL_INDENT, error, verbose_output_info, warning, debug_output_info class FailedBuilding(Exception): @@ -51,8 +52,10 @@ class RunScheduler(object): def __init__(self, executor): self._executor = executor - self._progress = 0 + self._runs_completed = 0 self._progress_spinner = None + self._start_time = time() + self._total_num_runs = 0 @staticmethod def _filter_out_completed_runs(runs): @@ -66,29 +69,44 @@ def _process_remaining_runs(self, runs): """Abstract, to be implemented""" pass + def _estimate_time_left(self): + if self._runs_completed == 0: + return 0, 0, 0 + + current = time() + time_per_invocation = ((current - self._start_time) / self._runs_completed) + etl = time_per_invocation * (self._total_num_runs - self._runs_completed) + sec = etl % 60 + minute = (etl - sec) / 60 % 60 + hour = (etl - sec - minute) / 60 / 60 + return floor(hour), floor(minute), floor(sec) + def _indicate_progress(self, completed_task, run): if not self._progress_spinner: return totals = run.get_total_values() if completed_task: - self._progress += 1 + self._runs_completed += 1 if totals: art_mean = mean(run.get_total_values()) else: art_mean = 0 - label = "Running Benchmarks: %70s\tmean: %10.1f" \ - % (run.as_simple_string(), art_mean) - self._progress_spinner.step(self._progress, label) + + hour, minute, sec = self._estimate_time_left() + + label = "Running Benchmarks: %70s\tmean: %10.1f\ttime left: %02d:%02d:%02d" \ + % (run.as_simple_string(), art_mean, hour, minute, sec) + self._progress_spinner.step(self._runs_completed, label) def execute(self): - total_num_runs = len(self._executor.runs) + self._total_num_runs = len(self._executor.runs) runs = self._filter_out_completed_runs(self._executor.runs) - completed_runs = total_num_runs - len(runs) - self._progress = completed_runs + completed_runs = self._total_num_runs - len(runs) + self._runs_completed = completed_runs - with Spinner(label="Running Benchmarks", total=total_num_runs) as spinner: + with Spinner(label="Running Benchmarks", total=self._total_num_runs) as spinner: self._progress_spinner = spinner spinner.step(completed_runs) self._process_remaining_runs(runs) diff --git a/rebench/reporter.py b/rebench/reporter.py index 153193da..c37a0b54 100644 --- a/rebench/reporter.py +++ b/rebench/reporter.py @@ -170,25 +170,6 @@ def set_total_number_of_runs(self, num_runs): self._num_runs = num_runs self._runs_remaining = num_runs - def start_run(self, run_id): - if self._runs_completed > 0: - current = time() - - time_per_invocation = ((current - self._start_time) / self._runs_completed) - etl = time_per_invocation * self._runs_remaining - sec = etl % 60 - 
minute = (etl - sec) / 60 % 60 - hour = (etl - sec - minute) / 60 / 60 - print(("Run %s \t runs left: %00d \t " + - "time left: %02d:%02d:%02d") % (run_id.benchmark.name, - self._runs_remaining, - floor(hour), floor(minute), - floor(sec))) - else: - self._start_time = time() - print("Run %s \t runs left: %d" % (run_id.benchmark.name, - self._runs_remaining)) - def _output_stats(self, output_list, run_id, statistics): if not statistics: return From 06aca8542e51d5cd32118cc68cdb864621fde33e Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Mon, 18 Jun 2018 08:45:13 +1200 Subject: [PATCH 19/25] Output subprocesses data in debug mode only Signed-off-by: Stefan Marr --- rebench/executor.py | 16 ++++++++++++---- rebench/rebench.py | 2 +- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/rebench/executor.py b/rebench/executor.py index 2e6bfb63..b6efbe23 100644 --- a/rebench/executor.py +++ b/rebench/executor.py @@ -268,12 +268,12 @@ def acquire_work(self): class Executor(object): def __init__(self, runs, use_nice, do_builds, include_faulty=False, - verbose=False, scheduler=BatchScheduler, build_log=None): + debug=False, scheduler=BatchScheduler, build_log=None): self._runs = runs self._use_nice = use_nice self._do_builds = do_builds self._include_faulty = include_faulty - self._verbose = verbose + self._debug = debug self._scheduler = self._create_scheduler(scheduler) self._build_log = build_log @@ -332,9 +332,13 @@ def _execute_build_cmd(self, build_command, name, run_id): script = build_command.command + debug_output_info("Starting build" + + DETAIL_INDENT + "Cmd: " + script + + DETAIL_INDENT + "Cwd: " + path) + proc = subprocess.Popen('/bin/sh', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - cwd=path) + cwd=path, verbose=self._debug) proc.stdin.write(str.encode(script)) proc.stdin.close() @@ -438,9 +442,13 @@ def _generate_data_point(self, cmdline, gauge_adapter, run_id, run_id.indicate_invocation_start() try: + debug_output_info("Starting run" + + DETAIL_INDENT + "Cmd: " + cmdline + + DETAIL_INDENT + "Cwd: " + run_id.location) + (return_code, output, _) = subprocess_timeout.run( cmdline, cwd=run_id.location, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, shell=True, verbose=self._verbose, + stderr=subprocess.STDOUT, shell=True, verbose=self._debug, timeout=run_id.max_invocation_time) output = output.decode('utf-8') except OSError as err: diff --git a/rebench/rebench.py b/rebench/rebench.py index c40e5a5e..2ef2b5aa 100755 --- a/rebench/rebench.py +++ b/rebench/rebench.py @@ -208,7 +208,7 @@ def execute_experiment(self): executor = Executor(runs, self._config.use_nice, self._config.do_builds, self._config.options.include_faulty, - self._config.options.verbose, + self._config.options.debug, scheduler_class, self._config.build_log) return executor.execute() From cd3ea5550d83a3a3be64ba2c6d38444742449085 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Mon, 18 Jun 2018 08:48:48 +1200 Subject: [PATCH 20/25] Replace logging with new output functions for more homogeneous UI - reduce redundancy in output - add info need to possibly reproduce issues - structure output for readability Signed-off-by: Stefan Marr --- rebench/configurator.py | 17 +---- rebench/executor.py | 50 +++++++++----- rebench/model/data_point.py | 3 + rebench/model/run_id.py | 16 ++--- rebench/model/termination_check.py | 14 ++-- rebench/persistence.py | 18 +++-- rebench/rebench.py | 9 ++- rebench/reporter.py | 104 ++++++++++------------------- 8 files changed, 103 insertions(+), 128 
deletions(-) diff --git a/rebench/configurator.py b/rebench/configurator.py index 26c29438..7a5b951f 100644 --- a/rebench/configurator.py +++ b/rebench/configurator.py @@ -17,7 +17,6 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. -import logging import subprocess from os.path import dirname @@ -26,7 +25,7 @@ from .model.exp_variables import ExpVariables from .model.reporting import Reporting from .model.virtual_machine import VirtualMachine -from .ui import UIError, DETAIL_INDENT, error +from .ui import UIError, DETAIL_INDENT, error, set_verbose_debug_mode class _VMFilter(object): @@ -118,6 +117,7 @@ def load_config(file_name): from pykwalify.errors import SchemaError # Disable most logging for pykwalify + import logging logging.getLogger('pykwalify').addHandler(logging.NullHandler()) try: @@ -183,18 +183,7 @@ def _process_cli_options(self): if self._options is None: return - if self._options.debug: - if self._options.verbose: - logging.basicConfig(level=logging.NOTSET) - logging.getLogger().setLevel(logging.NOTSET) - logging.debug("Enabled verbose debug output.") - else: - logging.basicConfig(level=logging.DEBUG) - logging.getLogger().setLevel(logging.DEBUG) - logging.debug("Enabled debug output.") - else: - logging.basicConfig(level=logging.ERROR) - logging.getLogger().setLevel(logging.ERROR) + set_verbose_debug_mode(self._options.verbose, self._options.debug) if self._options.use_nice and not can_set_niceness(): error("Error: Process niceness could not be set. " diff --git a/rebench/executor.py b/rebench/executor.py index b6efbe23..5d159cd4 100644 --- a/rebench/executor.py +++ b/rebench/executor.py @@ -17,11 +17,10 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
-from __future__ import with_statement, print_function +from __future__ import with_statement from collections import deque from math import floor -import logging from multiprocessing import cpu_count import os import pkgutil @@ -235,7 +234,6 @@ def _process_remaining_runs(self, runs): exceptions.append(thread.exception) if exceptions: - print(exceptions) if len(exceptions) == 1: raise exceptions[0] else: @@ -390,7 +388,6 @@ def _execute_build_cmd(self, build_command, name, run_id): def execute_run(self, run_id): termination_check = run_id.get_termination_check() - run_id.log() run_id.report_start_run() gauge_adapter = self._get_gauge_adapter_instance( @@ -408,12 +405,15 @@ def execute_run(self, run_id): if not terminate: terminate = self._generate_data_point(cmdline, gauge_adapter, run_id, termination_check) - stats = StatisticProperties(run_id.get_total_values()) - logging.debug("Run: #%d" % stats.num_samples) if terminate: run_id.report_run_completed(stats, cmdline) + if run_id.min_iteration_time and stats.mean < run_id.min_iteration_time: + warning( + ("Warning: The mean (%.1f) is lower than min_iteration_time (%d)" + "%sCmd: %s") + % (stats.mean, run_id.min_iteration_time, DETAIL_INDENT, cmdline)) return terminate @@ -437,7 +437,6 @@ def _get_gauge_adapter_instance(self, adapter_name): def _generate_data_point(self, cmdline, gauge_adapter, run_id, termination_check): - print(cmdline) # execute the external program here run_id.indicate_invocation_start() @@ -460,18 +459,30 @@ def _generate_data_point(self, cmdline, gauge_adapter, run_id, DETAIL_INDENT + "File: %s") % (err.strerror, err.filename)) else: error(str(err)) - error( - (DETAIL_INDENT + "Cmd: %s" + - DETAIL_INDENT + "Cwd: %s") % (cmdline, run_id.location)) + error((DETAIL_INDENT + "Cmd: %s" + + DETAIL_INDENT + "Cwd: %s") % (cmdline, run_id.location)) return True if return_code != 0 and not self._include_faulty: run_id.indicate_failed_execution() run_id.report_run_failed(cmdline, return_code, output) if return_code == 126: - logging.error(("Could not execute %s. A likely cause is that " - "the file is not marked as executable.") - % run_id.benchmark.vm.name) + error(("Error: Could not execute %s. A likely cause is that " + "the file is not marked as executable. Return code: %d") + % (run_id.benchmark.vm.name, return_code)) + elif return_code == -9: + error("Run timed out. Return code: %d" % return_code) + error(DETAIL_INDENT + "max_invocation_time: %s" % run_id.max_invocation_time) + else: + error("Run failed. Return code: %d" % return_code) + + error((DETAIL_INDENT + "Cmd: %s" + + DETAIL_INDENT + "Cwd: %s") % (cmdline, run_id.location)) + + if output and output.strip(): + lines = output.split('\n') + error(DETAIL_INDENT + "Output: ") + error(DETAIL_INDENT + DETAIL_INDENT.join(lines) + "\n\n") else: self._eval_output(output, run_id, gauge_adapter, cmdline) @@ -485,8 +496,12 @@ def _eval_output(self, output, run_id, gauge_adapter, cmdline): num_points_to_show = 20 num_points = len(data_points) + + msg = "\nCompleted invocation of " + run_id.as_simple_string() + msg += DETAIL_INDENT + "Cmd: " + cmdline + if num_points > num_points_to_show: - logging.debug("Recorded %d data points, show last 20..." % num_points) + msg += DETAIL_INDENT + "Recorded %d data points, show last 20..." 
% num_points i = 0 for data_point in data_points: if warmup is not None and warmup > 0: @@ -496,11 +511,12 @@ def _eval_output(self, output, run_id, gauge_adapter, cmdline): run_id.add_data_point(data_point, False) # only log the last num_points_to_show results if i >= num_points - num_points_to_show: - logging.debug("Run %s:%s result=%s" % ( - run_id.benchmark.suite.vm.name, run_id.benchmark.name, - data_point.get_total_value())) + msg += DETAIL_INDENT + "%4d\t%s%s" % ( + i + 1, data_point.get_total_value(), data_point.get_total_unit()) i += 1 + run_id.indicate_successful_execution() + verbose_output_info(msg) except ExecutionDeliveredNoResults: run_id.indicate_failed_execution() run_id.report_run_failed(cmdline, 0, output) diff --git a/rebench/model/data_point.py b/rebench/model/data_point.py index 04bbf63c..668ee077 100644 --- a/rebench/model/data_point.py +++ b/rebench/model/data_point.py @@ -41,3 +41,6 @@ def get_measurements(self): def get_total_value(self): return self._total.value if self._total else None + + def get_total_unit(self): + return self._total.unit diff --git a/rebench/model/run_id.py b/rebench/model/run_id.py index 582717d8..7e34132d 100644 --- a/rebench/model/run_id.py +++ b/rebench/model/run_id.py @@ -17,7 +17,6 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. -import logging import re from .benchmark import Benchmark @@ -99,12 +98,6 @@ def location(self): return None return self._expand_vars(self._benchmark.suite.location) - def log(self): - msg = "Run Config: number of data points: %d" % self.get_number_of_data_points() - if self._benchmark.run_details.min_iteration_time: - msg += ", min_iteration_time: %dms" % self._benchmark.run_details.min_iteration_time - logging.debug(msg) - def requires_warmup(self): return self._benchmark.run_details.warmup > 0 @@ -174,6 +167,11 @@ def discard_data_points(self): def get_total_values(self): return [dp.get_total_value() for dp in self._data_points] + def get_total_unit(self): + if not self._data_points: + return None + return self._data_points[0].get_total_unit() + def get_termination_check(self): if self._termination_check is None: self._termination_check = TerminationCheck(self._benchmark) @@ -211,7 +209,7 @@ def _expand_vars(self, string): msg = ("The configuration of %s contains improper Python format strings." % self._benchmark.name) msg += DETAIL_INDENT + ("The command line configured is: %s" % string) - msg += DETAIL_INDENT + ("%s is not supported as key." % err.message) + msg += DETAIL_INDENT + ("%s is not supported as key." % err) msg += DETAIL_INDENT msg += "Only benchmark, input, variable, cores, and warmup are supported." 
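The unsupported-key message built here comes from Python's %-style mapping expansion of the configured command template: any %(name)s placeholder outside the supported set raises a KeyError, which the code below wraps in a UIError. A small self-contained illustration of that failure mode (the template and values are made up for the example):

# Illustration only: expanding a command template with an unsupported key.
template = "./run-benchmark %(benchmark)s %(cores)s %(unknown_key)s"
values = {"benchmark": "Bench1", "input": 2,
          "variable": "val1", "cores": 1, "warmup": 0}
try:
    cmdline = template % values
except KeyError as err:
    # str(err) keeps the quotes, e.g. "'unknown_key' is not supported as key."
    print("%s is not supported as key." % err)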
raise UIError(msg, err) @@ -250,7 +248,7 @@ def _report_format_issue_and_exit(self, cmdline, err): # figure out which format misses a conversion type msg += DETAIL_INDENT + ("The command line configured is: %s" % cmdline) - msg += DETAIL_INDENT + ("Error: %s" % err.message) + msg += DETAIL_INDENT + ("Error: %s" % err) without_conversion_type = re.findall( r"%\(.*?\)(?![diouxXeEfFgGcrs%])", cmdline) if without_conversion_type: diff --git a/rebench/model/termination_check.py b/rebench/model/termination_check.py index 319fd501..d6d4335f 100644 --- a/rebench/model/termination_check.py +++ b/rebench/model/termination_check.py @@ -17,7 +17,7 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. -import logging +from ..ui import verbose_error_info class TerminationCheck(object): @@ -53,15 +53,17 @@ def has_too_many_failures(self, number_of_data_points): def should_terminate(self, number_of_data_points): if self._fail_immediately: - logging.info( + verbose_error_info( "%s was marked to fail immediately" % self._benchmark.name) if self.fails_consecutively(): - logging.error(("Three executions of %s have failed in a row, " + - "benchmark is aborted") % self._benchmark.name) + verbose_error_info( + ("Three executions of %s have failed in a row, " + "benchmark is aborted") % self._benchmark.name) return True elif self.has_too_many_failures(number_of_data_points): - logging.error("Many runs of %s are failing, benchmark is aborted." - % self._benchmark.name) + verbose_error_info( + "Many runs of %s are failing, benchmark is aborted." + % self._benchmark.name) return True elif self._num_invocations >= self._benchmark.run_details.invocations: return True diff --git a/rebench/persistence.py b/rebench/persistence.py index 2e23bc5e..acc655a5 100644 --- a/rebench/persistence.py +++ b/rebench/persistence.py @@ -19,7 +19,6 @@ # IN THE SOFTWARE. import os import sys -import logging import subprocess import shutil from threading import Lock @@ -28,6 +27,7 @@ from .model.data_point import DataPoint from .model.measurement import Measurement from .model.run_id import RunId +from .ui import debug_error_info, DETAIL_INDENT, error, DETAIL_INDENT_WON class DataStore(object): @@ -104,7 +104,8 @@ def discard_data_of_runs(cls, runs): with open(filename, 'r') as data_file: lines = data_file.readlines() except IOError: - logging.info("Failed to open data file: %s" % filename) + debug_error_info( + "Tried to discard old data, but file does not seem to exist: %s" % filename) continue for measure in measures: @@ -147,8 +148,8 @@ def _load_data(self): with open(self._data_filename, 'r') as data_file: self._process_lines(data_file) except IOError: - logging.info("No data loaded %s does not exist." - % self._data_filename) + debug_error_info("No data loaded, since %s does not exist." + % self._data_filename) def _process_lines(self, data_file): """ @@ -156,6 +157,7 @@ def _process_lines(self, data_file): measurement is always the last one serialized for a data point. 
""" errors = set() + data_point = None previous_run_id = None line_number = 0 @@ -182,9 +184,11 @@ def _process_lines(self, data_file): except ValueError as err: msg = str(err) + if not errors: + debug_error_info("Failed loading data from data file: " + data_file) if msg not in errors: # Configuration is not available, skip data point - logging.log(logging.DEBUG - 1, msg) + debug_error_info(DETAIL_INDENT_WON + msg) errors.add(msg) line_number += 1 @@ -218,8 +222,8 @@ def _insert_shebang_line(self): shutil.copyfileobj(open(renamed_file, 'r'), data_file) os.remove(renamed_file) except Exception as err: # pylint: disable=broad-except - logging.error("An error occurred " + - "while trying to insert a shebang line: %s", err) + error("Error: While inserting a shebang line into the data file.%s%s" + % (DETAIL_INDENT, err)) _SEP = "\t" # separator between serialized parts of a measurement diff --git a/rebench/rebench.py b/rebench/rebench.py index 2ef2b5aa..115c986e 100755 --- a/rebench/rebench.py +++ b/rebench/rebench.py @@ -27,7 +27,6 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. -import logging import sys from argparse import ArgumentParser, RawDescriptionHelpFormatter, SUPPRESS @@ -38,7 +37,7 @@ from .reporter import CliReporter from .configurator import Configurator, load_config from .configuration_error import ConfigurationError -from .ui import UIError, error +from .ui import UIError, error, debug_error_info, verbose_output_info class ReBench(object): @@ -79,7 +78,7 @@ def shell_options(self): default=False, help='Enable debug output') basics.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=False, - help='Output more details in the report.') + help='Output more details during execution.') execution = parser.add_argument_group( 'Execution Options', 'Adapt how ReBench executes benchmarks') @@ -193,7 +192,7 @@ def run(self, argv=None): return self.execute_experiment() def execute_experiment(self): - logging.debug("execute experiment: %s" % self._config.experiment_name) + verbose_output_info("Execute experiment: %s" % self._config.experiment_name) # first load old data if available if self._config.options.clean: @@ -218,7 +217,7 @@ def main_func(): try: return 0 if ReBench().run() else -1 except KeyboardInterrupt: - logging.info("Aborted by user request") + debug_error_info("Aborted by user request") return -1 except UIError as err: error(err.message) diff --git a/rebench/reporter.py b/rebench/reporter.py index c37a0b54..a852f89b 100644 --- a/rebench/reporter.py +++ b/rebench/reporter.py @@ -26,7 +26,9 @@ import json import re -from .ui import warning +from humanfriendly.tables import format_pretty_table + +from .ui import output, DETAIL_INDENT, verbose_error_info try: from http.client import HTTPException @@ -89,24 +91,17 @@ def _path_to_string(path): out.append(str(item)) return " ".join(out) + " " - def _generate_all_output(self, run_ids): + @staticmethod + def _generate_all_output(run_ids): rows = [] - col_width = None for run_id in run_ids: stats = StatisticProperties(run_id.get_total_values()) out = run_id.as_str_list() - self._output_stats(out, run_id, stats) - if col_width is None: - col_width = [0] * len(out) + out.append(stats.mean) rows.append(out) - col_width = [max(len(col_content), col) - for col_content, col in zip(out, col_width)] - for row in rows: - result = " ".join([col.ljust(width) - for col, width in zip(row, 
col_width)]) - yield result + return rows class CliReporter(TextReporter): @@ -121,50 +116,17 @@ def __init__(self, executes_verbose): self._executes_verbose = executes_verbose def run_failed(self, run_id, cmdline, return_code, output): - # Additional information in debug mode - result = "[%s] Run failed: %s\n" % ( - datetime.now(), - " ".join(self._configuration_details(run_id))) - logging.debug(result) - - # Standard error output - if return_code == -9: - log_msg = "Run timed out. return_code: %s" - else: - log_msg = "Run failed return_code: %s" - - print(log_msg % return_code) - - print("Cmd: %s\n" % cmdline) - - if run_id.max_invocation_time: - logging.debug("max_invocation_time: %s" % run_id.max_invocation_time) - logging.debug("cwd: %s" % run_id.benchmark.suite.location) - - if not self._executes_verbose and output and output.strip(): - print("Output:\n%s\n" % output) + pass def run_completed(self, run_id, statistics, cmdline): - result = "[%s] Run completed: %s\n" % ( - datetime.now(), - " ".join(self._configuration_details(run_id, statistics))) - - logging.debug(result) - self._runs_completed += 1 self._runs_remaining -= 1 - if run_id.min_iteration_time and statistics.mean < run_id.min_iteration_time: - warning( - ("WARNING: measured mean is lower than min_iteration_time (%s) " - "\t mean: %.1f\trun id: %s") - % (run_id.min_iteration_time, statistics.mean, run_id.as_simple_string())) - warning("Cmd: %s" % cmdline) - def report_job_completed(self, run_ids): - print("[%s] Job completed" % datetime.now()) - for line in self._generate_all_output(run_ids): - print(line) + output(format_pretty_table( + self._generate_all_output(run_ids), + ['Benchmark', 'VM', 'Suite', 'Extra', 'Core', 'Size', 'Var', 'Mean'], + vertical_bar=' ')) def set_total_number_of_runs(self, num_runs): self._num_runs = num_runs @@ -295,8 +257,7 @@ def _send_payload(self, payload): socket = urlopen(self._cfg.url, payload) response = socket.read() socket.close() - logging.info("Results were sent to Codespeed, response was: " - + response) + return response def _send_to_codespeed(self, results): payload = urlencode({'json': json.dumps(results)}) @@ -307,25 +268,28 @@ def _send_to_codespeed(self, results): # sometimes Codespeed fails to accept a request because something # is not yet properly initialized, let's try again for those cases try: - self._send_payload(payload) + response = self._send_payload(payload) + verbose_error_info("Sent %d results to Codespeed, response was: %s" + % (len(results), response)) except (IOError, HTTPException) as error: - logging.error(str(error) + " This is most likely caused by " - "either a wrong URL in the config file, or an " - "environment not configured in Codespeed. URL: " - + self._cfg.url) - envs = list(set([i['environment'] - for i in results])) - projects = list(set([i['project'] - for i in results])) - benchmarks = list(set([i['benchmark'] - for i in results])) - executables = list(set([i['executable'] - for i in results])) - logging.error("Sent data included environments: %s " - "projects: %s benchmarks: %s executables: %s" - % (envs, projects, benchmarks, executables)) - - logging.info("Sent %d results to Codespeed." 
% len(results)) + envs = list(set([i['environment'] for i in results])) + projects = list(set([i['project'] for i in results])) + benchmarks = list(set([i['benchmark'] for i in results])) + executables = list(set([i['executable'] for i in results])) + msg = ("Data" + + DETAIL_INDENT + "environments: %s" + + DETAIL_INDENT + "projects: %s" + + DETAIL_INDENT + "benchmarks: %s" + + DETAIL_INDENT + "executables: %s") % ( + envs, projects, benchmarks, executables) + + error("Error: Reporting to Codespeed failed." + + DETAIL_INDENT + str(error) + + DETAIL_INDENT + "This is most likely caused by " + "either a wrong URL in the config file, or an " + "environment not configured in Codespeed." + + DETAIL_INDENT + "URL: " + self._cfg.url + + DETAIL_INDENT + msg) def _prepare_result(self, run_id): stats = StatisticProperties(run_id.get_total_values()) From 004f51b83d0450ae0d5f18616f7d114c9e72e24e Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Mon, 18 Jun 2018 08:49:10 +1200 Subject: [PATCH 21/25] Remove unused code from reporter Signed-off-by: Stefan Marr --- rebench/reporter.py | 59 +-------------------------------------------- 1 file changed, 1 insertion(+), 58 deletions(-) diff --git a/rebench/reporter.py b/rebench/reporter.py index a852f89b..ac97372d 100644 --- a/rebench/reporter.py +++ b/rebench/reporter.py @@ -17,12 +17,10 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. -from __future__ import with_statement, print_function +from __future__ import with_statement from datetime import datetime from time import time -from math import floor -import logging import json import re @@ -70,19 +68,6 @@ def start_run(self, run_id): class TextReporter(Reporter): - def _configuration_details(self, run_id, statistics=None): - result = ["\t".join(run_id.as_str_list()), " = "] - self._output_stats(result, run_id, statistics) - return result - - def _output_stats(self, output_list, _run_id, statistics): - if not statistics: - return - - for field, value in statistics.__dict__.iteritems(): - if not field.startswith('_'): - output_list.append("%s: %s " % (field, value)) - @staticmethod def _path_to_string(path): out = [path[0].as_simple_string()] @@ -132,48 +117,6 @@ def set_total_number_of_runs(self, num_runs): self._num_runs = num_runs self._runs_remaining = num_runs - def _output_stats(self, output_list, run_id, statistics): - if not statistics: - return - - if run_id.run_failed(): - output_list.append("run failed.") - output_list.append("") - output_list.append("") - output_list.append("") - else: - output_list.append("mean:") - output_list.append(("%.1f" % statistics.mean).rjust(8)) - - -class FileReporter(TextReporter): - """ should be mainly a log file - data is the responsibility of the data_aggregator - """ - - def __init__(self, filename): - super(FileReporter, self).__init__() - self._file = open(filename, 'a+') - - def run_failed(self, run_id, _cmdline, _return_code, _output): - result = "[%s] Run failed: %s\n" % ( - datetime.now(), - " ".join(self._configuration_details(run_id))) - self._file.writelines(result) - - def run_completed(self, run_id, statistics, cmdline): - result = "[%s] Run completed: %s\n" % ( - datetime.now(), - " ".join(self._configuration_details(run_id, statistics))) - self._file.writelines(result) - - def report_job_completed(self, run_ids): - self._file.write("[%s] Job completed\n" % datetime.now()) - for line in self._generate_all_output(run_ids): - 
self._file.write(line + "\n") - - self._file.close() - class CodespeedReporter(Reporter): """ From a3194ca484d15db889486da51df114f62d0b93b1 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Mon, 18 Jun 2018 10:20:47 +1200 Subject: [PATCH 22/25] Fix verbose mode for building Signed-off-by: Stefan Marr --- rebench/executor.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/rebench/executor.py b/rebench/executor.py index 5d159cd4..2d30e603 100644 --- a/rebench/executor.py +++ b/rebench/executor.py @@ -336,7 +336,7 @@ def _execute_build_cmd(self, build_command, name, run_id): proc = subprocess.Popen('/bin/sh', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - cwd=path, verbose=self._debug) + cwd=path) proc.stdin.write(str.encode(script)) proc.stdin.close() @@ -350,11 +350,15 @@ def _execute_build_cmd(self, build_command, name, run_id): if file_no == proc.stdout.fileno(): read = self._read(proc.stdout) if read: + if self._debug: + sys.stdout.write(read) log_file.write(name + '|STD:') log_file.write(read) elif file_no == proc.stderr.fileno(): read = self._read(proc.stderr) if read: + if self._debug: + sys.stderr.write(read) log_file.write(name + '|ERR:') log_file.write(read) From 90cbd73a94eb26448de6242b587c7c3b934bacee Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Mon, 18 Jun 2018 10:21:05 +1200 Subject: [PATCH 23/25] Set logging level for pykwalify to critical Signed-off-by: Stefan Marr --- rebench/configurator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/rebench/configurator.py b/rebench/configurator.py index 7a5b951f..4b532892 100644 --- a/rebench/configurator.py +++ b/rebench/configurator.py @@ -118,6 +118,7 @@ def load_config(file_name): # Disable most logging for pykwalify import logging + logging.getLogger('pykwalify').setLevel(logging.CRITICAL) logging.getLogger('pykwalify').addHandler(logging.NullHandler()) try: From 99d8252fd4632ec92e1d8342e603bb54c415a2a1 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Mon, 18 Jun 2018 10:25:06 +1200 Subject: [PATCH 24/25] Fix pylint Signed-off-by: Stefan Marr --- rebench/reporter.py | 7 +++---- rebench/ui.py | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/rebench/reporter.py b/rebench/reporter.py index ac97372d..cfb8d90e 100644 --- a/rebench/reporter.py +++ b/rebench/reporter.py @@ -19,7 +19,6 @@ # THE SOFTWARE. from __future__ import with_statement -from datetime import datetime from time import time import json import re @@ -100,7 +99,7 @@ def __init__(self, executes_verbose): self._runs_remaining = 0 self._executes_verbose = executes_verbose - def run_failed(self, run_id, cmdline, return_code, output): + def run_failed(self, run_id, cmdline, return_code, cmd_output): pass def run_completed(self, run_id, statistics, cmdline): @@ -224,12 +223,12 @@ def _send_to_codespeed(self, results): DETAIL_INDENT + "projects: %s" + DETAIL_INDENT + "benchmarks: %s" + DETAIL_INDENT + "executables: %s") % ( - envs, projects, benchmarks, executables) + envs, projects, benchmarks, executables) error("Error: Reporting to Codespeed failed." + DETAIL_INDENT + str(error) + DETAIL_INDENT + "This is most likely caused by " - "either a wrong URL in the config file, or an " + "either a wrong URL in the config file, or an " "environment not configured in Codespeed." 
+ DETAIL_INDENT + "URL: " + self._cfg.url + DETAIL_INDENT + msg) diff --git a/rebench/ui.py b/rebench/ui.py index 2c3e8490..1cd32275 100644 --- a/rebench/ui.py +++ b/rebench/ui.py @@ -32,7 +32,7 @@ def set_verbose_debug_mode(verbose, debug): - global _verbose_output, _debug_output + global _verbose_output, _debug_output # pylint: disable=global-statement _verbose_output = verbose _debug_output = debug From 6ebaa05f0be45062c8181c0c753b4aa659de7997 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Mon, 18 Jun 2018 15:03:12 +1200 Subject: [PATCH 25/25] Fix build cmd logging Signed-off-by: Stefan Marr --- rebench/executor.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/rebench/executor.py b/rebench/executor.py index 2d30e603..e71b4732 100644 --- a/rebench/executor.py +++ b/rebench/executor.py @@ -445,9 +445,10 @@ def _generate_data_point(self, cmdline, gauge_adapter, run_id, run_id.indicate_invocation_start() try: - debug_output_info("Starting run" + - DETAIL_INDENT + "Cmd: " + cmdline + - DETAIL_INDENT + "Cwd: " + run_id.location) + msg = "Starting run" + DETAIL_INDENT + "Cmd: " + cmdline + if run_id.location: + msg += DETAIL_INDENT + "Cwd: " + run_id.location + debug_output_info(msg) (return_code, output, _) = subprocess_timeout.run( cmdline, cwd=run_id.location, stdout=subprocess.PIPE,