Implement the interpretation of the new schema
- make naming more consistent with high-level concepts: Benchmark instead of BenchmarkConfig

Signed-off-by: Stefan Marr <git@stefan-marr.de>
smarr committed Jun 16, 2018
1 parent 431e736 commit dbd4999
Showing 29 changed files with 663 additions and 456 deletions.
164 changes: 104 additions & 60 deletions rebench/configurator.py
@@ -23,8 +23,11 @@
import traceback
from os.path import dirname

from .model.runs_config import RunsConfig
from .model.experiment import Experiment
from .model.experiment import Experiment
from .model.exp_run_details import ExpRunDetails
from .model.exp_variables import ExpVariables
from .model.reporting import Reporting
from .model.virtual_machine import VirtualMachine


class _VMFilter(object):
@@ -33,7 +36,7 @@ def __init__(self, name):
self._name = name

def matches(self, bench):
return bench.vm.name == self._name
return bench.suite.vm.name == self._name


class _SuiteFilter(object):
@@ -143,33 +146,46 @@ def load_config(file_name):

class Configurator(object):

def __init__(self, file_name, data_store, cli_options=None,
cli_reporter=None, exp_name=None, standard_data_file=None,
run_filter=None):
self._raw_config = self._load_config(file_name)
if standard_data_file:
self._raw_config['standard_data_file'] = standard_data_file
def __init__(self, raw_config, data_store, cli_options=None, cli_reporter=None,
exp_name=None, data_file=None, build_log=None, run_filter=None):
self._raw_config_for_debugging = raw_config # kept around for debugging only

self._options = self._process_cli_options(cli_options)
self._exp_name = exp_name
self._build_log = build_log or raw_config.get('build_log', 'build.log')
self._data_file = data_file or raw_config.get('standard_data_file', 'rebench.data')
self._exp_name = exp_name or raw_config.get('standard_experiment', 'all')

self.runs = RunsConfig(**self._raw_config.get('runs', {}))
self._root_run_details = ExpRunDetails.compile(
raw_config.get('runs', {}), ExpRunDetails.default())
self._root_reporting = Reporting.compile(
raw_config.get('reporting', {}), Reporting.empty(), cli_options)

self._options = cli_options
self._process_cli_options()

self._cli_reporter = cli_reporter

self._data_store = data_store
self._build_commands = dict()
self._experiments = self._compile_experiments(cli_reporter,
_RunFilter(run_filter))

self._run_filter = _RunFilter(run_filter)

self._vms = self._compile_vms(raw_config.get('virtual_machines', {}))

self._suites_config = raw_config.get('benchmark_suites', {})

experiments = raw_config.get('experiments', {})
self._experiments = self._compile_experiments(experiments)

@property
def build_log(self):
return self._raw_config.get('build_log', 'build.log')
return self._build_log

def _process_cli_options(self, options):
if options is None:
return None
def _process_cli_options(self):
if self._options is None:
return

if options.debug:
if options.verbose:
if self._options.debug:
if self._options.verbose:
logging.basicConfig(level=logging.NOTSET)
logging.getLogger().setLevel(logging.NOTSET)
logging.debug("Enabled verbose debug output.")
@@ -181,31 +197,66 @@ def _process_cli_options(self, options):
logging.basicConfig(level=logging.ERROR)
logging.getLogger().setLevel(logging.ERROR)

if options.use_nice:
if self._options.use_nice:
if not can_set_niceness():
logging.error("Process niceness cannot be set currently. "
"To execute benchmarks with highest priority, "
"you might need root/admin rights.")
logging.error("Deactivated usage of nice command.")
options.use_nice = False
self._options.use_nice = False

@property
def use_nice(self):
return self._options is not None and self._options.use_nice

@property
def do_builds(self):
return self._options is not None and self._options.do_builds

@property
def discard_old_data(self):
return self._options is not None and self._options.clean

@property
def experiment_name(self):
return self._exp_name

return options
@property
def data_file(self):
return self._data_file

@property
def reporting(self):
return self._root_reporting

@property
def run_details(self):
return self._root_run_details

@property
def options(self):
return self._options

@property
def use_nice(self):
return self.options is not None and self.options.use_nice
def build_commands(self):
return self._build_commands

@property
def do_builds(self):
return self.options is not None and self.options.do_builds
def run_filter(self):
return self._run_filter

def experiment_name(self):
return self._exp_name or self._raw_config['standard_experiment']
@property
def data_store(self):
return self._data_store

def has_vm(self, vm_name):
return vm_name in self._vms

def get_vm(self, vm_name):
return self._vms[vm_name]

def get_suite(self, suite_name):
return self._suites_config[suite_name]

def get_experiments(self):
"""The configuration has been compiled before it is handed out
@@ -224,38 +275,31 @@ def get_runs(self):
runs |= exp.get_runs()
return runs

def _compile_experiments(self, cli_reporter, run_filter):
if not self.experiment_name():
raise ValueError("No experiment chosen.")
def _compile_vms(self, vms):
result = {}

variables = ExpVariables.empty()

conf_defs = {}
for vm_name, details in vms.items():
result[vm_name] = VirtualMachine.compile(
vm_name, details, self._root_run_details, variables, self._build_commands)

if self.experiment_name() == "all":
for exp_name in self._raw_config['experiments']:
conf_defs[exp_name] = self._compile_experiment(exp_name,
cli_reporter,
run_filter)
return result

def _compile_experiments(self, experiments):
results = {}

if self._exp_name == 'all':
for exp_name in experiments:
results[exp_name] = self._compile_experiment(exp_name, experiments[exp_name])
else:
if self.experiment_name() not in self._raw_config['experiments']:
if self._exp_name not in experiments:
raise ValueError("Requested experiment '%s' not available." %
self.experiment_name())
conf_defs[self.experiment_name()] = self._compile_experiment(
self.experiment_name(), cli_reporter, run_filter)

return conf_defs

def _compile_experiment(self, exp_name, cli_reporter, run_filter):
exp_def = self._raw_config['experiments'][exp_name]
run_cfg = self.runs

return Experiment(exp_name, exp_def, run_cfg,
self._raw_config['virtual_machines'],
self._raw_config['benchmark_suites'],
self._raw_config.get('reporting', {}),
self._data_store,
self._build_commands,
self._raw_config.get('standard_data_file', None),
self._options.clean if self._options else False,
cli_reporter,
run_filter,
self._options)
self._exp_name)
results[self._exp_name] = self._compile_experiment(
self._exp_name, experiments[self._exp_name])

return results

def _compile_experiment(self, exp_name, experiment):
return Experiment.compile(exp_name, experiment, self)
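
For context, a minimal sketch of how a caller drives the refactored Configurator — load_config and the constructor signature follow the diff above, while the data store and the config file name are placeholders, not part of this commit:

# Sketch only: signatures taken from this diff; data_store and 'rebench.conf' are assumed.
from rebench.configurator import load_config, Configurator

raw_config = load_config('rebench.conf')     # callers now parse the config file themselves
data_store = None                            # in real use: an existing DataStore instance
cfg = Configurator(raw_config, data_store, exp_name='all')

print(cfg.experiment_name)                   # property; falls back to standard_experiment, else 'all'
for run_id in cfg.get_runs():                # runs compiled from the selected experiments
    print(run_id)

The constructor no longer loads the file itself: the old __init__ took a file_name and called _load_config internally, whereas the new one takes the already-parsed raw_config dict, so loading and interpretation are separate steps.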
21 changes: 11 additions & 10 deletions rebench/executor.py
@@ -260,12 +260,12 @@ def _read(stream):
return data.decode('utf-8')

def _build_vm_and_suite(self, run_id):
name = "VM:" + run_id.bench_cfg.vm.name
build = run_id.bench_cfg.vm.build
name = "VM:" + run_id.benchmark.suite.vm.name
build = run_id.benchmark.suite.vm.build
self._process_build(build, name, run_id)

name = "S:" + run_id.bench_cfg.suite.name
build = run_id.bench_cfg.suite.build
name = "S:" + run_id.benchmark.suite.name
build = run_id.benchmark.suite.build
self._process_build(build, name, run_id)

def _process_build(self, build, name, run_id):
@@ -337,11 +337,11 @@ def _execute_build_cmd(self, build_command, name, run_id):
def execute_run(self, run_id):
termination_check = run_id.get_termination_check()

run_id.run_config.log()
run_id.log()
run_id.report_start_run()

gauge_adapter = self._get_gauge_adapter_instance(
run_id.bench_cfg.gauge_adapter)
run_id.benchmark.gauge_adapter)

cmdline = self._construct_cmdline(run_id, gauge_adapter)

@@ -386,10 +386,11 @@ def _generate_data_point(self, cmdline, gauge_adapter, run_id,
termination_check):
print(cmdline)
# execute the external program here
run_id.indicate_invocation_start()
(return_code, output, _) = subprocess_timeout.run(
cmdline, cwd=run_id.location, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True, verbose=self._verbose,
timeout=run_id.bench_cfg.suite.max_invocation_time)
timeout=run_id.max_invocation_time)
output = output.decode('utf-8')

if return_code != 0 and not self._include_faulty:
@@ -398,7 +399,7 @@ def _eval_output(self, output, run_id, gauge_adapter, cmdline):
if return_code == 126:
logging.error(("Could not execute %s. A likely cause is that "
"the file is not marked as executable.")
% run_id.bench_cfg.vm.name)
% run_id.benchmark.vm.name)
else:
self._eval_output(output, run_id, gauge_adapter, cmdline)

@@ -416,14 +417,14 @@ def _eval_output(self, output, run_id, gauge_adapter, cmdline):
logging.debug("Recorded %d data points, show last 20..." % num_points)
i = 0
for data_point in data_points:
if warmup > 0:
if warmup is not None and warmup > 0:
warmup -= 1
else:
run_id.add_data_point(data_point)
# only log the last num_points_to_show results
if i >= num_points - num_points_to_show:
logging.debug("Run %s:%s result=%s" % (
run_id.bench_cfg.vm.name, run_id.bench_cfg.name,
run_id.benchmark.suite.vm.name, run_id.benchmark.name,
data_point.get_total_value()))
i += 1
run_id.indicate_successful_execution()
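
The executor changes are mostly mechanical renames that follow the commit message: run_id.bench_cfg becomes run_id.benchmark, and the VM is reached through the benchmark's suite. A rough sketch of the new access paths (run_id is assumed to be an existing run id; the old paths are in the comments):

vm_name = run_id.benchmark.suite.vm.name  # was: run_id.bench_cfg.vm.name
bench   = run_id.benchmark.name           # was: run_id.bench_cfg.name
timeout = run_id.max_invocation_time      # was: run_id.bench_cfg.suite.max_invocation_time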
18 changes: 18 additions & 0 deletions rebench/model/__init__.py
@@ -57,6 +57,24 @@
"""


def none_or_int(value):
if value:
return int(value)
return value


def none_or_float(value):
if value:
return float(value)
return value


def none_or_bool(value):
if value:
return bool(value)
return value


def value_with_optional_details(value, default_details=None):
if isinstance(value, dict):
assert len(value) == 1
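
The new none_or_* helpers only coerce a value when one is present; falsy inputs (None, '', 0) pass through unchanged, so unset config entries stay unset. A small illustration — the config keys here are invented, only the helper functions come from this commit:

from rebench.model import none_or_int, none_or_float, none_or_bool

raw = {'invocations': '10', 'max_invocation_time': None}     # made-up raw config values

invocations = none_or_int(raw.get('invocations'))            # -> 10
max_time    = none_or_float(raw.get('max_invocation_time'))  # -> None, stays unset
parallel    = none_or_bool(raw.get('parallel_exec'))         # -> None, key not given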