Remove support for profiling
Closes issue #18.

Signed-off-by: Stefan Marr <git@stefan-marr.de>
smarr committed Feb 7, 2014
1 parent 6f4bf75 commit 6e6e251
Showing 11 changed files with 9 additions and 301 deletions.
10 changes: 1 addition & 9 deletions rebench.conf
@@ -67,10 +67,10 @@ run_definitions:
This run definition is used for testing.
It should try all possible settings and the generated out
will be compared to the expected one by the unit test(s)
actions: profile
benchmark:
- TestSuite1
- TestSuite2
input_sizes: 1
executions:
# List of VMs and Benchmarks/Benchmark Suites to be run on them
# benchmarks define here will override the ones defined for the whole run
@@ -82,11 +82,3 @@ run_definitions:
- TestRunner1:
benchmark: TestSuite2
- TestRunner2
TestProfiling:
description: >
This run is used to test the profiling run type
actions: benchmark
benchmark: TestSuite1
input_sizes: 1
executions:
- CSOM
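
For orientation, a hedged sketch of what the remaining Test run definition looks like once the actions: key and the TestProfiling block are gone. The keys, suite names, and runner names are taken from the diff above; the exact indentation and any keys not shown in these hunks are assumptions:

    run_definitions:
        Test:
            description: >
                This run definition is used for testing.
            benchmark:
                - TestSuite1
                - TestSuite2
            input_sizes: 1
            executions:
                - TestRunner1:
                    benchmark: TestSuite2
                - TestRunner2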
8 changes: 3 additions & 5 deletions rebench/Configurator.py
@@ -140,8 +140,6 @@ def _compileBenchConfigurations(self, runName):
# and merge it with the global configuration
self.statistics = self.statistics.combine(runDef.get('statistics', {}))


actions = self._valueOrListAllwaysAsList(runDef['actions'])
_benchmarks = self._valueOrListAllwaysAsList(runDef.get( 'benchmark', None))
_input_sizes = self._valueOrListAllwaysAsList(runDef.get('input_sizes', None))

@@ -167,7 +165,7 @@ def _compileBenchConfigurations(self, runName):
# third step: create final configurations to be executed
configurationDefinitions = []
for suite in suiteDefinitions:
configurationDefinitions += self._compileConfigurations(suite, actions)
configurationDefinitions += self._compileConfigurations(suite)

return configurationDefinitions

@@ -235,7 +233,7 @@ def _compileSuiteDefinitionsFromVMDef(self, vmDef):

return suiteDefs

def _compileConfigurations(self, suite, actions):
def _compileConfigurations(self, suite):
"""Specialization of the configurations which get executed by using the
suite definitions.
"""
@@ -264,6 +262,6 @@ def _compileConfigurations(self, suite, actions):
bench['performance_reader'] = suite['performance_reader']

bench['vm'] = suite['vm']
configs.append(BenchmarkConfig.create(bench, actions))
configs.append(BenchmarkConfig.create(bench))

return configs
12 changes: 3 additions & 9 deletions rebench/Executor.py
@@ -32,7 +32,6 @@
# proceed, activelayer, around, base, globalActivateLayer, globalDeactivateLayer

benchmark = layer("benchmark")
profile = layer("profile")
quick = layer("quick")

class Executor:
@@ -138,11 +137,7 @@ def _eval_output(self, output, runId, perf_reader, error, cmdline, __result__):

def _check_termination_condition(self, runId, error):
return False, error

@after(profile)
def _check_termination_condition(self, runId, error, __result__):
return True, error


@after(benchmark)
def _check_termination_condition(self, runId, error, __result__):
terminate, (consequent_erroneous_runs, erroneous_runs) = __result__
@@ -220,9 +215,8 @@ def execute(self):
self._reporter.setTotalNumberOfConfigurations(len(configs))

for runId in configs:
for action in runId.actions():
with activelayers(layer(action)):
self._exec_configuration(runId)
with activelayers(layer("benchmark")):
self._exec_configuration(runId)

self._reporter.jobCompleted(configs, self._data)
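
The executor relies on ContextPy-style layers for this dispatch, and after this commit only the "benchmark" layer is ever activated. Below is a minimal sketch mirroring the pattern in Executor.py above; the Runner class and the final print are illustrative only, and it is assumed that the contextpy module ReBench bundles is importable as plain contextpy, exposing the layer, after, and activelayers names used in the original file:

    from contextpy import layer, after, activelayers

    benchmark = layer("benchmark")

    class Runner(object):
        def _check_termination_condition(self, run_id, error):
            # base behaviour: never terminate a run early
            return False, error

        @after(benchmark)
        def _check_termination_condition(self, run_id, error, __result__):
            # refinement applied only while the "benchmark" layer is active;
            # __result__ holds the tuple returned by the base method
            terminate, error = __result__
            return terminate or error, error

    runner = Runner()
    with activelayers(benchmark):
        # corresponds to the executor's `with activelayers(layer("benchmark")):`
        print(runner._check_termination_condition("run-1", False))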

116 changes: 0 additions & 116 deletions rebench/Reporter.py
@@ -46,7 +46,6 @@
logging.info("matplotlib was not found, import failed: " + str(ImportError))

benchmark = layer("benchmark")
profile = layer("profile")
log_to_file = layer("log_to_file")

class Reporter:
@@ -876,118 +875,3 @@ def _flatten_(self, path, data, result):
else:
result[newPath] = val

class ReporterOld:

def __init__(self, config, output_file = None):
self.config = config
self.benchmark_results = None
self.benchmark_data = None
self.profile_data = None
self.output_file = output_file

if output_file:
globalActivateLayer(log_to_file)
self.header_written = False
self.file = open(self.output_file, 'w+')

def set_data(self, data):
(result, benchmark_data) = data
self.benchmark_results = result
self.benchmark_data = benchmark_data

def compile_report(self):
pass

@after(profile)
def compile_report(self, __result__):
memory_lines = []
opcode_lines = []
library_lines = []

dict = profile_data[0].get_memory_usage()
line = "ObjectSize:" + "\t".join(dict.keys())
memory_lines.append(line)

dict = profile_data[0].get_library_usage()
line = "Library:" + "\t".join(dict.keys())
library_lines.append(line)

dict = profile_data[0].get_opcode_usage()
line = "Opcodes:" + "\t".join(dict.keys())
opcode_lines.append(line)

for profile in profile_data:
vm, bench = profile.get_vm_and_benchmark()
head = "%s:%s"%(vm, bench)
memory_lines.append(head + "\t".join(profile.get_memory_usage().values()))
opcode_lines.append(head + "\t".join(profile.get_opcode_usage().values()))
library_lines.append(head + "\t".join(profile.get_library_usage().values()))

report = "\n".join(memory_lines)
report = report + "\n"
report = "\n".join(opcode_lines)
report = report + "\n"
report = "\n".join(library_lines)

return report

def normalize_data(self, profile_data):
for profileA in profile_data:
for profileB in profile_data:
if profileA != profileB:
profileA.normalize(profileB)

def report_profile_results(self, verbose):
profile_data = self.normalize_data(self.profile_data)
report = self.compile_report(verbose)
return report

def report(self, data, current_vm, num_cores, input_size):
pass

@after(log_to_file)
def report(self, data, current_vm, num_cores, input_size, __result__):
if not self.header_written:
self.file.write("VM\tCores\tInputSize\tBenchmark\tMean\tStdDev\tInterv_low\tInterv_high\tError\n")
self.header_written = True

for bench_name, values in data.iteritems():
(mean, sdev, ((i_low, i_high), error), interval_t) = values
line = "\t".join((current_vm, str(num_cores), str(input_size), bench_name, str(mean), str(sdev), str(i_low), str(i_high), str(error)))
self.file.write(line + "\n")

self.file.flush()

def final_report(self, verbose):
if self.profile_data:
print(self.report_profile_results(verbose))

if self.benchmark_data:
print(self.report_benchmark_results(verbose))

def old(self):
if self.output_file is not None:
if not verbose:
if self.profile_data is not None:
profile = self.report_profile_results(True)
benchmark = self.report_benchmark_results(True)

f = open(self.output_file, 'w+')
try:
f.write(profile)
f.write(benchmark)
finally:
f.close()


def report_benchmark_results(self, verbose):
report = "VM\tBenchmark\tMean\tStdDev\tInterv_low\tInterv_high\tError\n"
lines = []
for (vm, benchmarks) in self.benchmark_results.items():
for (benchmark, results) in benchmarks.items():
(mean, sdev, ((i_low, i_high), error),
interval_t) = results
lines.append("\t".join([vm, benchmark, str(mean), str(sdev), str(i_low), str(i_high), str(error)]))

report += "\n".join(lines)
return report
17 changes: 2 additions & 15 deletions rebench/model/benchmark_config.py
@@ -14,14 +14,12 @@ def get_config(cls, name, suiteName, vmName, extra_args = None):
return cls._registry[tmp]

@classmethod
def create(cls, bench_def, actions):
def create(cls, bench_def):
cfg = BenchmarkConfig(**bench_def)
if cfg in BenchmarkConfig._registry:
cfg = BenchmarkConfig._registry[cfg]
else:
BenchmarkConfig._registry[cfg] = cfg

cfg.set_actions(actions)
return cfg

def __init__(self, name, performance_reader, suite, vm, extra_args = None, **kwargs):
@@ -30,7 +28,6 @@ def __init__(self, name, performance_reader, suite, vm, extra_args = None, **kwa
self._performance_reader = performance_reader
self._suite = suite
self._vm = vm
self._actions = None
self._additional_config = kwargs

@property
@@ -95,17 +92,7 @@ def __hash__(self):

def as_tuple(self):
return (self._name, self._vm['name'], self._suite['name'], self._extra_args)

def set_actions(self, actions):
if self._actions and 0 != cmp(self._actions, actions):
raise ValueError("Currently the actions for each BenchmarkConfigurations need to be equal.")

self._actions = actions
return self

def actions(self):
return self._actions


@classmethod
def tuple_mapping(cls):
return {'bench' : 0, 'vm' : 1, 'suite' : 2, 'extra_args' : 3}
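
With the actions parameter gone, BenchmarkConfig.create() is purely an interning constructor: it builds a configuration and hands back the canonical, already-registered instance when an equal one exists. A self-contained sketch of that pattern (class and field names here are illustrative, not ReBench's API):

    class InternedConfig(object):
        _registry = {}

        def __init__(self, name, vm, suite, extra_args=None):
            self._key = (name, vm, suite, extra_args)

        def __eq__(self, other):
            return isinstance(other, InternedConfig) and self._key == other._key

        def __hash__(self):
            return hash(self._key)

        @classmethod
        def create(cls, **bench_def):
            cfg = cls(**bench_def)
            # return the previously registered instance for an equal definition,
            # otherwise register this one as the canonical object
            return cls._registry.setdefault(cfg, cfg)

    a = InternedConfig.create(name="Bench1", vm="CSOM", suite="TestSuite1")
    b = InternedConfig.create(name="Bench1", vm="CSOM", suite="TestSuite1")
    assert a is b   # equal definitions share one configuration object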
3 changes: 0 additions & 3 deletions rebench/model/run_id.py
@@ -59,9 +59,6 @@ def as_simple_string(self):
def as_tuple(self):
return self._cfg.as_tuple() + self._variables + (self._criterion, )

def actions(self):
return self._cfg.actions()

def cmdline(self):
cmdline = ""

95 changes: 0 additions & 95 deletions rebench/profile.py

This file was deleted.

7 changes: 0 additions & 7 deletions rebench/rebench.py
@@ -59,13 +59,6 @@ def shell_options(self):
help="Do a quick benchmark run instead of a full, "
"statistical rigorous experiment.",
default=False)
#TODO: Profiling is part of the run definition, not a cmd-line
# option...
#options.add_option("-p", "--profile", action="store_true",
# dest="profile",
# help="Profile dynamic characteristics instead "
# " measuring execution time.",
# default=False)
options.add_option("-d", "--debug", action="store_true", dest="debug",
default=False, help="Enable debug output.")
options.add_option("-v", "--verbose", action="store_true",
2 changes: 0 additions & 2 deletions rebench/tests/codespeed.conf
@@ -12,7 +12,6 @@ reporting:
# see: https://github.com/tobami/codespeed
codespeed:
url: http://localhost:1/ # not supposed to work
project: Test
# other details like commitid are required to be given as parameters
csv_file: test.csv
csv_locale: de_DE.UTF-8
@@ -55,7 +54,6 @@ run_definitions:
Test:
description: >
This run definition is used for testing.
actions: benchmark
benchmark: TestSuite1
statistics:
min_runs: 1