Skip to content

Commit

Permalink
Start importing code
Browse files Browse the repository at this point in the history
- added eclipse project
- added INSTALL
- added config file
  • Loading branch information
smarr committed Sep 9, 2009
1 parent 05dfc4b commit 9dac953
Show file tree
Hide file tree
Showing 5 changed files with 167 additions and 0 deletions.
5 changes: 5 additions & 0 deletions .hgignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# use glob syntax.
syntax: glob

*.pyc
*.DS_Store
17 changes: 17 additions & 0 deletions .project
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>ReBench</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
</natures>
</projectDescription>
10 changes: 10 additions & 0 deletions .pydevproject
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?>

<pydev_project>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/ReBench/src</path>
</pydev_pathproperty>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.6</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">python2.6-opt</pydev_property>
</pydev_project>
5 changes: 5 additions & 0 deletions INSTALL
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
ReBench utilizes SciPy for its statistical calculations.

Instructions to install SciPy can be found at
http://www.scipy.org/Installing_SciPy
Note that this means you will also need to install NumPy.
130 changes: 130 additions & 0 deletions rebench.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
# Config file for ReBench
# Config format is YAML (see http://yaml.org/ for detailed spec)

# this run definition will be chosen if no parameters are given to rebench.py
standard_run: CSOM-test

# settings and requirements for statistical evaluation
statistics:
min_runs: 30
max_runs: 50
confidence_level: 0.95
error_margin: 0.005
stop_criterium: percentage
stop_threshold: 5

# definition of benchmark suites
benchmarks:
CSOM:
performance_reader: LogPerformance
# location: /Users/smarr/Projects/PhD/VMs/CSOM
command: -cp Smalltalk Examples/Benchmarks/%(benchmark)s %(input)s
input_sizes: [1, 2, 10, 100, 1000]
benchmarks:
- All.som
- Fibonacci.som
- QuickSort.som
- Sum.som
- Ball.som
- List.som
- Random.som
- Towers.som
- Benchmark.som
- ListElement.som
- Recurse.som
- TowersDisk.som
- Bounce.som
- Loop.som
- Sieve.som
- TreeNode.som
- BubbleSort.som
- Permute.som
- Sort.som
- TreeSort.som
- Dispatch.som
- Queens.som
- Storage.som
- Fannkuch.som:
extra-args: 6
ulimit: 300
CSOM-all:
performance_reader: LogPerformance
# location: /Users/smarr/Projects/PhD/VMs/CSOM
command: -cp Smalltalk Examples/Benchmarks/%(benchmark)s %(input)s
input_sizes: [1, 2, 10, 100, 1000]
benchmarks:
- All.som
ulimit: 300
CSOM-loop:
performance_reader: LogPerformance
command: -cp Smalltalk Examples/Benchmarks/%(benchmark)s %(input)s
input_sizes: [1, 2, 10, 100, 1000]
benchmarks:
- Loop.som
ulimit: 300
Lua:
performance_reader: LogPerformance
command: "%(benchmark)s %(input)s "
input_sizes: [1, 2, 10, 100]
benchmarks:
- ackermann.lua:
extra-args: 6
- ary3.lua:
extra-args: 80
- binarytrees.lua:
extra-args: 5
- except.lua:
extra-args: 600
- fannkuch-opt.lua:
extra-args: 7
- fannkuch.lua:
extra-args: 6
- fasta.lua:
extra-args: 2500
ulimit: 300

# VMs have a name and are specified by a path and the binary to be executed
virtual_machines:
CSOM:
path: /home/smarr/CSOM/smarr/benchmark
binary: CSOM
CSOM-combined:
path: /Users/smarr/Projects/PhD/VMs/CSOM
binary: CSOM-O3-classic
Lua-combined:
path: /home/smarr/Lua/gcc-intermodule-test/src
binary: lua
Lua:
path: /home/smarr/Lua/my-5.1.4/src
binary: lua

# define the benchmarks to be executed for a re-executable benchmark run
run_definitions:
CSOMvsLuaProfiling:
description: >
This run definition is used to compare some basic properties of CSOM
and Lua. It is only used for profiling, not for execution time evaluation.
actions: profile
executions:
# List of VMs and Benchmarks/Benchmark Suites to be run on them
#- CSOM:
# benchmark: CSOM
- Lua:
benchmark: Lua
CSOM-test:
description: >
This run is used for simple testing purposes
actions: benchmark
benchmark: CSOM-loop
input_size: 1
executions:
- CSOM
Combined-Compilation:
description: >
This run tries to measure the impact of combined compilation.
actions: benchmark
benchmark: Lua
input_size: 1
executions:
- Lua
- Lua-combined

0 comments on commit 9dac953

Please sign in to comment.