Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix output locations #69

Merged
merged 3 commits into from
Oct 29, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/benchmarks.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ on:
# Automatically prepare a minor version release every Saturday
# See https://crontab.guru/#0_0_*_*_6
- cron: "0 0 * * 6"
pull_request:

env:
VERSION: ${{ github.event.inputs.release_version || 'unreleased' }}
Expand Down
2 changes: 1 addition & 1 deletion scripts/mk-run.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ def kv(key):
ctime = "%s -f 'elapsed_sec: %%e maxresident_kb: %%M' -o time.out" % os_cmds["time"]
ctimeout = "%s --foreground %s" % (os_cmds["timeout"], csv_row["timeout"])
if tool == "apalache":
return "%s %s %s/bin/apalache-mc check %s %s %s %s %s %s | tee apalache.out" % (
return "%s %s %s/bin/apalache-mc check --profiling=true %s %s %s %s %s %s | tee apalache.out" % (
ctime,
ctimeout,
args.apalacheDir,
Expand Down
187 changes: 114 additions & 73 deletions scripts/parse-logs.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,28 +17,44 @@
import re
import sys

from pathlib import Path

# Output of the GNU time wrapper (see mk-run.py): the fractional part of
# elapsed_sec is optional, hence the (|\.\d+) alternative.
time_re = re.compile(r"elapsed_sec: (\d+)(|\.\d+) maxresident_kb: (\d+)")
# GNU timeout reports status 124 when it kills the command.
timeout_re = re.compile(r"Command exited with non-zero status 124")
# Patterns matched against Apalache's detailed.log.
apalache_outcome_re = re.compile(r"The outcome is:\s*(.*)")
apalache_ntrans_re = re.compile(
    r"Found\s+(\d+)\s+initializing transitions and\s+(\d+)\s+next transitions"
)
apalache_depth_re = re.compile(r"Step\s+(\d+), level\s+\d+:")
# Patterns matched against TLC's tlc.out.
tlc_no_error_re = re.compile(r"Model checking completed. No error has been found")
tlc_states_re = re.compile(r"\s(\d+)\s+distinct states found")
tlc_depth_re = re.compile(r"The depth of the complete state graph search is\s+(\d+)")
tlc_error_re = re.compile(r"Error:.* is violated")
tlc_progress_re = re.compile(
    r"Progress\((\d+)\) at .*: (\d+) states generated (.*), (\d+) distinct states found"
)


def parse_options():
    """Parse the command-line options of the log parser.

    Returns:
        argparse.Namespace with:
          - expDir: experiment directory, resolved to an absolute real path;
          - output: the target CSV filename (default 'results.csv');
          - human: whether to render figures in human-readable form.
    """
    parser = argparse.ArgumentParser(
        description="Parse the logs that are produced by the Apalache tests."
    )
    parser.add_argument(
        "expDir",
        type=str,
        help="The directory where the scripts and experiments logs can be found.",
    )
    parser.add_argument(
        "--output",
        "-o",
        default="results.csv",
        help="The filename of the output CSV file",
    )
    parser.add_argument(
        "--human",
        action="store_true",
        help="Make figures human-readable, e.g., convert 3610 seconds to 01H00M",
    )
    args = parser.parse_args()
    # Normalize to an absolute path so later os.path.join calls are stable
    # regardless of the caller's working directory.
    args.expDir = os.path.realpath(args.expDir)
    return args
Expand All @@ -52,60 +68,75 @@ def collect_dirs(args):
files.sort()
for f in files:
exp_path = os.path.join(in_dir, f)
m = re.compile('\d+').match(f)
m = re.compile("\d+").match(f)
if m and os.path.isdir(exp_path):
if os.path.exists(os.path.join(exp_path, 'apalache.out')):
print(' %3s -> apalache' % f)
if os.path.exists(os.path.join(exp_path, "apalache.out")):
print(" %3s -> apalache" % f)
exp_descs.append({"no": int(f), "tool": "apalache", "path": exp_path})
elif os.path.exists(os.path.join(exp_path, 'tlc.out')):
print(' %3s -> tlc' % f)
elif os.path.exists(os.path.join(exp_path, "tlc.out")):
print(" %3s -> tlc" % f)
exp_descs.append({"no": int(f), "tool": "tlc", "path": exp_path})
else:
print(' %3s -> CORRUPTED' % f)
print(" %3s -> CORRUPTED" % f)

return exp_descs


def parse_time(ed):
    """Extract running time, peak memory, and timeout status from time.out.

    Args:
        ed: an experiment descriptor (as built by collect_dirs) whose
            'path' key points at the experiment directory.

    Returns:
        dict with '04:time_sec' (whole seconds, rounded up), '05:mem_kb'
        (max resident set size in KB as a string), and '03:status'
        ('Timeout' if GNU timeout killed the run; otherwise the initial
        'Fail', which a tool-specific parser is expected to overwrite).
    """
    entry = {"04:time_sec": None, "05:mem_kb": None, "03:status": "Fail"}
    with open(os.path.join(ed["path"], "time.out"), "r") as f:
        line = f.readline()
        while line:
            m = timeout_re.search(line)
            if m:
                entry = {**entry, "03:status": "Timeout"}
            m = time_re.search(line)
            if m:
                # group(2) is the optional fractional part and may match the
                # empty alternative; float('') would raise, so default to 0.
                frac = float(m.group(2)) if m.group(2) else 0.0
                entry = {
                    **entry,
                    "04:time_sec": int(math.ceil(float(m.group(1)) + frac)),
                    "05:mem_kb": m.group(3),
                }

            line = f.readline()

    return entry


def get_experiment_log(ed: dict, fname: str) -> Path:
    """Locate a log file under the experiment's _apalache-out directory.

    Apalache writes its outputs into `_apalache-out/<run-dir>/`, where the
    run directory name is not known in advance, so we glob one level deep
    and take the first match.

    Args:
        ed: an experiment descriptor whose 'path' key points at the
            experiment directory.
        fname: base name of the log file, e.g. 'detailed.log'.

    Returns:
        Path of the first matching log file.

    Raises:
        RuntimeError: if no matching file is found.
    """
    glob_pattern = "*/" + fname
    out_dir = Path(ed["path"]) / "_apalache-out"
    try:
        return next(out_dir.glob(glob_pattern))
    except StopIteration:
        raise RuntimeError(
            f"Failed to find log file {fname} in experiment directory {out_dir}"
        )


def parse_apalache(ed):
entry = {'05:depth': 0, '10:ninit_trans': 0, '11:ninit_trans': 0}
with open(os.path.join(ed['path'], 'detailed.log'), "r") as lf:
entry = {"05:depth": 0, "10:ninit_trans": 0, "11:ninit_trans": 0}
with get_experiment_log(ed, "detailed.log").open("r") as lf:
line = lf.readline()
while line:
m = apalache_outcome_re.search(line)
if m:
entry['03:status'] = m.group(1)
entry["03:status"] = m.group(1)
m = apalache_depth_re.search(line)
if m:
entry['05:depth'] = int(m.group(1))
entry["05:depth"] = int(m.group(1))
m = apalache_ntrans_re.search(line)
if m:
entry['10:ninit_trans'] = m.group(1)
entry['11:ninit_trans'] = m.group(2)
entry["10:ninit_trans"] = m.group(1)
entry["11:ninit_trans"] = m.group(2)

line = lf.readline()

prof_rule_re = re.compile(r'^(\w+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)')
prof_rule_re = re.compile(r"^(\w+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)")
ncells, nclauses, nclause_prod = 0, 0, 0
with open(os.path.join(ed['path'], 'profile-rules.txt'), "r") as pf:
with get_experiment_log(ed, "profile-rules.txt").open("r") as pf:
line = pf.readline()
while line:
m = prof_rule_re.match(line)
Expand All @@ -115,39 +146,42 @@ def parse_apalache(ed):
nclause_prod += int(m.group(5)) * int(m.group(6))
line = pf.readline()

avg_clause_len = \
math.ceil(nclause_prod / nclauses) if (nclauses > 0) else 0
entry = { **entry, '12:ncells': ncells,
'13:nclauses': nclauses, '14:navg_clause_len': avg_clause_len }
avg_clause_len = math.ceil(nclause_prod / nclauses) if (nclauses > 0) else 0
entry = {
**entry,
"12:ncells": ncells,
"13:nclauses": nclauses,
"14:navg_clause_len": avg_clause_len,
}

return entry


def parse_tlc(ed):
entry = {'05:depth': 0, '20:nstates': 0}
with open(os.path.join(ed['path'], 'tlc.out'), "r") as lf:
entry = {"05:depth": 0, "20:nstates": 0}
with open(os.path.join(ed["path"], "tlc.out"), "r") as lf:
line = lf.readline()
while line:
m = tlc_no_error_re.search(line)
if m:
entry['03:status'] = "NoError"
entry["03:status"] = "NoError"

m = tlc_error_re.search(line)
if m:
entry['03:status'] = "Error"
entry["03:status"] = "Error"

m = tlc_progress_re.search(line)
if m:
entry['05:depth'] = int(m.group(1))
entry['20:nstates'] = int(m.group(4))
entry["05:depth"] = int(m.group(1))
entry["20:nstates"] = int(m.group(4))

m = tlc_states_re.search(line)
if m:
entry['20:nstates'] = int(m.group(1))
entry["20:nstates"] = int(m.group(1))

m = tlc_depth_re.search(line)
if m:
entry['05:depth'] = int(m.group(1))
entry["05:depth"] = int(m.group(1))

line = lf.readline()

Expand All @@ -165,6 +199,7 @@ def humanize_sec(sec):
else:
return "%2ds" % sec


def humanize_kb(kb):
mb = int(kb / 1024)
gb = int(mb / 1024)
Expand All @@ -176,6 +211,7 @@ def humanize_kb(kb):
else:
return "%dK" % kb


def humanize_num(num):
# shall we use round(..) instead of int(..)?
thousands = int(num / 1000)
Expand All @@ -202,41 +238,42 @@ def humanize(entry):
def int_or_zero(name):
return int(entry[name]) if name in entry and entry[name] else 0

new_entry = { **entry,
'04:time_hum': humanize_sec(int_or_zero('04:time_sec')),
'05:mem_hum': humanize_kb(int_or_zero('05:mem_kb')),
'12:ncells_hum': humanize_num(int_or_zero('12:ncells')),
'13:nclauses_hum': humanize_num(int_or_zero('13:nclauses')),
'20:nstates_hum': humanize_num(int_or_zero('20:nstates'))
}
new_entry = {
**entry,
"04:time_hum": humanize_sec(int_or_zero("04:time_sec")),
"05:mem_hum": humanize_kb(int_or_zero("05:mem_kb")),
"12:ncells_hum": humanize_num(int_or_zero("12:ncells")),
"13:nclauses_hum": humanize_num(int_or_zero("13:nclauses")),
"20:nstates_hum": humanize_num(int_or_zero("20:nstates")),
}
return new_entry


if __name__ == "__main__":
args = parse_options()
if not os.path.exists(args.expDir):
if not os.path.exists(args.expDir):
print("Error: Directory %s not found." % args.expDir)
sys.exit(1)

# collect the experimental results
eds = collect_dirs(args)
entries = []
for ed in eds:
entry = { '01:no': ed['no'], '02:tool': ed['tool'] }
entry = {"01:no": ed["no"], "02:tool": ed["tool"]}
try:
entry = { **entry, **parse_time(ed) } # merge
if ed['tool'] == 'apalache':
entry = { **entry, **parse_apalache(ed)}
elif ed['tool'] == 'tlc':
entry = { **entry, **parse_tlc(ed)}
entry = {**entry, **parse_time(ed)} # merge
if ed["tool"] == "apalache":
entry = {**entry, **parse_apalache(ed)}
elif ed["tool"] == "tlc":
entry = {**entry, **parse_tlc(ed)}
else:
print('Unknown tool: ' + ed['tool'])
entry['03:status'] = 'Fail'
print("Unknown tool: " + ed["tool"])
entry["03:status"] = "Fail"
except FileNotFoundError as err:
print("Error in %s: %s" % (ed['no'], err))
entry['03:status'] = 'Fail'
print("Error in %s: %s" % (ed["no"], err))
entry["03:status"] = "Fail"

#print(entry)
# print(entry)
entries.append(entry)

# if the values must be human-readable, convert them
Expand All @@ -252,12 +289,16 @@ def int_or_zero(name):
sorted_keys.sort()

# write the results to a csv file
with open(args.output, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=sorted_keys, delimiter=',',
quotechar="'", quoting=csv.QUOTE_MINIMAL)
with open(args.output, "w", newline="") as csvfile:
writer = csv.DictWriter(
csvfile,
fieldnames=sorted_keys,
delimiter=",",
quotechar="'",
quoting=csv.QUOTE_MINIMAL,
)
writer.writeheader()
for e in entries:
writer.writerow(e)

print("\n%d entries are written to %s" % (len(entries), args.output))