Skip to content

Commit

Permalink
Merge pull request #618 from hed-standard/dev_update_spec_tests3
Browse files Browse the repository at this point in the history
Redo spec tests to use git submodules. Upload results as an artifact. Add sidecar/events tests
  • Loading branch information
VisLab authored Mar 5, 2023
2 parents bdb6c6c + 22066bf commit e1d6cdb
Show file tree
Hide file tree
Showing 5 changed files with 120 additions and 27 deletions.
16 changes: 9 additions & 7 deletions .github/workflows/spec_tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,13 +18,8 @@ jobs:
steps:
- name: Checkout hed-python
uses: actions/checkout@v3

- name: Checkout spec
uses: actions/checkout@v3
with:
repository: hed-standard/hed-specification
ref: develop
path: hed-specification
submodules: true

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
Expand All @@ -38,5 +33,12 @@ jobs:
- name: Test with unittest
run: |
python -m unittest spec_tests/*
python -m unittest spec_tests/* > test_results.txt
continue-on-error: true

- name: Upload spec test results
uses: actions/upload-artifact@v3
with:
name: spec-test-results
path: test_results.txt

4 changes: 4 additions & 0 deletions .gitmodules
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Pin the HED specification repository as a submodule so spec tests can read
# its error-test data directly instead of doing a separate checkout in CI.
# NOTE(review): the URL appears rewritten by a proxy (github.com); upstream
# is github.com/hed-standard/hed-specification — verify before merging.
[submodule "spec_tests/hed-specification"]
	path = spec_tests/hed-specification
	url = https://github.com/hed-standard/hed-specification/
	branch = develop
1 change: 1 addition & 0 deletions spec_tests/hed-specification
Submodule hed-specification added at 9e7d37
116 changes: 96 additions & 20 deletions spec_tests/test_errors.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,16 +5,21 @@
from hed.models.hed_ops import apply_ops
from hed import load_schema_version
from hed import HedValidator
from hed import Sidecar
import io
import json


class MyTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_dir = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../hed-specification/docs/source/_static/data/error_tests'))
'hed-specification/docs/source/_static/data/error_tests'))
cls.test_files = [os.path.join(test_dir, f) for f in os.listdir(test_dir)
if os.path.isfile(os.path.join(test_dir, f))]
cls.fail_count = 0
cls.default_sidecar = Sidecar(os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_sidecar.json')))


def run_single_test(self, test_file):
with open(test_file, "r") as fp:
Expand All @@ -24,6 +29,7 @@ def run_single_test(self, test_file):
if error_code == "VERSION_DEPRECATED":
print("Skipping VERSION_DEPRECATED test")
continue
name = info.get('name', '')
description = info['description']
schema = info['schema']
if schema:
Expand All @@ -37,32 +43,102 @@ def run_single_test(self, test_file):
validator = HedValidator(schema)
def_mapper = DefMapper(def_dict)
onset_mapper = OnsetMapper(def_mapper)
for section_name in info["tests"]:
for section_name, section in info["tests"].items():
if section_name == "string_tests":
for result, tests in info["tests"]["string_tests"].items():
for test in tests:
modified_test, issues = apply_ops(test, [validator, def_mapper, onset_mapper], check_for_warnings=True, expand_defs=True)
if modified_test and modified_test != test:
_, def_expand_issues = apply_ops(modified_test, validator, check_for_warnings=True)
issues += def_expand_issues
if result == "fails":
if not issues:
print(f"{error_code}: {description}")
print(f"Passed this test(that should fail): {test}")
print(issues)
self.fail_count += 1
else:
if issues:
print(f"{error_code}: {description}")
print(f"Failed this test: {test}")
print(issues)
self.fail_count += 1
self._run_single_string_test(section, validator, def_mapper,
onset_mapper, error_code, description, name)
elif section_name == "sidecar_tests":
self._run_single_sidecar_test(section, validator, def_mapper, onset_mapper, error_code, description,
name)
elif section_name == "event_tests":
self._run_single_events_test(section, validator, def_mapper, onset_mapper, error_code, description,
name)

def _run_single_string_test(self, info, validator, def_mapper, onset_mapper, error_code, description,
                            name):
    """Validate each HED string test case and count unexpected pass/fail results.

    Parameters:
        info (dict): Maps an expected result key ("fails" or anything else, meaning
            "passes") to a list of HED test strings.
        validator (HedValidator): Validator applied to each string.
        def_mapper (DefMapper): Definition mapper applied alongside the validator.
        onset_mapper (OnsetMapper): Onset mapper applied alongside the validator.
        error_code (str): Spec error code, used in failure reports.
        description (str): Human-readable description of the spec test.
        name (str): Name of the individual test case, used in failure reports.

    Side effects:
        Increments ``self.fail_count`` and prints diagnostics for every test
        whose outcome does not match the expected result.
    """
    for result, tests in info.items():
        for test in tests:
            modified_test, issues = apply_ops(test, [validator, def_mapper, onset_mapper],
                                              check_for_warnings=True, expand_defs=True)
            # If def expansion changed the string, validate the expanded form too
            # so issues introduced by the expansion are also caught.
            if modified_test and modified_test != test:
                _, def_expand_issues = apply_ops(modified_test, validator, check_for_warnings=True)
                issues += def_expand_issues
            if result == "fails":
                if not issues:
                    print(f"{error_code}: {description}")
                    print(f"Passed this test(that should fail) '{name}': {test}")
                    print(issues)
                    self.fail_count += 1
            else:
                if issues:
                    print(f"{error_code}: {description}")
                    # Quote the name for consistency with the "should fail" report above.
                    print(f"Failed this test '{name}': {test}")
                    print(issues)
                    self.fail_count += 1

def _run_single_sidecar_test(self, info, validator, def_mapper, onset_mapper, error_code, description,
                             name):
    """Validate each sidecar test case and count unexpected pass/fail results.

    Parameters:
        info (dict): Maps an expected result key ("fails" or anything else, meaning
            "passes") to a list of sidecar dictionaries to validate.
        validator (HedValidator): Validator applied to each sidecar.
        def_mapper (DefMapper): Definition mapper applied alongside the validator.
        onset_mapper (OnsetMapper): Onset mapper applied alongside the validator.
        error_code (str): Spec error code, used in failure reports.
        description (str): Human-readable description of the spec test.
        name (str): Name of the individual test case, used in failure reports.

    Side effects:
        Increments ``self.fail_count`` and prints diagnostics for every test
        whose outcome does not match the expected result.
    """
    for result, tests in info.items():
        for test in tests:
            # Sidecar only takes file-like input, so round-trip the test dict
            # through an in-memory JSON buffer to construct one.
            buffer = io.BytesIO(json.dumps(test).encode("utf-8"))
            sidecar = Sidecar(buffer)
            issues = sidecar.validate_entries([validator, def_mapper, onset_mapper], check_for_warnings=True)
            if result == "fails":
                if not issues:
                    print(f"{error_code}: {description}")
                    print(f"Passed this test(that should fail) '{name}': {test}")
                    print(issues)
                    self.fail_count += 1
            else:
                if issues:
                    print(f"{error_code}: {description}")
                    # Quote the name for consistency with the "should fail" report above.
                    print(f"Failed this test '{name}': {test}")
                    print(issues)
                    self.fail_count += 1

def _run_single_events_test(self, info, validator, def_mapper, onset_mapper, error_code, description,
                            name):
    """Validate each tabular (events-file) test case and count unexpected results.

    Each test is a list of rows, each row a list of cell values; rows are joined
    with tabs/newlines to build an in-memory events file validated against
    ``self.default_sidecar``.

    Parameters:
        info (dict): Maps an expected result key ("fails" or anything else, meaning
            "passes") to a list of row-list tests.
        validator (HedValidator): Validator applied to each events file.
        def_mapper (DefMapper): Definition mapper applied alongside the validator.
        onset_mapper (OnsetMapper): Onset mapper applied alongside the validator.
        error_code (str): Spec error code, used in failure reports.
        description (str): Human-readable description of the spec test.
        name (str): Name of the individual test case, used in failure reports.

    Side effects:
        Increments ``self.fail_count`` and prints diagnostics for every test
        whose outcome does not match the expected result.
    """
    from hed import TabularInput
    for result, tests in info.items():
        for test in tests:
            lines = []
            malformed = False
            for row in test:
                if not isinstance(row, list):
                    print(f"Improper grouping in test: {error_code}:{name}")
                    print(f"This is probably a missing set of square brackets.")
                    malformed = True
                    break
                lines.append("\t".join(str(x) for x in row))
            if malformed:
                # Fix: previously a malformed test still validated the truncated
                # file built from the rows before the bad one, giving a
                # misleading pass/fail result.  Skip it entirely instead.
                continue
            if not lines:
                print(f"Invalid blank events found in test: {error_code}:{name}")
                continue
            string = "\n".join(lines) + "\n"
            file_obj = io.BytesIO(string.encode("utf-8"))

            file = TabularInput(file_obj, sidecar=self.default_sidecar)
            issues = file.validate_file([validator, def_mapper, onset_mapper], check_for_warnings=True)
            if result == "fails":
                if not issues:
                    print(f"{error_code}: {description}")
                    print(f"Passed this test(that should fail) '{name}': {test}")
                    print(issues)
                    self.fail_count += 1
            else:
                if issues:
                    print(f"{error_code}: {description}")
                    # Quote the name for consistency with the "should fail" report above.
                    print(f"Failed this test '{name}': {test}")
                    print(issues)
                    self.fail_count += 1

def test_summary(self):
    """Run every discovered spec-test file and assert no unexpected results occurred."""
    for current_file in self.test_files:
        self.run_single_test(current_file)
    print(f"{self.fail_count} tests got an unexpected result")
    self.assertEqual(self.fail_count, 0)

if __name__ == '__main__':
Expand Down
10 changes: 10 additions & 0 deletions spec_tests/test_sidecar.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
{
"onset": {
"Description": "Position of event marker in seconds relative to the start.",
"Units": "s"
},
"duration": {
"Description": "Duration of the event in seconds.",
"Units": "s"
}
}

0 comments on commit e1d6cdb

Please sign in to comment.