From ef4d7b7b763c3ede7a41e8f160909324e49468f2 Mon Sep 17 00:00:00 2001 From: Tapasvi Patel Date: Wed, 25 Sep 2024 17:31:02 +0000 Subject: [PATCH] #292: Added option to dump test results to a custom file and enable CI workflow to summarize ttrt run and perf test results as well as ttrt api test results --- .github/workflows/build-and-test.yml | 8 + docs/src/ttrt.md | 43 ++-- runtime/tools/python/test/test_check.py | 173 ++++++++++---- runtime/tools/python/test/test_perf.py | 154 ++++++++----- runtime/tools/python/test/test_query.py | 116 +++++++--- runtime/tools/python/test/test_read.py | 124 ++++++---- runtime/tools/python/test/test_run.py | 261 ++++++++++++++-------- runtime/tools/python/test/util.py | 3 + runtime/tools/python/ttrt/common/check.py | 9 +- runtime/tools/python/ttrt/common/perf.py | 9 +- runtime/tools/python/ttrt/common/query.py | 9 +- runtime/tools/python/ttrt/common/read.py | 9 +- runtime/tools/python/ttrt/common/run.py | 11 +- runtime/tools/python/ttrt/common/util.py | 36 +++ 14 files changed, 688 insertions(+), 277 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 138ebf981..bc225a0b1 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -242,6 +242,14 @@ jobs: source env/activate ttrt ${{ matrix.build.name }} ${{ matrix.build.ttrt_flags }} ${{ steps.strings.outputs.build-output-dir }}/test/ttmlir/Silicon --host-only + - name: Show Test Report + uses: mikepenz/action-junit-report@v4 + if: success() || failure() + with: + report_paths: ttrt_report.xml + check_name: TTRT ${{ matrix.build.runs-on }} ${{ matrix.build.name }} Tests + + run-ttrt-tests: timeout-minutes: 30 diff --git a/docs/src/ttrt.md b/docs/src/ttrt.md index c02d6f510..8df553c47 100644 --- a/docs/src/ttrt.md +++ b/docs/src/ttrt.md @@ -10,6 +10,24 @@ cmake --build build -- ttrt ttrt --help ``` +### Building runtime mode +Add the following flags when building the compiler +```bash +-DTTMLIR_ENABLE_RUNTIME=ON +``` + +If you are building with runtime mode on with `-DTTMLIR_ENABLE_RUNTIME=ON`, you will have to install the following packages when using ttrt +```bash +pip install torch +``` + +### Building perf mode +Add the following flags when building the compiler +```bash +-DTTMLIR_ENABLE_RUNTIME=ON +-DTT_RUNTIME_ENABLE_PERF_TRACE=ON +``` + ## LOGGER Levels ttrt support logging at different logger levels. You will need to set env var `TTRT_LOGGER_LEVEL`. By default, it will print all log messages. ```bash @@ -33,24 +51,6 @@ source ttrt_env/bin/activate pip install ttrt-0.0.235-cp310-cp310-linux_x86_64.whl ``` -### Building runtime mode -Add the following flags when building the compiler -```bash --DTTMLIR_ENABLE_RUNTIME=ON -``` - -If you are building with runtime mode on with `-DTTMLIR_ENABLE_RUNTIME=ON`, you will have to install the following packages when using ttrt -```bash -pip install torch -``` - -### Building perf mode -Add the following flags when building the compiler -```bash --DTTMLIR_ENABLE_RUNTIME=ON --DTT_RUNTIME_ENABLE_PERF_TRACE=ON -``` - ## Generate a flatbuffer file from compiler The compiler supports a pass to load a system descriptor to compile against. You can feed this pass into ttmlir-opt. @@ -134,6 +134,8 @@ ttrt check There are different ways you can use the APIs under ttrt. The first is via the command line as follows. All artifacts are saved under `ttrt-artifacts` folder under `TT_MLIR_HOME` environment variable. By default, all logging is printed to the terminal. 
You can specify a log file to dump output to. +Note: ttrt will only exit with error code 1 if user provides incorrect argument to any API. ttrt will dump a test result json file (user can provide as well) that will neatly list all the tests that were run and their results (pass, error, skip). This is to ensure all tests are run and one test will not stall/cancel all the other tests in the run. + ### read Read sections of a binary file @@ -153,6 +155,7 @@ ttrt read system_desc.ttsys ttrt read --section system_desc system_desc.ttsys ttrt read system_desc.ttsys --log-file ttrt.log ttrt read out.ttnn --save-artifacts --artifact-dir /path/to/some/dir +ttrt read out.ttnn --result-file result.json ``` ### run @@ -177,6 +180,7 @@ ttrt run /dir/of/flatbuffers --log-file ttrt.log ttrt run out.ttnn --save-artifacts --artifact-dir /path/to/some/dir ttrt run out.ttnn --load-kernels-from-disk ttrt run out.ttnn --enable-async-ttnn +ttrt run out.ttnn --result-file result.json ``` ### query @@ -191,6 +195,7 @@ ttrt query --save-artifacts ttrt query --clean-artifacts ttrt query --save-artifacts --log-file ttrt.log ttrt query --save-artifacts --artifact-dir /path/to/some/dir +ttrt query --result-file result.json ``` ### perf @@ -213,6 +218,7 @@ ttrt perf /dir/of/flatbuffers --host-only ttrt perf /dir/of/flatbuffers --loops 10 --host-only ttrt perf /dir/of/flatbuffers --log-file ttrt.log --host-only ttrt perf --save-artifacts --artifact-dir /path/to/some/dir +ttrt perf out.ttnn --result-file result.json ``` ### check @@ -228,6 +234,7 @@ ttrt check out.ttnn --save-artifacts ttrt check out.ttnn --log-file ttrt.log ttrt check /dir/of/flatbuffers --system-desc /dir/of/system_desc ttrt check --save-artifacts --artifact-dir /path/to/some/dir out.ttnn +ttrt check out.ttnn --result-file result.json ``` ## ttrt as a python package diff --git a/runtime/tools/python/test/test_check.py b/runtime/tools/python/test/test_check.py index 877160d89..ff4c49756 100644 --- a/runtime/tools/python/test/test_check.py +++ b/runtime/tools/python/test/test_check.py @@ -16,6 +16,7 @@ import shutil import atexit import pytest +import inspect import ttrt from ttrt.common.util import * @@ -27,28 +28,40 @@ def test_clean_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True check_instance = API.Check(args=custom_args) check_instance() assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={tinspect.currentframe().f_code.co_name}" def test_clean_artifacts_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --log-file {test_clean_artifacts_cmd.__name__}_check.log" + command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" sub_process_command(command) assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts_cmd.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_save_artifacts(): API.initialize_apis() 
custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True @@ -56,108 +69,153 @@ def test_save_artifacts(): check_instance() assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_save_artifacts_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file {test_save_artifacts_cmd.__name__}_check.log" + command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" sub_process_command(command) assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts_cmd.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_flatbuffer(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" custom_args["binary"] = BINARY_FILE_PATH check_instance = API.Check(args=custom_args) check_instance() assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_flatbuffer_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --log-file {test_flatbuffer_cmd.__name__}_check.log" + command = f"ttrt check {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" sub_process_command(command) assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer_cmd.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_dir_flatbuffer(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" custom_args["binary"] = DIRECTORY_PATH check_instance = API.Check(args=custom_args) check_instance() assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_dir_flatbuffer.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_dir_flatbuffer_cmd(): - command = f"ttrt check {DIRECTORY_PATH} --log-file {test_dir_flatbuffer_cmd.__name__}_check.log" + command = f"ttrt check {DIRECTORY_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" sub_process_command(command) assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed 
in={test_dir_flatbuffer_cmd.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_system_desc(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--system-desc"] = SYSTEM_DESC_FILE_PATH check_instance = API.Check(args=custom_args) check_instance() assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_system_desc.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_system_desc_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --system-desc {SYSTEM_DESC_FILE_PATH} --log-file {test_system_desc_cmd.__name__}_check.log" + command = f"ttrt check {BINARY_FILE_PATH} --system-desc {SYSTEM_DESC_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" sub_process_command(command) assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_system_desc_cmd.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_dir_system_descs(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--system-desc"] = SYSTEM_DESC_DIRECTORY_PATH check_instance = API.Check(args=custom_args) check_instance() assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_dir_system_descs.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_dir_system_descs_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --system-desc {SYSTEM_DESC_DIRECTORY_PATH} --log-file {test_dir_system_descs_cmd.__name__}_check.log" + command = f"ttrt check {BINARY_FILE_PATH} --system-desc {SYSTEM_DESC_DIRECTORY_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" sub_process_command(command) assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_dir_system_descs_cmd.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_logger(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" custom_args["binary"] = BINARY_FILE_PATH log_file_name = "test.log" custom_logger = Logger(log_file_name) @@ -165,13 +223,19 @@ def test_logger(): check_instance() assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_logger.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def 
test_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" custom_args["binary"] = BINARY_FILE_PATH log_file_name = "test.log" custom_logger = Logger(log_file_name) @@ -183,35 +247,50 @@ def test_artifacts(): check_instance() assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_artifacts.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_log_file(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--log-file"] = "test.log" check_instance = API.Check(args=custom_args) check_instance() assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_log_file.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_log_file_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --log-file {test_log_file_cmd.__name__}_check.log" + command = f"ttrt check {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" sub_process_command(command) assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_log_file_cmd.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_artifact_dir(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True @@ -220,14 +299,20 @@ def test_artifact_dir(): check_instance() assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_artifact_dir_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file {test_artifact_dir_cmd.__name__}_check.log" + command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" sub_process_command(command) assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir_cmd.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_check.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" diff --git a/runtime/tools/python/test/test_perf.py b/runtime/tools/python/test/test_perf.py index dcba2dc23..824c744a4 100644 --- a/runtime/tools/python/test/test_perf.py +++ b/runtime/tools/python/test/test_perf.py @@ -16,6 +16,7 @@ import shutil import 
atexit import pytest +import inspect import ttrt from ttrt.common.util import * @@ -27,50 +28,63 @@ def test_flatbuffer(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--host-only"] = True perf_instance = API.Perf(args=custom_args) perf_instance() assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_flatbuffer_cmd(): - command = f"ttrt perf {BINARY_FILE_PATH} --host-only --log-file {test_flatbuffer_cmd.__name__}_perf.log" + command = f"ttrt perf {BINARY_FILE_PATH} --host-only --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" sub_process_command(command) assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_dir_flatbuffer(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" custom_args["binary"] = DIRECTORY_PATH custom_args["--host-only"] = True perf_instance = API.Perf(args=custom_args) perf_instance() assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_dir_flatbuffer.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_dir_flatbuffer_cmd(): - command = f"ttrt perf {DIRECTORY_PATH} --host-only --log-file {test_dir_flatbuffer_cmd.__name__}_perf.log" + command = f"ttrt perf {DIRECTORY_PATH} --host-only --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" sub_process_command(command) assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_dir_flatbuffer_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_logger(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--host-only"] = True log_file_name = "test.log" @@ -79,13 +93,17 @@ def test_logger(): perf_instance() assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_logger.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--host-only"] = True log_file_name = "test.log" @@ -98,13 +116,17 @@ def test_artifacts(): perf_instance() assert ( - check_results("perf_results.json") == 
0 - ), f"one of more tests failed in={test_artifacts.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_clean_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--host-only"] = True custom_args["--clean-artifacts"] = True @@ -112,22 +134,27 @@ def test_clean_artifacts(): perf_instance() assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_clean_artifacts_cmd(): - command = f"ttrt perf {BINARY_FILE_PATH} --host-only --clean-artifacts --log-file {test_clean_artifacts_cmd.__name__}_perf.log" + command = f"ttrt perf {BINARY_FILE_PATH} --host-only --clean-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" sub_process_command(command) assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_save_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--host-only"] = True custom_args["--clean-artifacts"] = True @@ -136,22 +163,27 @@ def test_save_artifacts(): perf_instance() assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_save_artifacts_cmd(): - command = f"ttrt perf {BINARY_FILE_PATH} --host-only --clean-artifacts --save-artifacts --log-file {test_save_artifacts_cmd.__name__}_perf.log" + command = f"ttrt perf {BINARY_FILE_PATH} --host-only --clean-artifacts --save-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" sub_process_command(command) assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_log_file(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--host-only"] = True custom_args["--log-file"] = "test.log" @@ -159,22 +191,27 @@ def test_log_file(): perf_instance() assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_log_file.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed 
in={inspect.currentframe().f_code.co_name}" def test_log_file_cmd(): - command = f"ttrt perf {BINARY_FILE_PATH} --host-only --log-file test.log --log-file {test_log_file_cmd.__name__}_perf.log" + command = f"ttrt perf {BINARY_FILE_PATH} --host-only --log-file ttrt-results/test.log --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" sub_process_command(command) assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_log_file_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_artifact_dir(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--host-only"] = True custom_args["--clean-artifacts"] = True @@ -184,22 +221,27 @@ def test_artifact_dir(): perf_instance() assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_artifact_dir_cmd(): - command = f"ttrt perf {BINARY_FILE_PATH} --host-only --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file {test_artifact_dir_cmd.__name__}_perf.log" + command = f"ttrt perf {BINARY_FILE_PATH} --host-only --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" sub_process_command(command) assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_program_index(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--host-only"] = True custom_args["--program-index"] = "0" @@ -207,22 +249,27 @@ def test_program_index(): perf_instance() assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_program_index.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_program_index_cmd(): - command = f"ttrt perf {BINARY_FILE_PATH} --host-only --program-index 0 --log-file {test_program_index_cmd.__name__}_perf.log" + command = f"ttrt perf {BINARY_FILE_PATH} --host-only --program-index 0 --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" sub_process_command(command) assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_program_index_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_loops(): 
API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--host-only"] = True custom_args["--loops"] = 1 @@ -230,37 +277,42 @@ def test_loops(): perf_instance() assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_loops.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_loops_cmd(): - command = f"ttrt perf {BINARY_FILE_PATH} --host-only --loops 1 --log-file {test_loops_cmd.__name__}_perf.log" + command = f"ttrt perf {BINARY_FILE_PATH} --host-only --loops 1 --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" sub_process_command(command) assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_loops_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_device(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" custom_args["binary"] = BINARY_FILE_PATH perf_instance = API.Perf(args=custom_args) perf_instance() assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_device.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_device_cmd(): - command = ( - f"ttrt perf {BINARY_FILE_PATH} --log-file {test_device_cmd.__name__}_perf.log" - ) + command = f"ttrt perf {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json" sub_process_command(command) assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_device_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_perf.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" diff --git a/runtime/tools/python/test/test_query.py b/runtime/tools/python/test/test_query.py index 3e3a6ece3..073dcce6f 100644 --- a/runtime/tools/python/test/test_query.py +++ b/runtime/tools/python/test/test_query.py @@ -16,6 +16,7 @@ import shutil import atexit import pytest +import inspect import ttrt from ttrt.common.util import * @@ -27,70 +28,100 @@ def test_clean_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" custom_args["--clean-artifacts"] = True query_instance = API.Query(args=custom_args) query_instance() assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_clean_artifacts_cmd(): - command = f"ttrt query --clean-artifacts --log-file {test_clean_artifacts_cmd.__name__}_query.log" + command = f"ttrt query --clean-artifacts --log-file 
ttrt-results/{inspect.currentframe().f_code.co_name}_query.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" sub_process_command(command) assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts_cmd.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_save_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True query_instance = API.Query(args=custom_args) query_instance() assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_save_artifacts_cmd(): - command = f"ttrt query --clean-artifacts --save-artifacts --log-file {test_save_artifacts_cmd.__name__}_query.log" + command = f"ttrt query --clean-artifacts --save-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_query.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" sub_process_command(command) assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts_cmd.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_log_file(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" custom_args["--log-file"] = "test.log" query_instance = API.Query(args=custom_args) query_instance() assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_log_file.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_log_file_cmd(): - command = f"ttrt query --log-file {test_log_file_cmd.__name__}_query.log" + command = f"ttrt query --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_query.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" sub_process_command(command) assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_log_file_cmd.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_artifact_dir(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True custom_args["--artifact-dir"] = f"{os.getcwd()}/test-artifacts" @@ -98,35 +129,50 @@ def test_artifact_dir(): query_instance() assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" + ) + == 0 + ), f"one of more tests failed 
in={inspect.currentframe().f_code.co_name}" def test_artifact_dir_cmd(): - command = f"ttrt query --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file {test_artifact_dir_cmd.__name__}_query.log" + command = f"ttrt query --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_query.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" sub_process_command(command) assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir_cmd.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_logger(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" log_file_name = "test.log" custom_logger = Logger(log_file_name) query_instance = API.Query(args=custom_args, logger=custom_logger) query_instance() assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_logger.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" log_file_name = "test.log" custom_logger = Logger(log_file_name) artifacts_folder_path = f"{os.getcwd()}/test-artifacts" @@ -137,26 +183,38 @@ def test_artifacts(): query_instance() assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_artifacts.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_quiet(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" custom_args["--quiet"] = True query_instance = API.Query(args=custom_args) query_instance() assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_quiet.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_quiet_cmd(): - command = f"ttrt query --quiet --log-file {test_quiet_cmd.__name__}_query.log" + command = f"ttrt query --quiet --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_query.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" sub_process_command(command) assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_quiet_cmd.__name__}" + check_results( + f"ttrt-results/{inspect.currentframe().f_code.co_name}_query.json" + ) + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" diff --git a/runtime/tools/python/test/test_read.py b/runtime/tools/python/test/test_read.py index 76bd09c7b..7ea175a6d 100644 --- a/runtime/tools/python/test/test_read.py +++ b/runtime/tools/python/test/test_read.py @@ -16,6 +16,7 @@ import shutil import atexit import pytest +import inspect import ttrt from ttrt.common.util import * @@ -27,48 +28,61 @@ def 
test_flatbuffer(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" custom_args["binary"] = BINARY_FILE_PATH read_instance = API.Read(args=custom_args) read_instance() assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_flatbuffer_cmd(): - command = f"ttrt read {BINARY_FILE_PATH} --log-file {test_flatbuffer_cmd.__name__}_read.log" + command = f"ttrt read {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_read.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" sub_process_command(command) assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_dir_flatbuffer(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" custom_args["binary"] = DIRECTORY_PATH read_instance = API.Read(args=custom_args) read_instance() assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_dir_flatbuffer.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_dir_flatbuffer_cmd(): - command = f"ttrt read {DIRECTORY_PATH} --log-file {test_dir_flatbuffer_cmd.__name__}_read.log" + command = f"ttrt read {DIRECTORY_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_read.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" sub_process_command(command) assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_dir_flatbuffer_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_logger(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" custom_args["binary"] = BINARY_FILE_PATH log_file_name = "test.log" custom_logger = Logger(log_file_name) @@ -76,24 +90,27 @@ def test_logger(): read_instance() assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_logger.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_logger_cmd(): - command = ( - f"ttrt read {BINARY_FILE_PATH} --log-file {test_logger_cmd.__name__}_read.log" - ) + command = f"ttrt read {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_read.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" sub_process_command(command) assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_logger_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed 
in={inspect.currentframe().f_code.co_name}" def test_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" custom_args["binary"] = BINARY_FILE_PATH log_file_name = "test.log" custom_logger = Logger(log_file_name) @@ -105,44 +122,54 @@ def test_artifacts(): read_instance() assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_artifacts.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_artifacts_cmd(): - command = f"ttrt read {BINARY_FILE_PATH} --artifact-dir {os.getcwd()}/test-artifacts --log-file {test_artifacts_cmd.__name__}_read.log" + command = f"ttrt read {BINARY_FILE_PATH} --artifact-dir {os.getcwd()}/test-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_read.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" sub_process_command(command) assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_artifacts_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_clean_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True read_instance = API.Read(args=custom_args) read_instance() assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_clean_artifacts_cmd(): - command = f"ttrt read {BINARY_FILE_PATH} --clean-artifacts --log-file {test_clean_artifacts_cmd.__name__}_read.log" + command = f"ttrt read {BINARY_FILE_PATH} --clean-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_read.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" sub_process_command(command) assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_save_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True @@ -150,35 +177,44 @@ def test_save_artifacts(): read_instance() assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_save_artifacts_cmd(): - command = f"ttrt read {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file {test_save_artifacts_cmd.__name__}_read.log" + command = f"ttrt read {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file 
ttrt-results/{inspect.currentframe().f_code.co_name}_read.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" sub_process_command(command) assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_log_file(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--log-file"] = "test.log" read_instance = API.Read(args=custom_args) read_instance() assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_log_file.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_artifact_dir(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True @@ -187,27 +223,33 @@ def test_artifact_dir(): read_instance() assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_section(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--section"] = "all" read_instance = API.Read(args=custom_args) read_instance() assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_section.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_section_cmd(): - command = f"ttrt read {BINARY_FILE_PATH} --section mlir --log-file {test_section_cmd.__name__}_read.log" + command = f"ttrt read {BINARY_FILE_PATH} --section mlir --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_read.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_read.json" sub_process_command(command) assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_section_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_read.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" diff --git a/runtime/tools/python/test/test_run.py b/runtime/tools/python/test/test_run.py index 1648b708c..c75db3d71 100644 --- a/runtime/tools/python/test/test_run.py +++ b/runtime/tools/python/test/test_run.py @@ -16,6 +16,7 @@ import shutil import atexit import pytest +import inspect import ttrt from ttrt.common.util import * @@ -27,50 +28,61 @@ def test_flatbuffer(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH run_instance = API.Run(args=custom_args) run_instance() assert ( - 
check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_flatbuffer_cmd(): - command = ( - f"ttrt run {BINARY_FILE_PATH} --log-file {test_flatbuffer_cmd.__name__}_run.log" - ) + command = f"ttrt run {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_dir_flatbuffer(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = DIRECTORY_PATH run_instance = API.Run(args=custom_args) run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_dir_flatbuffer.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_dir_flatbuffer_cmd(): - command = f"ttrt run {DIRECTORY_PATH} --log-file {test_dir_flatbuffer_cmd.__name__}_run.log" + command = f"ttrt run {DIRECTORY_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_dir_flatbuffer_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_logger(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH log_file_name = "test.log" custom_logger = Logger(log_file_name) @@ -78,13 +90,17 @@ def test_logger(): run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_logger.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH log_file_name = "test.log" custom_logger = Logger(log_file_name) @@ -96,35 +112,44 @@ def test_artifacts(): run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_artifacts.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_clean_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True run_instance = 
API.Run(args=custom_args) run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_clean_artifacts_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --clean-artifacts --log-file {test_clean_artifacts_cmd.__name__}_run.log" + command = f"ttrt run {BINARY_FILE_PATH} --clean-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_save_artifacts(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True @@ -132,46 +157,54 @@ def test_save_artifacts(): run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_save_artifacts_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file {test_save_artifacts_cmd.__name__}_run.log" + command = f"ttrt run {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_log_file(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--log-file"] = "test.log" run_instance = API.Run(args=custom_args) run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_log_file.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_log_file_cmd(): - command = ( - f"ttrt run {BINARY_FILE_PATH} --log-file {test_log_file_cmd.__name__}_run.log" - ) + command = f"ttrt run {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_log_file_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def 
test_artifact_dir(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True @@ -180,236 +213,288 @@ def test_artifact_dir(): run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_artifact_dir_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file {test_artifact_dir_cmd.__name__}_run.log" + command = f"ttrt run {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_program_index(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--program-index"] = "0" run_instance = API.Run(args=custom_args) run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_program_index.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_program_index_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --program-index 0 --log-file {test_program_index_cmd.__name__}_run.log" + command = f"ttrt run {BINARY_FILE_PATH} --program-index 0 --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_program_index_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_loops(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--loops"] = 1 run_instance = API.Run(args=custom_args) run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_loops.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_loops_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --loops 1 --log-file {test_loops_cmd.__name__}_run.log" + command = f"ttrt run {BINARY_FILE_PATH} --loops 1 --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" 
sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_loops_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_init(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--init"] = "randn" run_instance = API.Run(args=custom_args) run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_init.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_init_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --init randn --log-file {test_init_cmd.__name__}_run.log" + command = f"ttrt run {BINARY_FILE_PATH} --init randn --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_init_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" @pytest.mark.skip def test_identity(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--identity"] = True run_instance = API.Run(args=custom_args) run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_identity.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" @pytest.mark.skip def test_identity_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --identity --log-file {test_identity_cmd.__name__}_run.log" + command = f"ttrt run {BINARY_FILE_PATH} --identity --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_identity_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_non_zero(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--non-zero"] = True run_instance = API.Run(args=custom_args) run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_non_zero.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_non_zero_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --non-zero --log-file {test_non_zero_cmd.__name__}_run.log" + command = f"ttrt run {BINARY_FILE_PATH} --non-zero --log-file 
ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_non_zero_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_rtol(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--rtol"] = 1e-05 run_instance = API.Run(args=custom_args) run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_rtol.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_rtol_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --rtol 1e-05 --log-file {test_rtol_cmd.__name__}_run.log" + command = f"ttrt run {BINARY_FILE_PATH} --rtol 1e-05 --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_rtol_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_atol(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--atol"] = 1e-08 run_instance = API.Run(args=custom_args) run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_atol.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_atol_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --atol 1e-08 --log-file {test_atol_cmd.__name__}_run.log" + command = f"ttrt run {BINARY_FILE_PATH} --atol 1e-08 --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_atol_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_seed(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--seed"] = 1 run_instance = API.Run(args=custom_args) run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_seed.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_seed_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --seed 1 --log-file {test_seed_cmd.__name__}_run.log" + command = f"ttrt run {BINARY_FILE_PATH} --seed 1 
--log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_seed_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_load_kernels_from_disk(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--load-kernels-from-disk"] = True run_instance = API.Run(args=custom_args) run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_load_kernels_from_disk.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_load_kernels_from_disk_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --load-kernels-from-disk --log-file {test_load_kernels_from_disk_cmd.__name__}_run.log" + command = f"ttrt run {BINARY_FILE_PATH} --load-kernels-from-disk --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_load_kernels_from_disk_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_enable_async_ttnn(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--enable-async-ttnn"] = True run_instance = API.Run(args=custom_args) run_instance() assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_enable_async_ttnn.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" def test_enable_async_ttnn_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --enable-async-ttnn --log-file {test_enable_async_ttnn_cmd.__name__}_run.log" + command = f"ttrt run {BINARY_FILE_PATH} --enable-async-ttnn --log-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}_run.json" sub_process_command(command) assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_enable_async_ttnn_cmd.__name__}" + check_results(f"ttrt-results/{inspect.currentframe().f_code.co_name}_run.json") + == 0 + ), f"one of more tests failed in={inspect.currentframe().f_code.co_name}" diff --git a/runtime/tools/python/test/util.py b/runtime/tools/python/test/util.py index ac63e87a2..728e5e561 100644 --- a/runtime/tools/python/test/util.py +++ b/runtime/tools/python/test/util.py @@ -26,6 +26,9 @@ SYSTEM_DESC_FILE_PATH = f"{TT_MLIR_HOME}/ttrt-artifacts/system_desc.ttsys" SYSTEM_DESC_DIRECTORY_PATH = f"{TT_MLIR_HOME}/build/test/ttmlir/Silicon/TTNN" +if not os.path.isdir("ttrt-results"): + os.makedirs("ttrt-results") + def sub_process_command(test_command): result = 
subprocess.run( diff --git a/runtime/tools/python/ttrt/common/check.py b/runtime/tools/python/ttrt/common/check.py index 15730c8c7..2efc1b19a 100644 --- a/runtime/tools/python/ttrt/common/check.py +++ b/runtime/tools/python/ttrt/common/check.py @@ -60,6 +60,13 @@ def initialize_api(): choices=None, help="system desc to check against", ) + Check.register_arg( + name="--result-file", + type=str, + default="check_results.json", + choices=None, + help="test file to save results to", + ) Check.register_arg( name="binary", type=str, @@ -253,7 +260,7 @@ def postprocess(self): for bin in self.ttmetal_binaries: self.artifacts.save_binary(bin) - self.results.save_results("check_results.json") + self.results.save_results(self["--result-file"]) self.logging.debug(f"------finished postprocessing check API") diff --git a/runtime/tools/python/ttrt/common/perf.py b/runtime/tools/python/ttrt/common/perf.py index 73f320e92..ef8e110ea 100644 --- a/runtime/tools/python/ttrt/common/perf.py +++ b/runtime/tools/python/ttrt/common/perf.py @@ -74,6 +74,13 @@ def initialize_api(): choices=[True, False], help="collect performance trace on host only", ) + Perf.register_arg( + name="--result-file", + type=str, + default="perf_results.json", + choices=None, + help="test file to save results to", + ) Perf.register_arg( name="binary", type=str, @@ -492,7 +499,7 @@ def postprocess(self): } self.results.add_result(test_result) - self.results.save_results("perf_results.json") + self.results.save_results(self["--result-file"]) self.logging.debug(f"------finished postprocessing perf API") diff --git a/runtime/tools/python/ttrt/common/query.py b/runtime/tools/python/ttrt/common/query.py index 5966a86d5..457a2c0b1 100644 --- a/runtime/tools/python/ttrt/common/query.py +++ b/runtime/tools/python/ttrt/common/query.py @@ -59,6 +59,13 @@ def initialize_api(): choices=[True, False], help="suppress system desc from being printed", ) + Query.register_arg( + name="--result-file", + type=str, + default="query_results.json", + choices=None, + help="test file to save results to", + ) def __init__(self, args={}, logger=None, artifacts=None): for name, attributes in Query.registered_args.items(): @@ -149,7 +156,7 @@ def postprocess(self): } self.results.add_result(test_result) - self.results.save_results("query_results.json") + self.results.save_results(self["--result-file"]) self.logging.debug(f"------finished postprocessing query API") diff --git a/runtime/tools/python/ttrt/common/read.py b/runtime/tools/python/ttrt/common/read.py index 8dc5df660..8b9af1f31 100644 --- a/runtime/tools/python/ttrt/common/read.py +++ b/runtime/tools/python/ttrt/common/read.py @@ -68,6 +68,13 @@ def initialize_api(): choices=None, help="provides a directory path to save artifacts to", ) + Read.register_arg( + name="--result-file", + type=str, + default="read_results.json", + choices=None, + help="test file to save results to", + ) Read.register_arg( name="binary", type=str, @@ -287,7 +294,7 @@ def postprocess(self): self.results.add_result(test_result) - self.results.save_results("read_results.json") + self.results.save_results(self["--result-file"]) self.logging.debug(f"------finished postprocessing read API") diff --git a/runtime/tools/python/ttrt/common/run.py b/runtime/tools/python/ttrt/common/run.py index f907e57e3..ec9f423af 100644 --- a/runtime/tools/python/ttrt/common/run.py +++ b/runtime/tools/python/ttrt/common/run.py @@ -121,7 +121,14 @@ def initialize_api(): type=bool, default=False, choices=[True, False], - help="disable async mode device 
execution for TTNN runtime",
+            help="enable async mode device execution for TTNN runtime",
+        )
+        Run.register_arg(
+            name="--result-file",
+            type=str,
+            default="run_results.json",
+            choices=None,
+            help="test file to save results to",
         )
         Run.register_arg(
             name="binary",
@@ -479,7 +486,7 @@ def postprocess(self):
             }
             self.results.add_result(test_result)
 
-        self.results.save_results("run_results.json")
+        self.results.save_results(self["--result-file"])
 
         self.logging.debug(f"------finished postprocessing run API")
 
diff --git a/runtime/tools/python/ttrt/common/util.py b/runtime/tools/python/ttrt/common/util.py
index 8e4e6d049..e3316dca0 100644
--- a/runtime/tools/python/ttrt/common/util.py
+++ b/runtime/tools/python/ttrt/common/util.py
@@ -672,3 +672,39 @@ def save_results(self, file_name="results.json"):
             json.dump(self.results, file, indent=2)
 
         self.logging.info(f"results saved to={file_name}")
+
+        # count total tests, skips and failures
+        with open(file_name, "r") as file:
+            data = json.load(file)
+
+        import xml.etree.ElementTree as ET
+
+        total_tests = len(data)
+        failures = sum(1 for item in data if item.get("result", "") != "pass")
+        skipped = sum(1 for item in data if item.get("result", "") == "skipped")
+
+        testsuites = ET.Element("testsuites")
+        testsuites.set("name", "TTRT")
+        testsuites.set("tests", str(total_tests))
+        testsuites.set("failures", str(failures))
+        testsuites.set("skipped", str(skipped))
+
+        testsuite = ET.SubElement(testsuites, "testsuite")
+        testsuite.set("name", "TTRT")
+        testsuite.set("tests", str(total_tests))
+        testsuite.set("failures", str(failures))
+        testsuite.set("skipped", str(skipped))
+
+        for item in data:
+            testcase = ET.SubElement(testsuite, "testcase")
+            testcase.set("name", item.get("file_path", ""))
+            testcase.set("file_path", item.get("file_path", ""))
+            testcase.set("result", item.get("result", ""))
+            testcase.set("exception", item.get("exception", ""))
+            testcase.set("log_file", item.get("log_file", ""))
+            testcase.set("artifacts", item.get("artifacts", ""))
+            testcase.set("program_index", item.get("program_index", ""))
+
+        tree = ET.ElementTree(testsuites)
+        xml_file_path = "ttrt_report.xml"
+        tree.write(xml_file_path, encoding="utf-8", xml_declaration=True)
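
Note (illustrative only, not part of the patch): the new `save_results` logic above converts the per-test result JSON into a JUnit-style `ttrt_report.xml` that the `action-junit-report` workflow step can summarize. A minimal standalone sketch of that conversion, using hypothetical result entries, looks like this:

```python
# Illustrative sketch -- mirrors the shape of the report save_results() writes.
# The result entries below are hypothetical; real entries come from the ttrt APIs.
import xml.etree.ElementTree as ET

results = [
    {"file_path": "out.ttnn", "result": "pass", "exception": "", "program_index": "0"},
    {"file_path": "bad.ttnn", "result": "error", "exception": "RuntimeError", "program_index": "0"},
]

testsuites = ET.Element("testsuites", name="TTRT", tests=str(len(results)))
testsuite = ET.SubElement(testsuites, "testsuite", name="TTRT")
for item in results:
    # one <testcase> per flatbuffer run, with the result fields stored as attributes
    ET.SubElement(testsuite, "testcase", name=item["file_path"], result=item["result"])

print(ET.tostring(testsuites, encoding="unicode"))
# roughly: <testsuites name="TTRT" tests="2"><testsuite name="TTRT">
#          <testcase name="out.ttnn" result="pass" /> ... </testsuite></testsuites>
```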
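
Similarly, the test files in this patch all derive their per-test result path from `inspect.currentframe().f_code.co_name`. A hypothetical helper (not in the patch) that captures the same convention:

```python
# Hypothetical helper capturing the per-test result path convention used above.
import inspect
import os

def result_file(suffix="run"):
    """Return ttrt-results/<calling test name>_<suffix>.json, e.g.
    ttrt-results/test_loops_run.json when called from test_loops()."""
    caller = inspect.stack()[1].function  # name of the test function that called us
    os.makedirs("ttrt-results", exist_ok=True)
    return os.path.join("ttrt-results", f"{caller}_{suffix}.json")
```

Each test could then set `custom_args["--result-file"] = result_file()` and assert against the same path.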