diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index b42e3612af..6641fbda3f 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -286,6 +286,13 @@ jobs: name: ${{ matrix.build.runs-on }}_${{ matrix.build.name }}_results.json path: ${{ matrix.build.name }}_results.json + - name: Show Test Report + uses: mikepenz/action-junit-report@v4 + if: success() || failure() + with: + report_paths: ttrt_report.xml + check_name: TTRT ${{ matrix.build.runs-on }} ${{ matrix.build.name }} Tests + run-ttrt-tests: timeout-minutes: 30 diff --git a/docs/src/ttrt.md b/docs/src/ttrt.md index d69ae44adc..d5d6d87c7c 100644 --- a/docs/src/ttrt.md +++ b/docs/src/ttrt.md @@ -10,6 +10,19 @@ cmake --build build -- ttrt ttrt --help ``` +### Building runtime mode +Add the following flags when building the compiler +```bash +-DTTMLIR_ENABLE_RUNTIME=ON +``` + +### Building perf mode +Add the following flags when building the compiler +```bash +-DTTMLIR_ENABLE_RUNTIME=ON +-DTT_RUNTIME_ENABLE_PERF_TRACE=ON +``` + ## LOGGER Levels ttrt support logging at different logger levels. You will need to set env var `TTRT_LOGGER_LEVEL`. By default, it will print all log messages. ```bash @@ -33,24 +46,6 @@ source ttrt_env/bin/activate pip install ttrt-0.0.235-cp310-cp310-linux_x86_64.whl ``` -### Building runtime mode -Add the following flags when building the compiler -```bash --DTTMLIR_ENABLE_RUNTIME=ON -``` - -If you are building with runtime mode on with `-DTTMLIR_ENABLE_RUNTIME=ON`, you will have to install the following packages when using ttrt -```bash -pip install torch -``` - -### Building perf mode -Add the following flags when building the compiler -```bash --DTTMLIR_ENABLE_RUNTIME=ON --DTT_RUNTIME_ENABLE_PERF_TRACE=ON -``` - ## Generate a flatbuffer file from compiler The compiler supports a pass to load a system descriptor to compile against. You can feed this pass into ttmlir-opt. @@ -153,6 +148,7 @@ ttrt read system_desc.ttsys ttrt read --section system_desc system_desc.ttsys ttrt read system_desc.ttsys --log-file ttrt.log ttrt read out.ttnn --save-artifacts --artifact-dir /path/to/some/dir +ttrt read out.ttnn --result-file result.json ``` ### run @@ -177,6 +173,7 @@ ttrt run /dir/of/flatbuffers --log-file ttrt.log ttrt run out.ttnn --save-artifacts --artifact-dir /path/to/some/dir ttrt run out.ttnn --load-kernels-from-disk ttrt run out.ttnn --enable-async-ttnn +ttrt run out.ttnn --result-file result.json ``` ### query @@ -191,6 +188,7 @@ ttrt query --save-artifacts ttrt query --clean-artifacts ttrt query --save-artifacts --log-file ttrt.log ttrt query --save-artifacts --artifact-dir /path/to/some/dir +ttrt query --result-file result.json ``` ### perf @@ -219,6 +217,7 @@ ttrt perf /dir/of/flatbuffers --host-only ttrt perf /dir/of/flatbuffers --loops 10 --host-only ttrt perf /dir/of/flatbuffers --log-file ttrt.log --host-only ttrt perf --save-artifacts --artifact-dir /path/to/some/dir +ttrt perf out.ttnn --result-file result.json ``` To use the Tracy GUI, run the following instructions on your macbook. You can upload your .tracy file into the GUI to view the profiled dumps. 
@@ -242,6 +241,7 @@ ttrt check out.ttnn --save-artifacts ttrt check out.ttnn --log-file ttrt.log ttrt check /dir/of/flatbuffers --system-desc /dir/of/system_desc ttrt check --save-artifacts --artifact-dir /path/to/some/dir out.ttnn +ttrt check out.ttnn --result-file result.json ``` ## ttrt as a python package diff --git a/runtime/tools/python/test/conftest.py b/runtime/tools/python/test/conftest.py new file mode 100644 index 0000000000..bb93fc0e36 --- /dev/null +++ b/runtime/tools/python/test/conftest.py @@ -0,0 +1,26 @@ +# SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 + +import os +import pytest + +from util import * + + +@pytest.fixture(scope="session", autouse=True) +def session_setup(): + directory_name = "ttrt-results" + if not os.path.exists(directory_name): + try: + os.mkdir(directory_name) + except Exception as e: + print(f"An error occurred while creating the directory: {e}") + + yield + + +def pytest_runtest_teardown(item, nextitem): + assert ( + check_results(f"ttrt-results/{item.name}.json") == 0 + ), f"one or more tests failed in={item.name}" diff --git a/runtime/tools/python/test/test_check.py b/runtime/tools/python/test/test_check.py index 877160d89a..b1f51af14f 100644 --- a/runtime/tools/python/test/test_check.py +++ b/runtime/tools/python/test/test_check.py @@ -16,6 +16,7 @@ import shutil import atexit import pytest +import inspect import ttrt from ttrt.common.util import * @@ -24,154 +25,126 @@ from util import * -def test_clean_artifacts(): +def test_clean_artifacts_check(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True check_instance = API.Check(args=custom_args) check_instance() - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts.__name__}" - -def test_clean_artifacts_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --log-file {test_clean_artifacts_cmd.__name__}_check.log" +def test_clean_artifacts_cmd_check(): + command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts_cmd.__name__}" - -def test_save_artifacts(): +def test_save_artifacts_check(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True check_instance = API.Check(args=custom_args) check_instance() - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts.__name__}" - -def test_save_artifacts_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file {test_save_artifacts_cmd.__name__}_check.log" +def test_save_artifacts_cmd_check(): + command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("check_results.json") == 0 - ), f"one of 
more tests failed in={test_save_artifacts_cmd.__name__}" - -def test_flatbuffer(): +def test_flatbuffer_check(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH check_instance = API.Check(args=custom_args) check_instance() - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer.__name__}" - -def test_flatbuffer_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --log-file {test_flatbuffer_cmd.__name__}_check.log" +def test_flatbuffer_cmd_check(): + command = f"ttrt check {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer_cmd.__name__}" - -def test_dir_flatbuffer(): +def test_dir_flatbuffer_check(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = DIRECTORY_PATH check_instance = API.Check(args=custom_args) check_instance() - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_dir_flatbuffer.__name__}" - -def test_dir_flatbuffer_cmd(): - command = f"ttrt check {DIRECTORY_PATH} --log-file {test_dir_flatbuffer_cmd.__name__}_check.log" +def test_dir_flatbuffer_cmd_check(): + command = f"ttrt check {DIRECTORY_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_dir_flatbuffer_cmd.__name__}" - -def test_system_desc(): +def test_system_desc_check(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--system-desc"] = SYSTEM_DESC_FILE_PATH check_instance = API.Check(args=custom_args) check_instance() - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_system_desc.__name__}" - -def test_system_desc_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --system-desc {SYSTEM_DESC_FILE_PATH} --log-file {test_system_desc_cmd.__name__}_check.log" +def test_system_desc_cmd_check(): + command = f"ttrt check {BINARY_FILE_PATH} --system-desc {SYSTEM_DESC_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_system_desc_cmd.__name__}" - -def test_dir_system_descs(): +def test_dir_system_descs_check(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--system-desc"] = SYSTEM_DESC_DIRECTORY_PATH check_instance = API.Check(args=custom_args) check_instance() - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_dir_system_descs.__name__}" - -def test_dir_system_descs_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --system-desc 
{SYSTEM_DESC_DIRECTORY_PATH} --log-file {test_dir_system_descs_cmd.__name__}_check.log" +def test_dir_system_descs_cmd_check(): + command = f"ttrt check {BINARY_FILE_PATH} --system-desc {SYSTEM_DESC_DIRECTORY_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_dir_system_descs_cmd.__name__}" - -def test_logger(): +def test_logger_check(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH log_file_name = "test.log" custom_logger = Logger(log_file_name) check_instance = API.Check(args=custom_args, logger=custom_logger) check_instance() - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_logger.__name__}" - -def test_artifacts(): +def test_artifacts_check(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH log_file_name = "test.log" custom_logger = Logger(log_file_name) @@ -182,36 +155,30 @@ def test_artifacts(): check_instance = API.Check(args=custom_args, artifacts=custom_artifacts) check_instance() - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_artifacts.__name__}" - -def test_log_file(): +def test_log_file_check(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--log-file"] = "test.log" check_instance = API.Check(args=custom_args) check_instance() - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_log_file.__name__}" - -def test_log_file_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --log-file {test_log_file_cmd.__name__}_check.log" +def test_log_file_cmd_check(): + command = f"ttrt check {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_log_file_cmd.__name__}" - -def test_artifact_dir(): +def test_artifact_dir_check(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True @@ -219,15 +186,7 @@ def test_artifact_dir(): check_instance = API.Check(args=custom_args) check_instance() - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir.__name__}" - -def test_artifact_dir_cmd(): - command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file {test_artifact_dir_cmd.__name__}_check.log" +def test_artifact_dir_cmd_check(): + command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" 
sub_process_command(command) - - assert ( - check_results("check_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir_cmd.__name__}" diff --git a/runtime/tools/python/test/test_perf.py b/runtime/tools/python/test/test_perf.py index 149d998f84..e5c953b2f3 100644 --- a/runtime/tools/python/test/test_perf.py +++ b/runtime/tools/python/test/test_perf.py @@ -16,6 +16,7 @@ import shutil import atexit import pytest +import inspect import ttrt from ttrt.common.util import * @@ -24,31 +25,29 @@ from util import * -def test_flatbuffer(): +def test_flatbuffer_perf(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = PERF_BINARY_FILE_PATH custom_args["--host-only"] = True perf_instance = API.Perf(args=custom_args) perf_instance() - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer.__name__}" - -def test_flatbuffer_cmd(): - command = f"ttrt perf {PERF_BINARY_FILE_PATH} --log-file {test_flatbuffer_cmd.__name__}_perf.log --host-only" +def test_flatbuffer_cmd_perf(): + command = f"ttrt perf {PERF_BINARY_FILE_PATH} --host-only --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer_cmd.__name__}" - -def test_logger(): +def test_logger_perf(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = PERF_BINARY_FILE_PATH custom_args["--host-only"] = True log_file_name = "test.log" @@ -56,37 +55,31 @@ def test_logger(): perf_instance = API.Perf(args=custom_args, logger=custom_logger) perf_instance() - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_logger.__name__}" - -def test_clean_artifacts(): +def test_clean_artifacts_perf(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = PERF_BINARY_FILE_PATH custom_args["--host-only"] = True custom_args["--clean-artifacts"] = True perf_instance = API.Perf(args=custom_args) perf_instance() - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts.__name__}" - -def test_clean_artifacts_cmd(): - command = f"ttrt perf {PERF_BINARY_FILE_PATH} --clean-artifacts --log-file {test_clean_artifacts_cmd.__name__}_perf.log --host-only" +def test_clean_artifacts_cmd_perf(): + command = f"ttrt perf {PERF_BINARY_FILE_PATH} --clean-artifacts --host-only --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts_cmd.__name__}" - -def test_save_artifacts(): +def test_save_artifacts_perf(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = PERF_BINARY_FILE_PATH custom_args["--host-only"] = True custom_args["--clean-artifacts"] = True @@ -94,46 +87,36 @@ def test_save_artifacts(): perf_instance = API.Perf(args=custom_args) perf_instance() - assert ( - 
check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts.__name__}" - -def test_save_artifacts_cmd(): - command = f"ttrt perf {PERF_BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file {test_save_artifacts_cmd.__name__}_perf.log --host-only" +def test_save_artifacts_cmd_perf(): + command = f"ttrt perf {PERF_BINARY_FILE_PATH} --clean-artifacts --save-artifacts --host-only --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts_cmd.__name__}" - -def test_log_file(): +def test_log_file_perf(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = PERF_BINARY_FILE_PATH custom_args["--host-only"] = True custom_args["--log-file"] = "test.log" perf_instance = API.Perf(args=custom_args) perf_instance() - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_log_file.__name__}" - -def test_log_file_cmd(): - command = f"ttrt perf {PERF_BINARY_FILE_PATH} --log-file test.log --log-file {test_log_file_cmd.__name__}_perf.log --host-only" +def test_log_file_cmd_perf(): + command = f"ttrt perf {PERF_BINARY_FILE_PATH} --host-only --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_log_file_cmd.__name__}" - -def test_artifact_dir(): +def test_artifact_dir_perf(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = PERF_BINARY_FILE_PATH custom_args["--host-only"] = True custom_args["--clean-artifacts"] = True @@ -142,82 +125,59 @@ def test_artifact_dir(): perf_instance = API.Perf(args=custom_args) perf_instance() - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir.__name__}" - -def test_artifact_dir_cmd(): - command = f"ttrt perf {PERF_BINARY_FILE_PATH} --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/ttrt-artifacts --log-file {test_artifact_dir_cmd.__name__}_perf.log --host-only" +def test_artifact_dir_cmd_perf(): + command = f"ttrt perf {PERF_BINARY_FILE_PATH} --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/ttrt-artifacts --host-only --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir_cmd.__name__}" - -def test_program_index(): +def test_program_index_perf(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = PERF_BINARY_FILE_PATH custom_args["--host-only"] = True custom_args["--program-index"] = "0" perf_instance = API.Perf(args=custom_args) perf_instance() - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_program_index.__name__}" - -def test_program_index_cmd(): - command = f"ttrt perf {PERF_BINARY_FILE_PATH} 
--program-index 0 --log-file {test_program_index_cmd.__name__}_perf.log --host-only" +def test_program_index_cmd_perf(): + command = f"ttrt perf {PERF_BINARY_FILE_PATH} --program-index 0 --host-only --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_program_index_cmd.__name__}" - -def test_loops(): +def test_loops_perf(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = PERF_BINARY_FILE_PATH custom_args["--host-only"] = True custom_args["--loops"] = 1 perf_instance = API.Perf(args=custom_args) perf_instance() - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_loops.__name__}" - -def test_loops_cmd(): - command = f"ttrt perf {PERF_BINARY_FILE_PATH} --loops 1 --log-file {test_loops_cmd.__name__}_perf.log --host-only" +def test_loops_cmd_perf(): + command = f"ttrt perf {PERF_BINARY_FILE_PATH} --loops 1 --host-only --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_loops_cmd.__name__}" - -def test_device(): +def test_device_perf(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = PERF_BINARY_FILE_PATH perf_instance = API.Perf(args=custom_args) perf_instance() - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_device.__name__}" - -def test_device_cmd(): - command = f"ttrt perf {PERF_BINARY_FILE_PATH} --log-file {test_device_cmd.__name__}_perf.log" +def test_device_cmd_perf(): + command = f"ttrt perf {PERF_BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - - assert ( - check_results("perf_results.json") == 0 - ), f"one of more tests failed in={test_device_cmd.__name__}" diff --git a/runtime/tools/python/test/test_query.py b/runtime/tools/python/test/test_query.py index 3e3a6ece3e..5f8b67f368 100644 --- a/runtime/tools/python/test/test_query.py +++ b/runtime/tools/python/test/test_query.py @@ -16,6 +16,7 @@ import shutil import atexit import pytest +import inspect import ttrt from ttrt.common.util import * @@ -24,109 +25,91 @@ from util import * -def test_clean_artifacts(): +def test_clean_artifacts_query(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["--clean-artifacts"] = True query_instance = API.Query(args=custom_args) query_instance() - assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts.__name__}" - -def test_clean_artifacts_cmd(): - command = f"ttrt query --clean-artifacts --log-file {test_clean_artifacts_cmd.__name__}_query.log" +def test_clean_artifacts_cmd_query(): + command = f"ttrt query --clean-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" 
sub_process_command(command) - assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts_cmd.__name__}" - -def test_save_artifacts(): +def test_save_artifacts_query(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True query_instance = API.Query(args=custom_args) query_instance() - assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts.__name__}" - -def test_save_artifacts_cmd(): - command = f"ttrt query --clean-artifacts --save-artifacts --log-file {test_save_artifacts_cmd.__name__}_query.log" +def test_save_artifacts_cmd_query(): + command = f"ttrt query --clean-artifacts --save-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts_cmd.__name__}" - -def test_log_file(): +def test_log_file_query(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["--log-file"] = "test.log" query_instance = API.Query(args=custom_args) query_instance() - assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_log_file.__name__}" - -def test_log_file_cmd(): - command = f"ttrt query --log-file {test_log_file_cmd.__name__}_query.log" +def test_log_file_cmd_query(): + command = f"ttrt query --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_log_file_cmd.__name__}" - -def test_artifact_dir(): +def test_artifact_dir_query(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True custom_args["--artifact-dir"] = f"{os.getcwd()}/test-artifacts" query_instance = API.Query(args=custom_args) query_instance() - assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir.__name__}" - -def test_artifact_dir_cmd(): - command = f"ttrt query --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file {test_artifact_dir_cmd.__name__}_query.log" +def test_artifact_dir_cmd_query(): + command = f"ttrt query --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir_cmd.__name__}" - -def test_logger(): +def test_logger_query(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" log_file_name = "test.log" custom_logger = Logger(log_file_name) query_instance = API.Query(args=custom_args, logger=custom_logger) query_instance() - assert ( - check_results("query_results.json") 
== 0 - ), f"one of more tests failed in={test_logger.__name__}" - -def test_artifacts(): +def test_artifacts_query(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" log_file_name = "test.log" custom_logger = Logger(log_file_name) artifacts_folder_path = f"{os.getcwd()}/test-artifacts" @@ -136,27 +119,18 @@ def test_artifacts(): query_instance = API.Query(args=custom_args, artifacts=custom_artifacts) query_instance() - assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_artifacts.__name__}" - -def test_quiet(): +def test_quiet_query(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["--quiet"] = True query_instance = API.Query(args=custom_args) query_instance() - assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_quiet.__name__}" - -def test_quiet_cmd(): - command = f"ttrt query --quiet --log-file {test_quiet_cmd.__name__}_query.log" +def test_quiet_cmd_query(): + command = f"ttrt query --quiet --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - - assert ( - check_results("query_results.json") == 0 - ), f"one of more tests failed in={test_quiet_cmd.__name__}" diff --git a/runtime/tools/python/test/test_read.py b/runtime/tools/python/test/test_read.py index 6570284ee8..3b8a6eb854 100644 --- a/runtime/tools/python/test/test_read.py +++ b/runtime/tools/python/test/test_read.py @@ -16,6 +16,7 @@ import shutil import atexit import pytest +import inspect import ttrt from ttrt.common.util import * @@ -24,76 +25,62 @@ from util import * -def test_flatbuffer(): +def test_flatbuffer_read(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH read_instance = API.Read(args=custom_args) read_instance() - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer.__name__}" - -def test_flatbuffer_cmd(): - command = f"ttrt read {BINARY_FILE_PATH} --log-file {test_flatbuffer_cmd.__name__}_read.log" +def test_flatbuffer_cmd_read(): + command = f"ttrt read {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer_cmd.__name__}" - -def test_dir_flatbuffer(): +def test_dir_flatbuffer_read(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = DIRECTORY_PATH read_instance = API.Read(args=custom_args) read_instance() - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_dir_flatbuffer.__name__}" - -def test_dir_flatbuffer_cmd(): - command = f"ttrt read {DIRECTORY_PATH} --log-file {test_dir_flatbuffer_cmd.__name__}_read.log" +def test_dir_flatbuffer_cmd_read(): + command = f"ttrt read {DIRECTORY_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - 
assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_dir_flatbuffer_cmd.__name__}" - -def test_logger(): +def test_logger_read(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH log_file_name = "test.log" custom_logger = Logger(log_file_name) read_instance = API.Read(args=custom_args, logger=custom_logger) read_instance() - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_logger.__name__}" - -def test_logger_cmd(): - command = ( - f"ttrt read {BINARY_FILE_PATH} --log-file {test_logger_cmd.__name__}_read.log" - ) +def test_logger_cmd_read(): + command = f"ttrt read {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_logger_cmd.__name__}" - -def test_artifacts(): +def test_artifacts_read(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH log_file_name = "test.log" custom_logger = Logger(log_file_name) @@ -104,81 +91,65 @@ def test_artifacts(): read_instance = API.Read(args=custom_args, artifacts=custom_artifacts) read_instance() - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_artifacts.__name__}" - -def test_artifacts_cmd(): - command = f"ttrt read {BINARY_FILE_PATH} --artifact-dir {os.getcwd()}/test-artifacts --log-file {test_artifacts_cmd.__name__}_read.log" +def test_artifacts_cmd_read(): + command = f"ttrt read {BINARY_FILE_PATH} --artifact-dir {os.getcwd()}/test-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_artifacts_cmd.__name__}" - -def test_clean_artifacts(): +def test_clean_artifacts_read(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True read_instance = API.Read(args=custom_args) read_instance() - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts.__name__}" - -def test_clean_artifacts_cmd(): - command = f"ttrt read {BINARY_FILE_PATH} --clean-artifacts --log-file {test_clean_artifacts_cmd.__name__}_read.log" +def test_clean_artifacts_cmd_read(): + command = f"ttrt read {BINARY_FILE_PATH} --clean-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts_cmd.__name__}" - -def test_save_artifacts(): +def test_save_artifacts_read(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True 
read_instance = API.Read(args=custom_args) read_instance() - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts.__name__}" - -def test_save_artifacts_cmd(): - command = f"ttrt read {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file {test_save_artifacts_cmd.__name__}_read.log" +def test_save_artifacts_cmd_read(): + command = f"ttrt read {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts_cmd.__name__}" - -def test_log_file(): +def test_log_file_read(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--log-file"] = "test.log" read_instance = API.Read(args=custom_args) read_instance() - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_log_file.__name__}" - -def test_artifact_dir(): +def test_artifact_dir_read(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True @@ -186,28 +157,19 @@ def test_artifact_dir(): read_instance = API.Read(args=custom_args) read_instance() - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir.__name__}" - -def test_section(): +def test_section_read(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--section"] = "all" read_instance = API.Read(args=custom_args) read_instance() - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_section.__name__}" - -def test_section_cmd(): - command = f"ttrt read {BINARY_FILE_PATH} --section all --log-file {test_section_cmd.__name__}_read.log" +def test_section_cmd_read(): + command = f"ttrt read {BINARY_FILE_PATH} --section all --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - - assert ( - check_results("read_results.json") == 0 - ), f"one of more tests failed in={test_section_cmd.__name__}" diff --git a/runtime/tools/python/test/test_run.py b/runtime/tools/python/test/test_run.py index 0bbd21422f..df88ca8028 100644 --- a/runtime/tools/python/test/test_run.py +++ b/runtime/tools/python/test/test_run.py @@ -16,6 +16,7 @@ import shutil import atexit import pytest +import inspect import ttrt from ttrt.common.util import * @@ -24,67 +25,57 @@ from util import * -def test_flatbuffer(): +def test_flatbuffer_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer.__name__}" - -def test_flatbuffer_cmd(): - command = ( - f"ttrt run {BINARY_FILE_PATH} --log-file 
{test_flatbuffer_cmd.__name__}_run.log" - ) +def test_flatbuffer_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_flatbuffer_cmd.__name__}" - -def test_dir_flatbuffer(): +def test_dir_flatbuffer_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = DIRECTORY_PATH run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_dir_flatbuffer.__name__}" - -def test_dir_flatbuffer_cmd(): - command = f"ttrt run {DIRECTORY_PATH} --log-file {test_dir_flatbuffer_cmd.__name__}_run.log" +def test_dir_flatbuffer_cmd_run(): + command = f"ttrt run {DIRECTORY_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_dir_flatbuffer_cmd.__name__}" - -def test_logger(): +def test_logger_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH log_file_name = "test.log" custom_logger = Logger(log_file_name) run_instance = API.Run(args=custom_args, logger=custom_logger) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_logger.__name__}" - -def test_artifacts(): +def test_artifacts_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH log_file_name = "test.log" custom_logger = Logger(log_file_name) @@ -95,83 +86,65 @@ def test_artifacts(): run_instance = API.Run(args=custom_args, artifacts=custom_artifacts) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_artifacts.__name__}" - -def test_clean_artifacts(): +def test_clean_artifacts_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts.__name__}" - -def test_clean_artifacts_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --clean-artifacts --log-file {test_clean_artifacts_cmd.__name__}_run.log" +def test_clean_artifacts_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --clean-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_clean_artifacts_cmd.__name__}" - -def test_save_artifacts(): +def test_save_artifacts_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" 
custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts.__name__}" - -def test_save_artifacts_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file {test_save_artifacts_cmd.__name__}_run.log" +def test_save_artifacts_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_save_artifacts_cmd.__name__}" - -def test_log_file(): +def test_log_file_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--log-file"] = "test.log" run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_log_file.__name__}" - -def test_log_file_cmd(): - command = ( - f"ttrt run {BINARY_FILE_PATH} --log-file {test_log_file_cmd.__name__}_run.log" - ) +def test_log_file_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_log_file_cmd.__name__}" - -def test_artifact_dir(): +def test_artifact_dir_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--clean-artifacts"] = True custom_args["--save-artifacts"] = True @@ -179,347 +152,245 @@ def test_artifact_dir(): run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir.__name__}" - -def test_artifact_dir_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file {test_artifact_dir_cmd.__name__}_run.log" +def test_artifact_dir_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_artifact_dir_cmd.__name__}" - -def test_program_index(): +def test_program_index_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--program-index"] = "0" run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_program_index.__name__}" - -def test_program_index_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --program-index 0 --log-file 
{test_program_index_cmd.__name__}_run.log" +def test_program_index_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --program-index 0 --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_program_index_cmd.__name__}" - -def test_loops(): +def test_loops_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--loops"] = 1 run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_loops.__name__}" - -def test_loops_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --loops 1 --log-file {test_loops_cmd.__name__}_run.log" +def test_loops_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --loops 1 --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_loops_cmd.__name__}" - -def test_init(): +def test_init_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--init"] = "randn" run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_init.__name__}" - - -def test_init_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --init randn --log-file {test_init_cmd.__name__}_run.log" - sub_process_command(command) - - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_init_cmd.__name__}" - - -@pytest.mark.skip -def test_identity(): - API.initialize_apis() - custom_args = {} - custom_args["binary"] = BINARY_FILE_PATH - custom_args["--identity"] = True - run_instance = API.Run(args=custom_args) - run_instance() - - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_identity.__name__}" - -@pytest.mark.skip -def test_identity_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --identity --log-file {test_identity_cmd.__name__}_run.log" +def test_init_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --init randn --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_identity_cmd.__name__}" - -def test_non_zero(): +def test_non_zero_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--non-zero"] = True run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_non_zero.__name__}" - -def test_non_zero_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --non-zero --log-file {test_non_zero_cmd.__name__}_run.log" +def test_non_zero_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --non-zero --log-file 
ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_non_zero_cmd.__name__}" - -def test_rtol(): +def test_rtol_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--rtol"] = 1e-05 run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_rtol.__name__}" - -def test_rtol_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --rtol 1e-05 --log-file {test_rtol_cmd.__name__}_run.log" +def test_rtol_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --rtol 1e-05 --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_rtol_cmd.__name__}" - -def test_atol(): +def test_atol_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--atol"] = 1e-08 run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_atol.__name__}" - -def test_atol_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --atol 1e-08 --log-file {test_atol_cmd.__name__}_run.log" +def test_atol_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --atol 1e-08 --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_atol_cmd.__name__}" - -def test_seed(): +def test_seed_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--seed"] = 1 run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_seed.__name__}" - -def test_seed_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --seed 1 --log-file {test_seed_cmd.__name__}_run.log" +def test_seed_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --seed 1 --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_seed_cmd.__name__}" - -def test_load_kernels_from_disk(): +def test_load_kernels_from_disk_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--load-kernels-from-disk"] = True run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_load_kernels_from_disk.__name__}" - -def test_load_kernels_from_disk_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} 
--load-kernels-from-disk --log-file {test_load_kernels_from_disk_cmd.__name__}_run.log" +def test_load_kernels_from_disk_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --load-kernels-from-disk --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_load_kernels_from_disk_cmd.__name__}" - -def test_enable_async_ttnn(): +def test_enable_async_ttnn_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--enable-async-ttnn"] = True run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_enable_async_ttnn.__name__}" - -def test_enable_async_ttnn_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --enable-async-ttnn --log-file {test_enable_async_ttnn_cmd.__name__}_run.log" +def test_enable_async_ttnn_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --enable-async-ttnn --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_enable_async_ttnn_cmd.__name__}" - -def test_disable_ignore_tile_shape(): +def test_disable_ignore_tile_shape_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--disable-ignore-tile-shape"] = True run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_disable_ignore_tile_shape.__name__}" - -def test_disable_ignore_tile_shape_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --disable-ignore-tile-shape --log-file {test_disable_ignore_tile_shape_cmd.__name__}_run.log" +def test_disable_ignore_tile_shape_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --disable-ignore-tile-shape --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json" sub_process_command(command) - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_disable_ignore_tile_shape_cmd.__name__}" - -def test_disable_empty_op_row_major(): +def test_disable_empty_op_row_major_run(): API.initialize_apis() custom_args = {} + custom_args[ + "--result-file" + ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json" custom_args["binary"] = BINARY_FILE_PATH custom_args["--disable-empty-op-row-major"] = True run_instance = API.Run(args=custom_args) run_instance() - assert ( - check_results("run_results.json") == 0 - ), f"one of more tests failed in={test_disable_empty_op_row_major.__name__}" - -def test_disable_empty_op_row_major_cmd(): - command = f"ttrt run {BINARY_FILE_PATH} --disable-empty-op-row-major --log-file {test_disable_empty_op_row_major_cmd.__name__}_run.log" +def test_disable_empty_op_row_major_cmd_run(): + command = f"ttrt run {BINARY_FILE_PATH} --disable-empty-op-row-major --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file 
ttrt-results/{inspect.currentframe().f_code.co_name}.json"
     sub_process_command(command)
 
-    assert (
-        check_results("run_results.json") == 0
-    ), f"one of more tests failed in={test_disable_empty_op_row_major_cmd.__name__}"
-
 
-def test_disable_full_op_row_major():
+def test_disable_full_op_row_major_run():
     API.initialize_apis()
     custom_args = {}
+    custom_args[
+        "--result-file"
+    ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json"
     custom_args["binary"] = BINARY_FILE_PATH
     custom_args["--disable-full-op-row-major"] = True
     run_instance = API.Run(args=custom_args)
     run_instance()
 
-    assert (
-        check_results("run_results.json") == 0
-    ), f"one of more tests failed in={test_disable_full_op_row_major.__name__}"
-
 
-def test_disable_full_op_row_major_cmd():
-    command = f"ttrt run {BINARY_FILE_PATH} --disable-full-op-row-major --log-file {test_disable_full_op_row_major_cmd.__name__}_run.log"
+def test_disable_full_op_row_major_cmd_run():
+    command = f"ttrt run {BINARY_FILE_PATH} --disable-full-op-row-major --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json"
     sub_process_command(command)
 
-    assert (
-        check_results("run_results.json") == 0
-    ), f"one of more tests failed in={test_disable_full_op_row_major_cmd.__name__}"
-
 
-def test_disable_maxpool2d_preshard():
+def test_disable_maxpool2d_preshard_run():
     API.initialize_apis()
     custom_args = {}
+    custom_args[
+        "--result-file"
+    ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json"
     custom_args["binary"] = BINARY_FILE_PATH
     custom_args["--disable-maxpool2d-preshard"] = True
     run_instance = API.Run(args=custom_args)
     run_instance()
 
-    assert (
-        check_results("run_results.json") == 0
-    ), f"one of more tests failed in={test_disable_maxpool2d_preshard.__name__}"
-
 
-def test_disable_maxpool2d_preshard_cmd():
-    command = f"ttrt run {BINARY_FILE_PATH} --disable-maxpool2d-preshard --log-file {test_disable_maxpool2d_preshard_cmd.__name__}_run.log"
+def test_disable_maxpool2d_preshard_cmd_run():
+    command = f"ttrt run {BINARY_FILE_PATH} --disable-maxpool2d-preshard --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json"
     sub_process_command(command)
 
-    assert (
-        check_results("run_results.json") == 0
-    ), f"one of more tests failed in={test_disable_maxpool2d_preshard_cmd.__name__}"
-
 
-def test_disable_matmul_1d_program_config():
+def test_disable_matmul_1d_program_config_run():
     API.initialize_apis()
     custom_args = {}
+    custom_args[
+        "--result-file"
+    ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json"
     custom_args["binary"] = BINARY_FILE_PATH
     custom_args["--disable-matmul-1d-program-config"] = True
     run_instance = API.Run(args=custom_args)
     run_instance()
 
-    assert (
-        check_results("run_results.json") == 0
-    ), f"one of more tests failed in={test_disable_matmul_1d_program_config.__name__}"
-
 
-def test_disable_matmul_1d_program_config_cmd():
-    command = f"ttrt run {BINARY_FILE_PATH} --disable-matmul-1d-program-config --log-file {test_disable_matmul_1d_program_config_cmd.__name__}_run.log"
+def test_disable_matmul_1d_program_config_cmd_run():
+    command = f"ttrt run {BINARY_FILE_PATH} --disable-matmul-1d-program-config --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json"
     sub_process_command(command)
-
-    assert (
-        check_results("run_results.json") == 0
-    ), f"one of more tests failed in={test_disable_matmul_1d_program_config_cmd.__name__}"
diff --git a/runtime/tools/python/ttrt/common/check.py b/runtime/tools/python/ttrt/common/check.py
index cece61553d..4fc2563826 100644
--- a/runtime/tools/python/ttrt/common/check.py
+++ b/runtime/tools/python/ttrt/common/check.py
@@ -60,6 +60,13 @@ def initialize_api():
             choices=None,
             help="system desc to check against",
         )
+        Check.register_arg(
+            name="--result-file",
+            type=str,
+            default="check_results.json",
+            choices=None,
+            help="test file to save results to",
+        )
         Check.register_arg(
             name="binary",
             type=str,
@@ -261,7 +268,7 @@ def postprocess(self):
             for bin in self.ttmetal_binaries:
                 self.artifacts.save_binary(bin)
 
-        self.results.save_results("check_results.json")
+        self.results.save_results(self["--result-file"])
 
         self.logging.debug(f"------finished postprocessing check API")
 
diff --git a/runtime/tools/python/ttrt/common/perf.py b/runtime/tools/python/ttrt/common/perf.py
index bb399a23b2..1413e86110 100644
--- a/runtime/tools/python/ttrt/common/perf.py
+++ b/runtime/tools/python/ttrt/common/perf.py
@@ -83,6 +83,13 @@ def initialize_api():
             choices=None,
             help="port to run tracy client server application",
        )
+        Perf.register_arg(
+            name="--result-file",
+            type=str,
+            default="perf_results.json",
+            choices=None,
+            help="test file to save results to",
+        )
         Perf.register_arg(
             name="binary",
             type=str,
@@ -537,7 +544,7 @@ def postprocess(self):
             else:
                 self.logging.error(f"ERROR: test case={bin.file_path}")
 
-        self.results.save_results("perf_results.json")
+        self.results.save_results(self["--result-file"])
 
         self.logging.debug(f"------finished postprocessing perf API")
 
diff --git a/runtime/tools/python/ttrt/common/query.py b/runtime/tools/python/ttrt/common/query.py
index 59aa31452d..6174ed0dba 100644
--- a/runtime/tools/python/ttrt/common/query.py
+++ b/runtime/tools/python/ttrt/common/query.py
@@ -59,6 +59,13 @@ def initialize_api():
             choices=[True, False],
             help="suppress system desc from being printed",
         )
+        Query.register_arg(
+            name="--result-file",
+            type=str,
+            default="query_results.json",
+            choices=None,
+            help="test file to save results to",
+        )
 
     def __init__(self, args={}, logger=None, artifacts=None):
         for name, attributes in Query.registered_args.items():
@@ -153,7 +160,7 @@ def postprocess(self):
         else:
             self.logging.error(f"FAIL: getting system_desc failed")
 
-        self.results.save_results("query_results.json")
+        self.results.save_results(self["--result-file"])
 
         self.logging.debug(f"------finished postprocessing query API")
 
diff --git a/runtime/tools/python/ttrt/common/read.py b/runtime/tools/python/ttrt/common/read.py
index a8ebbd62e6..aa04f461ce 100644
--- a/runtime/tools/python/ttrt/common/read.py
+++ b/runtime/tools/python/ttrt/common/read.py
@@ -68,6 +68,13 @@ def initialize_api():
             choices=None,
             help="provides a directory path to save artifacts to",
         )
+        Read.register_arg(
+            name="--result-file",
+            type=str,
+            default="read_results.json",
+            choices=None,
+            help="test file to save results to",
+        )
         Read.register_arg(
             name="binary",
             type=str,
@@ -314,7 +321,7 @@ def postprocess(self):
             else:
                 self.logging.error(f"ERROR: test case={bin.file_path}")
 
-        self.results.save_results("read_results.json")
+        self.results.save_results(self["--result-file"])
 
         self.logging.debug(f"------finished postprocessing read API")
 
diff --git a/runtime/tools/python/ttrt/common/run.py b/runtime/tools/python/ttrt/common/run.py
index a0be382017..976779e5fe 100644
--- a/runtime/tools/python/ttrt/common/run.py
+++ b/runtime/tools/python/ttrt/common/run.py
@@ -165,6 +165,13 @@ def initialize_api():
             choices=[True, False],
             help="disable swap binary operands workaround",
         )
+        Run.register_arg(
+            name="--result-file",
+            type=str,
+            default="run_results.json",
+            choices=None,
+            help="test file to save results to",
+        )
         Run.register_arg(
             name="binary",
             type=str,
@@ -556,7 +563,7 @@ def postprocess(self):
             else:
                 self.logging.error(f"ERROR: test case={bin.file_path}")
 
-        self.results.save_results("run_results.json")
+        self.results.save_results(self["--result-file"])
 
         self.logging.debug(f"------finished postprocessing run API")
 
diff --git a/runtime/tools/python/ttrt/common/util.py b/runtime/tools/python/ttrt/common/util.py
index 8093f1f461..751fa07663 100644
--- a/runtime/tools/python/ttrt/common/util.py
+++ b/runtime/tools/python/ttrt/common/util.py
@@ -673,6 +673,41 @@ def save_results(self, file_name="results.json"):
 
         self.logging.info(f"results saved to={file_name}")
 
+        # count total tests, skips and failures
+        with open(file_name, "r") as file:
+            data = json.load(file)
+
+        import xml.etree.ElementTree as ET
+
+        total_tests = len(data)
+        failures = sum(1 for item in data if item.get("result", "") != "pass")
+        skipped = sum(1 for item in data if item.get("result", "") == "skipped")
+
+        testsuites = ET.Element("testsuites")
+        testsuites.set("name", "TTRT")
+        testsuites.set("tests", str(total_tests))
+        testsuites.set("failures", str(failures))
+        testsuites.set("skipped", str(skipped))
+
+        testsuite = ET.SubElement(testsuites, "testsuite")
+        testsuite.set("name", "TTRT")
+        testsuite.set("tests", str(total_tests))
+        testsuite.set("failures", str(failures))
+        testsuite.set("skipped", str(skipped))
+
+        for item in data:
+            testcase = ET.SubElement(testsuite, "testcase")
+            testcase.set("name", item.get("file_path", ""))
+            testcase.set("file_path", item.get("file_path", ""))
+            testcase.set("result", item.get("result", ""))
+            testcase.set("exception", item.get("exception", ""))
+            testcase.set("log_file", item.get("log_file", ""))
+            testcase.set("artifacts", item.get("artifacts", ""))
+
+        tree = ET.ElementTree(testsuites)
+        xml_file_path = "ttrt_report.xml"
+        tree.write(xml_file_path, encoding="utf-8", xml_declaration=True)
+
     def get_result_code(self):
         for entry in self.results:
             if entry.get("result") != "pass":