#292: Added option to dump test results to a custom file and enable CI workflow to summarize ttrt run and perf test results as well as ttrt api test results (#816)
tapspatel authored Nov 1, 2024
1 parent 9bd5963 commit d3ef8e5
Showing 14 changed files with 455 additions and 626 deletions.
7 changes: 7 additions & 0 deletions .github/workflows/build-and-test.yml
@@ -286,6 +286,13 @@ jobs:
          name: ${{ matrix.build.runs-on }}_${{ matrix.build.name }}_results.json
          path: ${{ matrix.build.name }}_results.json

      - name: Show Test Report
        uses: mikepenz/action-junit-report@v4
        if: success() || failure()
        with:
          report_paths: ttrt_report.xml
          check_name: TTRT ${{ matrix.build.runs-on }} ${{ matrix.build.name }} Tests

  run-ttrt-tests:

    timeout-minutes: 30
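For context, the `Show Test Report` step above publishes `ttrt_report.xml` as a check run. How that XML is produced is not shown in this hunk; a minimal sketch, assuming the preceding step runs the ttrt pytest suite with pytest's standard JUnit output flag:

```python
# Hypothetical sketch of the upstream step that would produce ttrt_report.xml.
# Assumption: the ttrt tests run under pytest, whose --junitxml flag writes a
# JUnit-style XML report that mikepenz/action-junit-report can consume.
import subprocess

subprocess.run(
    ["pytest", "runtime/tools/python/test", "--junitxml=ttrt_report.xml"],
    check=False,  # let the report step run even if some tests fail
)
```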
36 changes: 18 additions & 18 deletions docs/src/ttrt.md
@@ -10,6 +10,19 @@ cmake --build build -- ttrt
ttrt --help
```

### Building runtime mode
Add the following flag when building the compiler:
```bash
-DTTMLIR_ENABLE_RUNTIME=ON
```

### Building perf mode
Add the following flags when building the compiler:
```bash
-DTTMLIR_ENABLE_RUNTIME=ON
-DTT_RUNTIME_ENABLE_PERF_TRACE=ON
```

## LOGGER Levels
ttrt supports logging at different logger levels. You will need to set the env var `TTRT_LOGGER_LEVEL`. By default, it will print all log messages.
```bash
@@ -33,24 +46,6 @@ source ttrt_env/bin/activate
pip install ttrt-0.0.235-cp310-cp310-linux_x86_64.whl
```

### Building runtime mode
Add the following flags when building the compiler
```bash
-DTTMLIR_ENABLE_RUNTIME=ON
```

If you are building with runtime mode enabled (`-DTTMLIR_ENABLE_RUNTIME=ON`), you will have to install the following packages when using ttrt
```bash
pip install torch
```

### Building perf mode
Add the following flags when building the compiler
```bash
-DTTMLIR_ENABLE_RUNTIME=ON
-DTT_RUNTIME_ENABLE_PERF_TRACE=ON
```

## Generate a flatbuffer file from compiler
The compiler supports a pass to load a system descriptor to compile against. You can feed this pass into ttmlir-opt.

@@ -153,6 +148,7 @@
ttrt read --section system_desc system_desc.ttsys
ttrt read system_desc.ttsys --log-file ttrt.log
ttrt read out.ttnn --save-artifacts --artifact-dir /path/to/some/dir
ttrt read out.ttnn --result-file result.json
```
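The `--result-file` flag introduced by this commit dumps the API's results to a custom JSON file instead of the default location. A minimal sketch of consuming such a dump, assuming only that the file is valid JSON (the exact schema is defined by ttrt, not by this diff):

```python
# Minimal sketch: load and display a ttrt result dump. The structure of the
# entries is an assumption; the tests below only inspect the file through
# check_results() from their local util module.
import json

with open("result.json", "r") as f:
    results = json.load(f)

print(json.dumps(results, indent=2))
```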

### run
@@ -177,6 +173,7 @@ ttrt run /dir/of/flatbuffers --log-file ttrt.log
ttrt run out.ttnn --save-artifacts --artifact-dir /path/to/some/dir
ttrt run out.ttnn --load-kernels-from-disk
ttrt run out.ttnn --enable-async-ttnn
ttrt run out.ttnn --result-file result.json
```

### query
@@ -191,6 +188,7 @@ ttrt query --save-artifacts
ttrt query --clean-artifacts
ttrt query --save-artifacts --log-file ttrt.log
ttrt query --save-artifacts --artifact-dir /path/to/some/dir
ttrt query --result-file result.json
```

### perf
@@ -219,6 +217,7 @@ ttrt perf /dir/of/flatbuffers --host-only
ttrt perf /dir/of/flatbuffers --loops 10 --host-only
ttrt perf /dir/of/flatbuffers --log-file ttrt.log --host-only
ttrt perf --save-artifacts --artifact-dir /path/to/some/dir
ttrt perf out.ttnn --result-file result.json
```

To use the Tracy GUI, run the following instructions on your MacBook. You can upload your .tracy file into the GUI to view the profiled dumps.
@@ -242,6 +241,7 @@ ttrt check out.ttnn --save-artifacts
ttrt check out.ttnn --log-file ttrt.log
ttrt check /dir/of/flatbuffers --system-desc /dir/of/system_desc
ttrt check --save-artifacts --artifact-dir /path/to/some/dir out.ttnn
ttrt check out.ttnn --result-file result.json
```

## ttrt as a python package
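The body of this section is collapsed in this diff, but the updated tests below exercise the package API directly. A minimal sketch of that pattern, assuming `API` is importable from `ttrt.common.api` and with `out.ttnn` as a placeholder flatbuffer path:

```python
# Sketch based on the test code in this commit: drive the check API from
# Python and dump results to a custom file via the new --result-file arg.
from ttrt.common.api import API

API.initialize_apis()

custom_args = {
    "binary": "out.ttnn",  # placeholder path to a compiled flatbuffer
    "--result-file": "result.json",
}
check_instance = API.Check(args=custom_args)
check_instance()
```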
26 changes: 26 additions & 0 deletions runtime/tools/python/test/conftest.py
@@ -0,0 +1,26 @@
# SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC
#
# SPDX-License-Identifier: Apache-2.0

import os
import pytest

from util import *


@pytest.fixture(scope="session", autouse=True)
def session_setup():
    directory_name = "ttrt-results"
    if not os.path.exists(directory_name):
        try:
            os.mkdir(directory_name)
        except Exception as e:
            print(f"An error occurred while creating the directory: {e}")

    yield


def pytest_runtest_teardown(item, nextitem):
    assert (
        check_results(f"ttrt-results/{item.name}.json") == 0
    ), f"one or more tests failed in={item.name}"
155 changes: 57 additions & 98 deletions runtime/tools/python/test/test_check.py
@@ -16,6 +16,7 @@
import shutil
import atexit
import pytest
import inspect

import ttrt
from ttrt.common.util import *
@@ -24,154 +25,126 @@
from util import *


def test_clean_artifacts():
def test_clean_artifacts_check():
    API.initialize_apis()
    custom_args = {}
    custom_args[
        "--result-file"
    ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    custom_args["binary"] = BINARY_FILE_PATH
    custom_args["--clean-artifacts"] = True
    check_instance = API.Check(args=custom_args)
    check_instance()

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_clean_artifacts.__name__}"


def test_clean_artifacts_cmd():
    command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --log-file {test_clean_artifacts_cmd.__name__}_check.log"
def test_clean_artifacts_cmd_check():
    command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    sub_process_command(command)

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_clean_artifacts_cmd.__name__}"


def test_save_artifacts():
def test_save_artifacts_check():
    API.initialize_apis()
    custom_args = {}
    custom_args[
        "--result-file"
    ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    custom_args["binary"] = BINARY_FILE_PATH
    custom_args["--clean-artifacts"] = True
    custom_args["--save-artifacts"] = True
    check_instance = API.Check(args=custom_args)
    check_instance()

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_save_artifacts.__name__}"


def test_save_artifacts_cmd():
    command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file {test_save_artifacts_cmd.__name__}_check.log"
def test_save_artifacts_cmd_check():
    command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    sub_process_command(command)

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_save_artifacts_cmd.__name__}"


def test_flatbuffer():
def test_flatbuffer_check():
    API.initialize_apis()
    custom_args = {}
    custom_args[
        "--result-file"
    ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    custom_args["binary"] = BINARY_FILE_PATH
    check_instance = API.Check(args=custom_args)
    check_instance()

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_flatbuffer.__name__}"


def test_flatbuffer_cmd():
    command = f"ttrt check {BINARY_FILE_PATH} --log-file {test_flatbuffer_cmd.__name__}_check.log"
def test_flatbuffer_cmd_check():
    command = f"ttrt check {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    sub_process_command(command)

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_flatbuffer_cmd.__name__}"


def test_dir_flatbuffer():
def test_dir_flatbuffer_check():
    API.initialize_apis()
    custom_args = {}
    custom_args[
        "--result-file"
    ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    custom_args["binary"] = DIRECTORY_PATH
    check_instance = API.Check(args=custom_args)
    check_instance()

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_dir_flatbuffer.__name__}"


def test_dir_flatbuffer_cmd():
    command = f"ttrt check {DIRECTORY_PATH} --log-file {test_dir_flatbuffer_cmd.__name__}_check.log"
def test_dir_flatbuffer_cmd_check():
    command = f"ttrt check {DIRECTORY_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    sub_process_command(command)

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_dir_flatbuffer_cmd.__name__}"


def test_system_desc():
def test_system_desc_check():
    API.initialize_apis()
    custom_args = {}
    custom_args[
        "--result-file"
    ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    custom_args["binary"] = BINARY_FILE_PATH
    custom_args["--system-desc"] = SYSTEM_DESC_FILE_PATH
    check_instance = API.Check(args=custom_args)
    check_instance()

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_system_desc.__name__}"


def test_system_desc_cmd():
    command = f"ttrt check {BINARY_FILE_PATH} --system-desc {SYSTEM_DESC_FILE_PATH} --log-file {test_system_desc_cmd.__name__}_check.log"
def test_system_desc_cmd_check():
    command = f"ttrt check {BINARY_FILE_PATH} --system-desc {SYSTEM_DESC_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    sub_process_command(command)

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_system_desc_cmd.__name__}"


def test_dir_system_descs():
def test_dir_system_descs_check():
    API.initialize_apis()
    custom_args = {}
    custom_args[
        "--result-file"
    ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    custom_args["binary"] = BINARY_FILE_PATH
    custom_args["--system-desc"] = SYSTEM_DESC_DIRECTORY_PATH
    check_instance = API.Check(args=custom_args)
    check_instance()

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_dir_system_descs.__name__}"


def test_dir_system_descs_cmd():
    command = f"ttrt check {BINARY_FILE_PATH} --system-desc {SYSTEM_DESC_DIRECTORY_PATH} --log-file {test_dir_system_descs_cmd.__name__}_check.log"
def test_dir_system_descs_cmd_check():
    command = f"ttrt check {BINARY_FILE_PATH} --system-desc {SYSTEM_DESC_DIRECTORY_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    sub_process_command(command)

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_dir_system_descs_cmd.__name__}"


def test_logger():
def test_logger_check():
    API.initialize_apis()
    custom_args = {}
    custom_args[
        "--result-file"
    ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    custom_args["binary"] = BINARY_FILE_PATH
    log_file_name = "test.log"
    custom_logger = Logger(log_file_name)
    check_instance = API.Check(args=custom_args, logger=custom_logger)
    check_instance()

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_logger.__name__}"


def test_artifacts():
def test_artifacts_check():
    API.initialize_apis()
    custom_args = {}
    custom_args[
        "--result-file"
    ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    custom_args["binary"] = BINARY_FILE_PATH
    log_file_name = "test.log"
    custom_logger = Logger(log_file_name)
@@ -182,52 +155,38 @@ def test_artifacts():
    check_instance = API.Check(args=custom_args, artifacts=custom_artifacts)
    check_instance()

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_artifacts.__name__}"


def test_log_file():
def test_log_file_check():
    API.initialize_apis()
    custom_args = {}
    custom_args[
        "--result-file"
    ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    custom_args["binary"] = BINARY_FILE_PATH
    custom_args["--log-file"] = "test.log"
    check_instance = API.Check(args=custom_args)
    check_instance()

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_log_file.__name__}"


def test_log_file_cmd():
    command = f"ttrt check {BINARY_FILE_PATH} --log-file {test_log_file_cmd.__name__}_check.log"
def test_log_file_cmd_check():
    command = f"ttrt check {BINARY_FILE_PATH} --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    sub_process_command(command)

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_log_file_cmd.__name__}"


def test_artifact_dir():
def test_artifact_dir_check():
    API.initialize_apis()
    custom_args = {}
    custom_args[
        "--result-file"
    ] = f"ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    custom_args["binary"] = BINARY_FILE_PATH
    custom_args["--clean-artifacts"] = True
    custom_args["--save-artifacts"] = True
    custom_args["--artifact-dir"] = f"{os.getcwd()}/test-artifacts"
    check_instance = API.Check(args=custom_args)
    check_instance()

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_artifact_dir.__name__}"


def test_artifact_dir_cmd():
    command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file {test_artifact_dir_cmd.__name__}_check.log"
def test_artifact_dir_cmd_check():
    command = f"ttrt check {BINARY_FILE_PATH} --clean-artifacts --save-artifacts --artifact-dir {os.getcwd()}/test-artifacts --log-file ttrt-results/{inspect.currentframe().f_code.co_name}.log --result-file ttrt-results/{inspect.currentframe().f_code.co_name}.json"
    sub_process_command(command)

    assert (
        check_results("check_results.json") == 0
    ), f"one of more tests failed in={test_artifact_dir_cmd.__name__}"