Add CI for tt-explorer (#1109)
odjuricicTT authored Nov 1, 2024 · 1 parent 8e939d9 · commit c78b606
Showing 3 changed files with 68 additions and 2 deletions.
66 changes: 66 additions & 0 deletions .github/workflows/build-and-test.yml
@@ -404,3 +404,69 @@ jobs:
      run: |
        source env/activate
        pytest -ssv runtime/tools/python/test/test_perf.py

  build-and-test-explorer:
    needs: build-image
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        build: [
          {runs-on: ubuntu-latest, enable_perf: OFF, name: "run", ttrt_flags: ""},
        ]

    name: Build and test tt-explorer
    runs-on: ${{ matrix.build.runs-on }}

    container:
      image: ${{ needs.build-image.outputs.docker-image }}

    steps:
    - uses: actions/checkout@v4
      with:
        fetch-depth: 0

    - name: Set reusable strings
      id: strings
      shell: bash
      run: |
        echo "work-dir=$(pwd)" >> "$GITHUB_OUTPUT"
        echo "build-output-dir=$(pwd)/build" >> "$GITHUB_OUTPUT"

    - name: Git safe dir
      run: git config --global --add safe.directory ${{ steps.strings.outputs.work-dir }}

    - name: ccache
      uses: hendrikmuhs/ccache-action@v1.2
      with:
        create-symlink: true
        key: ${{ matrix.build.runs-on }}-run-ON-perf-${{ matrix.build.enable_perf }}-${{ env.SDK_VERSION }}

    - name: Configure CMake
      shell: bash
      run: |
        source env/activate
        cmake -G Ninja \
          -B ${{ steps.strings.outputs.build-output-dir }} \
          -DCMAKE_CXX_COMPILER=clang++-17 \
          -DCMAKE_C_COMPILER=clang-17 \
          -DCMAKE_BUILD_TYPE=Release \
          -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
          -DTTMLIR_ENABLE_RUNTIME=OFF \
          -DTTMLIR_ENABLE_RUNTIME_TESTS=OFF \
          -DTT_RUNTIME_ENABLE_PERF_TRACE=${{ matrix.build.enable_perf }} \
          -DTTMLIR_ENABLE_STABLEHLO=OFF \
          -S ${{ steps.strings.outputs.work-dir }}

    - name: Build tt-explorer
      shell: bash
      run: |
        source env/activate
        cmake --build ${{ steps.strings.outputs.build-output-dir }} -- explorer

    - name: Run tt-explorer tests
      shell: bash
      run: |
        source env/activate
        pytest tools/explorer/test/run_tests.py
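The new job can be reproduced locally by running the same commands it executes. The commands below are a rough sketch derived from the workflow steps above, not part of the commit; they assume an existing tt-mlir checkout with clang-17, Ninja, ccache, and the project's Python environment (env/activate) already set up, and they fix enable_perf to OFF as in the single matrix entry.

# Sketch of a local equivalent of the build-and-test-explorer job.
source env/activate
cmake -G Ninja -B build \
  -DCMAKE_CXX_COMPILER=clang++-17 \
  -DCMAKE_C_COMPILER=clang-17 \
  -DCMAKE_BUILD_TYPE=Release \
  -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
  -DTTMLIR_ENABLE_RUNTIME=OFF \
  -DTTMLIR_ENABLE_RUNTIME_TESTS=OFF \
  -DTT_RUNTIME_ENABLE_PERF_TRACE=OFF \
  -DTTMLIR_ENABLE_STABLEHLO=OFF \
  -S .
cmake --build build -- explorer
pytest tools/explorer/test/run_tests.py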
2 changes: 1 addition & 1 deletion tools/explorer/test/models/forward_and_backward.mlir
@@ -1,4 +1,4 @@
module @SimpleModel attributes {tt.system_desc = #tt.system_desc<[{arch = <wormhole_b0>, grid = 8x8, l1_size = 1499136, num_dram_channels = 12, dram_channel_size = 1073741824, noc_l1_address_align_bytes = 16, pcie_address_align_bytes = 32, noc_dram_address_align_bytes = 32, l1_unreserved_base = 1024, erisc_l1_unreserved_base = 1024, dram_unreserved_base = 1024, dram_unreserved_end = 1073741824, physical_cores = {worker = [ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 1x0, 1x1, 1x2, 1x3, 1x4, 1x5, 1x6, 1x7, 2x0, 2x1, 2x2, 2x3, 2x4, 2x5, 2x6, 2x7, 3x0, 3x1, 3x2, 3x3, 3x4, 3x5, 3x6, 3x7, 4x0, 4x1, 4x2, 4x3, 4x4, 4x5, 4x6, 4x7, 5x0, 5x1, 5x2, 5x3, 5x4, 5x5, 5x6, 5x7, 6x0, 6x1, 6x2, 6x3, 6x4, 6x5, 6x6, 6x7, 7x0, 7x1, 7x2, 7x3, 7x4, 7x5, 7x6, 7x7] dram = [ 8x0, 9x0, 10x0, 8x1, 9x1, 10x1, 8x2, 9x2, 10x2, 8x3, 9x3, 10x3]}, supported_data_types = [<f32>, <f16>, <bf16>, <bfp_f8>, <bfp_bf8>, <bfp_f4>, <bfp_bf4>, <bfp_f2>, <bfp_bf2>, <u32>, <u16>, <u8>], supported_tile_sizes = [ 4x16, 16x16, 32x16, 4x32, 16x32, 32x32]}], [0], [3 : i32], [ 0x0x0x0]>} {
module @SimpleModel attributes {} {
func.func @forward(%arg0: tensor<1x784xf32> {ttir.name = "input_1"}, %arg1: tensor<10x784xf32> {ttir.name = "linear.weight"}, %arg2: tensor<10xf32> {ttir.name = "linear.bias"}) -> (tensor<1x10xf32> {ttir.name = "SimpleModel_472.output_softmax_1495"}) {
%0 = tensor.empty() : tensor<784x10xf32>
%1 = "ttir.transpose"(%arg1, %0) <{dim0 = -2 : si32, dim1 = -1 : si32, operand_constraints = [#tt.operand_constraint<dram|l1|scalar|tile|none|interleaved|single_bank|height_sharded|width_sharded|block_sharded|any_layout|any_device|any_device_tile|l1_block_sharded>, #tt.operand_constraint<dram|l1|scalar|tile|none|interleaved|single_bank|height_sharded|width_sharded|block_sharded|any_layout|any_device|any_device_tile|l1_block_sharded>]}> : (tensor<10x784xf32>, tensor<784x10xf32>) -> tensor<784x10xf32>
2 changes: 1 addition & 1 deletion tools/explorer/test/models/linear_autoencoder.mlir
@@ -1,4 +1,4 @@
module @LinearAE attributes {tt.system_desc = #tt.system_desc<[{arch = <wormhole_b0>, grid = 8x8, l1_size = 1499136, num_dram_channels = 12, dram_channel_size = 1073741824, noc_l1_address_align_bytes = 16, pcie_address_align_bytes = 32, noc_dram_address_align_bytes = 32, l1_unreserved_base = 1024, erisc_l1_unreserved_base = 1024, dram_unreserved_base = 1024, dram_unreserved_end = 1073741824, physical_cores = {worker = [ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 1x0, 1x1, 1x2, 1x3, 1x4, 1x5, 1x6, 1x7, 2x0, 2x1, 2x2, 2x3, 2x4, 2x5, 2x6, 2x7, 3x0, 3x1, 3x2, 3x3, 3x4, 3x5, 3x6, 3x7, 4x0, 4x1, 4x2, 4x3, 4x4, 4x5, 4x6, 4x7, 5x0, 5x1, 5x2, 5x3, 5x4, 5x5, 5x6, 5x7, 6x0, 6x1, 6x2, 6x3, 6x4, 6x5, 6x6, 6x7, 7x0, 7x1, 7x2, 7x3, 7x4, 7x5, 7x6, 7x7] dram = [ 8x0, 9x0, 10x0, 8x1, 9x1, 10x1, 8x2, 9x2, 10x2, 8x3, 9x3, 10x3]}, supported_data_types = [<f32>, <f16>, <bf16>, <bfp_f8>, <bfp_bf8>, <bfp_f4>, <bfp_bf4>, <bfp_f2>, <bfp_bf2>, <u32>, <u16>, <u8>], supported_tile_sizes = [ 4x16, 16x16, 32x16, 4x32, 16x32, 32x32]}], [0], [3 : i32], [ 0x0x0x0]>} {
module @LinearAE attributes {} {
func.func @forward(%arg0: tensor<1x784xf32> {ttir.name = "input_1"}, %arg1: tensor<784x128xf32> {ttir.name = "encoder_lin1.weight"}, %arg2: tensor<128xf32> {ttir.name = "encoder_lin1.bias"}, %arg3: tensor<128x64xf32> {ttir.name = "encoder_lin2.weight"}, %arg4: tensor<64xf32> {ttir.name = "encoder_lin2.bias"}, %arg5: tensor<64x12xf32> {ttir.name = "encoder_lin3.weight"}, %arg6: tensor<12xf32> {ttir.name = "encoder_lin3.bias"}, %arg7: tensor<12x3xf32> {ttir.name = "encoder_lin4.weight"}, %arg8: tensor<3xf32> {ttir.name = "encoder_lin4.bias"}, %arg9: tensor<3x12xf32> {ttir.name = "decoder_lin1.weight"}, %arg10: tensor<12xf32> {ttir.name = "decoder_lin1.bias"}, %arg11: tensor<12x64xf32> {ttir.name = "decoder_lin2.weight"}, %arg12: tensor<64xf32> {ttir.name = "decoder_lin2.bias"}, %arg13: tensor<64x128xf32> {ttir.name = "decoder_lin3.weight"}, %arg14: tensor<128xf32> {ttir.name = "decoder_lin3.bias"}, %arg15: tensor<128x784xf32> {ttir.name = "decoder_lin4.weight"}, %arg16: tensor<784xf32> {ttir.name = "decoder_lin4.bias"}) -> (tensor<1x784xf32> {ttir.name = "LinearAE.output_add_29"}) {
%0 = tensor.empty() : tensor<1x128xf32>
%1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#tt.operand_constraint<dram|l1|scalar|tile|none|interleaved|single_bank|height_sharded|width_sharded|block_sharded|any_layout|any_device|any_device_tile|l1_block_sharded>, #tt.operand_constraint<dram|l1|scalar|tile|none|interleaved|single_bank|height_sharded|width_sharded|block_sharded|any_layout|any_device|any_device_tile|l1_block_sharded>, #tt.operand_constraint<dram|l1|scalar|tile|none|interleaved|single_bank|height_sharded|width_sharded|block_sharded|any_layout|any_device|any_device_tile|l1_block_sharded>]}> : (tensor<1x784xf32>, tensor<784x128xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>