Update docs for EmitC testing (#1918)
1 parent 4ba1b73 · commit 9e5980e · 13 changed files with 125 additions and 109 deletions
@@ -0,0 +1,14 @@
# EmitC testing

To locally run EmitC tests:

```bash
# Generate flatbuffers and .cpp files
llvm-lit -sv test/ttmlir/EmitC/TTNN

# Compile .cpp files to shared objects
tools/ttnn-standalone/ci_compile_dylib.py

# Run flatbuffers + shared objects and compare results
ttrt run --emitc build/test/ttmlir/EmitC/TTNN
```
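For a quicker iteration loop, the same three steps can be narrowed to a single test. A minimal sketch, assuming the `sanity_add` test referenced later in this commit (whether `ci_compile_dylib.py` can be scoped to one file is not shown here, so it is invoked unchanged):

```bash
# Regenerate the flatbuffer and .cpp for just one test
llvm-lit -sv test/ttmlir/EmitC/TTNN/sanity_add.mlir

# Compile the generated .cpp files to shared objects
tools/ttnn-standalone/ci_compile_dylib.py

# Run the flatbuffer + dylib pairs and compare results
ttrt run --emitc build/test/ttmlir/EmitC/TTNN
```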
@@ -0,0 +1,16 @@
# Lit testing

The `llvm-lit` tool is used for MLIR testing. With it you can:

```bash
# Query which tests are available
llvm-lit -sv ./build/test --show-tests

# Run an individual test:
llvm-lit -sv ./build/test/ttmlir/Dialect/TTIR/test_allocate.mlir

# Run a sub-suite:
llvm-lit -sv ./build/test/ttmlir/Dialect/TTIR
```

> See the full [llvm-lit documentation](https://llvm.org/docs/CommandGuide/lit.html) for more information.
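Two further `llvm-lit` flags are often useful here; both are standard lit options rather than anything specific to this repository:

```bash
# Run only tests whose paths match a regular expression
llvm-lit -sv ./build/test --filter 'TTIR'

# Reduce parallelism, e.g. when several tests contend for one device
llvm-lit -sv ./build/test -j 1
```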
@@ -0,0 +1,8 @@
# Testing

To run tests:

```bash
source env/activate
cmake --build build -- check-ttmlir
```
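For context, `check-ttmlir` is a CMake target in the usual LLVM style: it builds the test dependencies and then runs the lit suite over the build tree. A rough equivalent under that assumption (a sketch, not the target's exact command line):

```bash
source env/activate

# Build the compiler and test dependencies
cmake --build build

# Invoke the lit suite directly (see the Lit testing page above)
llvm-lit -sv ./build/test
```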
@@ -0,0 +1,24 @@
# `ttnn-standalone`

TTNN Standalone is a post-compile tuning/debugging tool.

Forge and third-party ML models (PyTorch, Jax, ONNX, ...) can be compiled to a set of TTNN library calls in C++. This generated code can then be used outside of the compiler environment. The TTNN Standalone tool offers all the scaffolding needed to run the C++ code on device (build & run scripts).

### Usage

```bash
# Compile a model to EmitC dialect => translate to C++ code => pipe to ttnn-standalone.cpp
./build/bin/ttmlir-opt --ttir-to-emitc-pipeline test/ttmlir/EmitC/TTNN/sanity_add.mlir \
  | ./build/bin/ttmlir-translate --mlir-to-cpp \
  > tools/ttnn-standalone/ttnn-standalone.cpp

# Change dir to `tools/ttnn-standalone` and use the `run` script to compile and run the ttnn standalone:
cd tools/ttnn-standalone
./run
```

Note: if you receive the error

```bash
-bash: ./run: Permission denied
```

run `chmod +x run` to make the script executable.
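Because the tool exists for post-compile tuning, the usual loop is to generate the C++ once and then edit and re-run it by hand. A sketch of that loop, assuming the `run` script rebuilds the standalone on every invocation:

```bash
cd tools/ttnn-standalone

# Hand-edit the generated code, e.g. tweak memory configs or add prints
$EDITOR ttnn-standalone.cpp

# Rebuild and run on device
./run
```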
@@ -0,0 +1,10 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %basename_t.ttnn
// RUN: ttmlir-opt --ttnn-modify-signatures-for-dylib --convert-ttnn-to-emitc %t.mlir > %t2.mlir
// RUN: ttmlir-translate --mlir-to-cpp %t2.mlir > %basename_t.cpp

func.func @forward(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x96xbf16>) -> tensor<64x96xbf16> {
  %0 = tensor.empty() : tensor<64x96xbf16>
  %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16>
  return %1 : tensor<64x96xbf16>
}
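For readers new to lit: `%s` is the test file, `%t`/`%t2` are per-test temporary paths, `%basename_t` names outputs after the test, and `%system_desc_path%` is a substitution from this repo's lit config pointing at a system descriptor captured from a real device. Expanded by hand, the four RUN lines amount to roughly the following (file names are illustrative):

```bash
# 1. Lower TTIR to the TTNN backend dialect using a captured system descriptor
ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=system_desc.ttsys" sanity_add.mlir > sanity_add_ttnn.mlir

# 2. Serialize the TTNN module to a flatbuffer for ttrt
ttmlir-translate --ttnn-to-flatbuffer sanity_add_ttnn.mlir > sanity_add.ttnn

# 3. Convert the same TTNN module to the EmitC dialect with dylib-friendly signatures
ttmlir-opt --ttnn-modify-signatures-for-dylib --convert-ttnn-to-emitc sanity_add_ttnn.mlir > sanity_add_emitc.mlir

# 4. Emit C++ from the EmitC module
ttmlir-translate --mlir-to-cpp sanity_add_emitc.mlir > sanity_add.cpp
```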
@@ -1,36 +1,3 @@
-## Table of contents
+# TTNN Standalone

-- [TTNN Standalone](#ttnn-standalone)
-- [Usage](#usage)
-- [TTNN Dylib](#ttnn-dylib)
-
-## TTNN Standalone
-
-TTNN Standalone is a post-compile tuning tool.
-
-Third party ML models (PyTorch, Jax, ONNX, ...) can be compiled to a set of TTNN library calls in C++. This generated code can then be manually fine-tuned outside of the compiler environment. TTNN Standalone tool offers all the scaffolding needed to run the C++ code on device (build & run scripts).
-
-### Usage
-
-```bash
-# Compile a model to C++ code
-./build/bin/ttmlir-opt --ttir-to-emitc-pipeline test/ttmlir/Silicon/TTNN/emitc/simple_add.mlir | ./build/bin/ttmlir-translate --mlir-to-cpp
-
-# Copy paste the generated function into `ttnn-standalone.cpp`.
-
-# Adapt the `main()` function in `ttnn-standalone.cpp` to feed tensors needed for the model
-
-# Run the following script from within this folder (`tools/ttnn-standalone`) to compile and run the ttnn standalone:
-
-./run
-```
-
-Note: if you receive this error
-```bash
--bash: ./run: Permission denied
-```
-running `chmod +x run` will allow the execution of the script.
-
-## TTNN Dylib
-
-Similarly to the Standalone, this tool offers the ability to compile third party ML models, but to dylibs. Initial intent for compiled dylibs is to be used in testing infrastructure, but sky's the limit :)
+Please refer to [TTMLIR docs](https://docs.tenstorrent.com/tt-mlir/ttnn-standalone.html).
@@ -1,70 +1,44 @@
-// SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC
+// SPDX-FileCopyrightText: (c) 2025 Tenstorrent AI ULC
 //
 // SPDX-License-Identifier: Apache-2.0

 #include "ttnn-precompiled.hpp"

-// To generate forward function, run:
-// ./build/bin/ttmlir-opt --ttir-to-emitc-pipeline
-// test/ttmlir/Silicon/TTNN/emitc/simple_add.mlir | ./build/bin/ttmlir-translate
-// --mlir-to-cpp
-
-ttnn::Tensor forward(ttnn::Tensor v1, ttnn::Tensor v2) {
-  ttnn::IDevice *v3 = ttnn::DeviceGetter::getInstance();
-  ttnn::MemoryConfig v4 = ttnn::MemoryConfig(
-      ttnn::TensorMemoryLayout::INTERLEAVED, ttnn::BufferType::DRAM);
+ttnn::Tensor add(ttnn::Tensor v1, ttnn::Tensor v2) {
+  ttnn::IDevice* v3 = ttnn::DeviceGetter::getInstance();
+  ttnn::MemoryConfig v4 = ttnn::MemoryConfig(ttnn::TensorMemoryLayout::INTERLEAVED, ttnn::BufferType::DRAM);
   ttnn::Tensor v5 = ttnn::to_device(v1, v3, v4);
-  ttnn::Tensor v6 =
-      ttnn::to_layout(v5, ttnn::Layout::TILE, std::nullopt, std::nullopt,
-                      static_cast<::ttnn::IDevice *>(nullptr));
+  ttnn::Tensor v6 = ttnn::to_layout(v5, ttnn::Layout::TILE, std::nullopt, std::nullopt, static_cast<::ttnn::IDevice *>(nullptr));
   ttnn::deallocate(v5, false);
-  ttnn::MemoryConfig v7 = ttnn::MemoryConfig(
-      ttnn::TensorMemoryLayout::INTERLEAVED, ttnn::BufferType::DRAM);
+  ttnn::MemoryConfig v7 = ttnn::MemoryConfig(ttnn::TensorMemoryLayout::INTERLEAVED, ttnn::BufferType::DRAM);
   ttnn::Tensor v8 = ttnn::to_device(v2, v3, v7);
-  ttnn::Tensor v9 =
-      ttnn::to_layout(v8, ttnn::Layout::TILE, std::nullopt, std::nullopt,
-                      static_cast<::ttnn::IDevice *>(nullptr));
+  ttnn::Tensor v9 = ttnn::to_layout(v8, ttnn::Layout::TILE, std::nullopt, std::nullopt, static_cast<::ttnn::IDevice *>(nullptr));
   ttnn::deallocate(v8, false);
-  ttnn::SimpleShape v10 = ttnn::SimpleShape({
-      32,
-      32,
-  });
-  ttnn::MemoryConfig v11 = ttnn::MemoryConfig(
-      ttnn::TensorMemoryLayout::INTERLEAVED, ttnn::BufferType::DRAM);
-  ttnn::Tensor v12 =
-      ttnn::empty(v10, ttnn::DataType::BFLOAT16, ttnn::Layout::TILE, v3, v11);
+  ttnn::SimpleShape v10 = ttnn::SimpleShape(tt::tt_metal::LegacyShape({32, 32, }));
+  ttnn::MemoryConfig v11 = ttnn::MemoryConfig(ttnn::TensorMemoryLayout::INTERLEAVED, ttnn::BufferType::DRAM);
+  ttnn::Tensor v12 = ttnn::empty(v10, ttnn::DataType::BFLOAT16, ttnn::Layout::TILE, v3, v11);
   ttnn::Tensor v13 = ttnn::add(v6, v9, std::nullopt, std::nullopt, v12);
   ttnn::deallocate(v9, false);
   ttnn::deallocate(v6, false);
   ttnn::Tensor v14 = ttnn::from_device(v13);
   ttnn::deallocate(v12, false);
-  ttnn::Tensor v15 =
-      ttnn::to_layout(v14, ttnn::Layout::ROW_MAJOR, std::nullopt, std::nullopt,
-                      static_cast<::ttnn::IDevice *>(nullptr));
+  ttnn::Tensor v15 = ttnn::to_layout(v14, ttnn::Layout::ROW_MAJOR, std::nullopt, std::nullopt, static_cast<::ttnn::IDevice *>(nullptr));
   ttnn::deallocate(v14, false);
   return v15;
 }

-int main() {
-  // Create shapes
-  //
-  const size_t tensor_height = 32;
-  const size_t tensor_width = 32;
-  ttnn::SimpleShape xs =
-      ttnn::SimpleShape({1, 1, tensor_height, tensor_width});
-  ttnn::SimpleShape ys =
-      ttnn::SimpleShape({1, 1, tensor_height, tensor_width});
-
-  // Create tensors on cpu
-  //
-  auto x = ttnn::ones(xs, ttnn::DataType::BFLOAT16, ttnn::Layout::TILE);
-  auto y = ttnn::ones(ys, ttnn::DataType::BFLOAT16, ttnn::Layout::TILE);
-
-  // Run fwd pass on device
-  //
-  ttnn::Tensor result = forward(x, y);
+std::tuple<ttnn::Tensor, ttnn::Tensor> createInputsFor_add() {
+  ttnn::SimpleShape v1 = ttnn::SimpleShape(tt::tt_metal::LegacyShape({32, 32, }));
+  ttnn::Tensor v2 = ttnn::ones(v1, ttnn::DataType::BFLOAT16, ttnn::Layout::ROW_MAJOR, std::nullopt, std::nullopt);
+  ttnn::SimpleShape v3 = ttnn::SimpleShape(tt::tt_metal::LegacyShape({32, 32, }));
+  ttnn::Tensor v4 = ttnn::ones(v3, ttnn::DataType::BFLOAT16, ttnn::Layout::ROW_MAJOR, std::nullopt, std::nullopt);
+  return std::make_tuple(v2, v4);
+}

-  // Print result
-  //
-  result.print();
+int32_t main() {
+  ttnn::Tensor v1;
+  ttnn::Tensor v2;
+  std::tie(v1, v2) = createInputsFor_add();
+  ttnn::Tensor v3 = add(v1, v2);
+  int32_t v4 = 0;
+  return v4;
+}