Commit a72ff16

#1108: Added ability to dump golden data into flatbuffer file for intermediate ops and output using python infra for generating ttir modules
tapspatel committed Nov 6, 2024
1 parent ae93524 commit a72ff16
Showing 16 changed files with 325 additions and 409 deletions.
20 changes: 20 additions & 0 deletions include/ttmlir/Target/Common/debug_info.fbs
@@ -1,5 +1,24 @@
include "Common/types.fbs";

namespace tt.target;

table GoldenTensor {
name: string;
shape: [int64];
stride: [int64];
dtype: tt.target.DataType;
data: [uint8];
}

table GoldenKV {
key: string;
value: GoldenTensor;
}

table GoldenInfo {
golden_map: [GoldenKV];
}

table MLIR {
name: string;
source: string;
@@ -8,4 +27,5 @@ table MLIR {
table DebugInfo {
mlir: MLIR;
cpp: string;
golden_info: GoldenInfo;
}
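
Since FlatBuffers has no built-in map type, GoldenInfo models the golden map as a vector of GoldenKV pairs. As a minimal reading sketch — assuming Python classes are generated from this schema (nothing in this commit adds that codegen; the accessor names follow standard FlatBuffers Python conventions) and that `debug_info` is an already-deserialized tt.target.DebugInfo table — the map could be walked like this:

    import numpy as np

    # Hypothetical generated accessors (field_name -> FieldName()).
    golden_info = debug_info.GoldenInfo()
    for i in range(golden_info.GoldenMapLength()):
        kv = golden_info.GoldenMap(i)      # GoldenKV entry
        tensor = kv.Value()                # GoldenTensor table
        raw = tensor.DataAsNumpy()         # [uint8] payload
        # The C++ writer below serializes float32 data only, so reinterpret:
        values = raw.view(np.float32).reshape(tensor.ShapeAsNumpy())
        print(kv.Key().decode(), values.shape)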
6 changes: 4 additions & 2 deletions include/ttmlir/Target/TTMetal/TTMetalToFlatbuffer.h
@@ -7,13 +7,15 @@

#include "mlir/IR/Operation.h"
#include "mlir/Support/LogicalResult.h"
#include "ttmlir/Target/Utils/MLIRToFlatbuffer.h"

namespace mlir::tt::ttmetal {

// Translates a TTMetal operation to a flatbuffer and writes it to the given
// stream.
LogicalResult translateTTMetalToFlatbuffer(Operation *op,
llvm::raw_ostream &os);
LogicalResult translateTTMetalToFlatbuffer(
Operation *op, llvm::raw_ostream &os,
std::unordered_map<std::string, GoldenTensor> goldenMap = {});
} // namespace mlir::tt::ttmetal

#endif
2 changes: 2 additions & 0 deletions include/ttmlir/Target/TTMetal/binary.fbs
@@ -1,5 +1,6 @@
include "Common/types.fbs";
include "Common/version.fbs";
include "Common/debug_info.fbs";
include "command.fbs";

namespace tt.target.metal;
@@ -15,6 +16,7 @@ table Program {
inputs: [TensorRef];
outputs: [TensorRef];
device_programs: [DeviceProgram];
debug_info: DebugInfo;
}

table TTMetalBinary {
9 changes: 7 additions & 2 deletions include/ttmlir/Target/TTNN/TTNNToFlatbuffer.h
@@ -7,16 +7,21 @@

#include "mlir/IR/Operation.h"
#include "mlir/Support/LogicalResult.h"
#include "ttmlir/Target/Utils/MLIRToFlatbuffer.h"

namespace mlir::tt::ttnn {

// Convert a TTNNIR operation to a flatbuffer
std::shared_ptr<void> ttnnToFlatbuffer(Operation *op);
std::shared_ptr<void>
ttnnToFlatbuffer(Operation *op,
std::unordered_map<std::string, GoldenTensor> goldenMap = {});

// Convert a TTNNIR operation to a flatbuffer
// This function signature is required in order to register the conversion in
// mlir translation framework
LogicalResult translateTTNNToFlatbuffer(Operation *op, llvm::raw_ostream &os);
LogicalResult translateTTNNToFlatbuffer(
Operation *op, llvm::raw_ostream &os,
std::unordered_map<std::string, GoldenTensor> goldenMap = {});
} // namespace mlir::tt::ttnn

#endif
25 changes: 25 additions & 0 deletions include/ttmlir/Target/Utils/MLIRToFlatbuffer.h
@@ -5,6 +5,7 @@
#ifndef TTMLIR_TARGET_UTILS_MLIRTOFLATBUFFER_H
#define TTMLIR_TARGET_UTILS_MLIRTOFLATBUFFER_H

#include <numeric>
#include <type_traits>

#include "flatbuffers/flatbuffers.h"
@@ -15,6 +16,30 @@
#include "ttmlir/Utils.h"

namespace mlir::tt {
struct GoldenTensor {
std::string name;
std::vector<int64_t> shape;
std::vector<int64_t> strides;
::tt::target::DataType dtype;
std::uint8_t *data;

GoldenTensor(std::string name, std::vector<int64_t> shape,
std::vector<int64_t> strides, ::tt::target::DataType dtype,
std::uint8_t *data)
: name(name), shape(shape), strides(strides), dtype(dtype), data(data) {}

std::vector<std::uint8_t> convertDataToVector() {
std::vector<std::uint8_t> dataVec;
int totalDataSize = std::accumulate(this->shape.begin(), this->shape.end(),
1, std::multiplies<int64_t>()) *
4; // 4 is the size of float32
for (int i = 0; i < totalDataSize; i++) {
dataVec.push_back(this->data[i]);
}
return dataVec;
}
};

inline ::tt::target::OOBVal toFlatbuffer(FlatbufferObjectCache &,
OOBVal oobVal) {
switch (oobVal) {
10 changes: 6 additions & 4 deletions lib/Target/TTMetal/TTMetalToFlatbuffer.cpp
@@ -198,7 +198,8 @@ Value getOperandThroughDPSOps(Value value) {
return value;
}

static std::shared_ptr<void> translateModuleToFlatbuffer(Operation *op) {
static std::shared_ptr<void> translateModuleToFlatbuffer(
Operation *op, std::unordered_map<std::string, GoldenTensor> goldenMap) {
::flatbuffers::FlatBufferBuilder fbb;
FlatbufferObjectCache cache(&fbb);

@@ -372,9 +373,10 @@ static std::shared_ptr<void> translateModuleToFlatbuffer(Operation *op) {
return serializedBinary;
}

LogicalResult translateTTMetalToFlatbuffer(Operation *op,
llvm::raw_ostream &os) {
std::shared_ptr<void> data = translateModuleToFlatbuffer(op);
LogicalResult translateTTMetalToFlatbuffer(
Operation *op, llvm::raw_ostream &os,
std::unordered_map<std::string, GoldenTensor> goldenMap) {
std::shared_ptr<void> data = translateModuleToFlatbuffer(op, goldenMap);
std::size_t size = ::flatbuffers::GetSizePrefixedBufferLength(
static_cast<const uint8_t *>(data.get()));
os.write(reinterpret_cast<char const *>(data.get()), size);
4 changes: 3 additions & 1 deletion lib/Target/TTMetal/TTMetalToFlatbufferRegistration.cpp
@@ -18,7 +18,9 @@ namespace mlir::tt::ttmetal {
void registerTTMetalToFlatbuffer() {
TranslateFromMLIRRegistration reg(
"ttmetal-to-flatbuffer", "translate ttmetal dialect to flatbuffer",
translateTTMetalToFlatbuffer /* function */,
[](Operation *op, llvm::raw_ostream &os) -> LogicalResult {
return translateTTMetalToFlatbuffer(op, os, {});
},
[](DialectRegistry &registry) {
registry.insert<mlir::tt::TTDialect, mlir::tt::ttmetal::TTMetalDialect,
mlir::tt::ttkernel::TTKernelDialect,
28 changes: 24 additions & 4 deletions lib/Target/TTNN/TTNNToFlatbuffer.cpp
@@ -31,6 +31,8 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

#include "ttmlir/Target/Common/debug_info_generated.h"

#include <cassert>
#include <fstream>
#include <optional>
@@ -707,7 +709,9 @@ emitTTNNOperation(FlatbufferObjectCache &cache, Operation *op,
llvm_unreachable("unhandled op in emitTTNNOperation");
}

std::shared_ptr<void> ttnnToFlatbuffer(Operation *op) {
std::shared_ptr<void>
ttnnToFlatbuffer(Operation *op,
std::unordered_map<std::string, GoldenTensor> goldenMap) {
ModuleOp module = dyn_cast<ModuleOp>(op);
assert(module && "Expected ModuleOp as top level operation");

@@ -728,7 +732,21 @@ std::shared_ptr<void> ttnnToFlatbuffer(Operation *op) {
auto result = mlir::tt::ttnn::emitTTNNAsCpp(module, os);
(void)result;

auto debugInfo = ::tt::target::CreateDebugInfoDirect(fbb, mlir, cpp.c_str());
std::vector<::flatbuffers::Offset<::tt::target::GoldenKV>> goldenKVList;

for (auto element : goldenMap) {
std::vector<std::uint8_t> dataTensor = element.second.convertDataToVector();
auto goldenTensor = ::tt::target::CreateGoldenTensorDirect(
fbb, element.second.name.c_str(), &element.second.shape,
&element.second.strides, element.second.dtype, &dataTensor);
auto goldenKV = ::tt::target::CreateGoldenKVDirect(
fbb, element.first.c_str(), goldenTensor);
goldenKVList.push_back(goldenKV);
}

auto goldenInfo = ::tt::target::CreateGoldenInfoDirect(fbb, &goldenKVList);
auto debugInfo =
::tt::target::CreateDebugInfoDirect(fbb, mlir, cpp.c_str(), goldenInfo);

std::vector<::flatbuffers::Offset<::tt::target::ttnn::Program>> programs;
module->walk([&](func::FuncOp func) {
@@ -756,8 +774,10 @@ std::shared_ptr<void> ttnnToFlatbuffer(Operation *op) {
return bufferPtr;
}

LogicalResult translateTTNNToFlatbuffer(Operation *op, llvm::raw_ostream &os) {
std::shared_ptr<void> data = ttnnToFlatbuffer(op);
LogicalResult translateTTNNToFlatbuffer(
Operation *op, llvm::raw_ostream &os,
std::unordered_map<std::string, GoldenTensor> goldenMap) {
std::shared_ptr<void> data = ttnnToFlatbuffer(op, goldenMap);
std::size_t size = ::flatbuffers::GetSizePrefixedBufferLength(
static_cast<const uint8_t *>(data.get()));
os.write(reinterpret_cast<char const *>(data.get()), size);
5 changes: 4 additions & 1 deletion lib/Target/TTNN/TTNNToFlatbufferRegistration.cpp
@@ -18,7 +18,10 @@ namespace mlir::tt::ttnn {
void registerTTNNToFlatbuffer() {
TranslateFromMLIRRegistration reg(
"ttnn-to-flatbuffer", "translate ttnn to flatbuffer",
translateTTNNToFlatbuffer /* function */, [](DialectRegistry &registry) {
[](Operation *op, llvm::raw_ostream &os) -> LogicalResult {
return translateTTNNToFlatbuffer(op, os, {});
},
[](DialectRegistry &registry) {
// clang-format off
registry.insert<mlir::tt::TTDialect,
mlir::tt::ttnn::TTNNDialect,
33 changes: 28 additions & 5 deletions python/Passes.cpp
@@ -167,7 +167,8 @@ void populatePassesModule(py::module &m) {
});

m.def("ttnn_to_flatbuffer_file",
[](MlirModule module, std::string &filepath) {
[](MlirModule module, std::string &filepath,
std::unordered_map<std::string, mlir::tt::GoldenTensor> goldenMap) {
mlir::Operation *moduleOp = unwrap(mlirModuleGetOperation(module));

std::error_code fileError;
@@ -178,15 +179,16 @@
". Error: " + fileError.message());
}

if (mlir::failed(
mlir::tt::ttnn::translateTTNNToFlatbuffer(moduleOp, file))) {
if (mlir::failed(mlir::tt::ttnn::translateTTNNToFlatbuffer(
moduleOp, file, goldenMap))) {
throw std::runtime_error("Failed to write flatbuffer to file: " +
filepath);
}
});

m.def("ttmetal_to_flatbuffer_file",
[](MlirModule module, std::string &filepath) {
[](MlirModule module, std::string &filepath,
std::unordered_map<std::string, mlir::tt::GoldenTensor> goldenMap) {
mlir::Operation *moduleOp = unwrap(mlirModuleGetOperation(module));
std::error_code fileError;
llvm::raw_fd_ostream file(filepath, fileError);
@@ -195,11 +197,32 @@
". Error: " + fileError.message());
}
if (mlir::failed(mlir::tt::ttmetal::translateTTMetalToFlatbuffer(
moduleOp, file))) {
moduleOp, file, goldenMap))) {
throw std::runtime_error("Failed to write flatbuffer to file: " +
filepath);
}
});

py::enum_<::tt::target::DataType>(m, "DataType")
.value("Float32", ::tt::target::DataType::Float32)
.value("Float16", ::tt::target::DataType::Float16);

py::class_<mlir::tt::GoldenTensor>(m, "GoldenTensor")
.def(py::init<std::string, std::vector<int64_t>, std::vector<int64_t>,
::tt::target::DataType, std::uint8_t *>())
.def_readwrite("name", &mlir::tt::GoldenTensor::name)
.def_readwrite("shape", &mlir::tt::GoldenTensor::shape)
.def_readwrite("strides", &mlir::tt::GoldenTensor::strides)
.def_readwrite("dtype", &mlir::tt::GoldenTensor::dtype)
.def_readwrite("data", &mlir::tt::GoldenTensor::data);

m.def("create_golden_tensor",
[](std::string name, std::vector<int64_t> shape,
std::vector<int64_t> strides, ::tt::target::DataType dtype,
std::uintptr_t ptr) {
return mlir::tt::GoldenTensor(name, shape, strides, dtype,
reinterpret_cast<std::uint8_t *>(ptr));
});
}

} // namespace mlir::ttmlir::python
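
With the bindings above, producing a flatbuffer with embedded golden data from Python might look like the following minimal sketch. It assumes the bindings are exposed as ttmlir.passes and that `module` is an MlirModule already lowered to TTNN; the expected stride convention (elements vs. bytes) is not pinned down by this diff, so the element-stride conversion is an assumption, and "input_0" is an illustrative key name.

    import numpy as np
    from ttmlir.passes import (DataType, create_golden_tensor,
                               ttnn_to_flatbuffer_file)

    golden = np.ones((32, 32), dtype=np.float32)  # float32 matches DataType.Float32
    golden_tensor = create_golden_tensor(
        "input_0",                                       # illustrative key name
        list(golden.shape),
        [s // golden.itemsize for s in golden.strides],  # element strides (assumed)
        DataType.Float32,
        golden.ctypes.data,                              # buffer address, taken as uintptr_t
    )

    ttnn_to_flatbuffer_file(module, "out.ttnn", {"input_0": golden_tensor})

Note that GoldenTensor stores a raw pointer rather than copying the data, so the numpy buffer must stay alive until ttnn_to_flatbuffer_file returns.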
12 changes: 5 additions & 7 deletions python/test_infra/test_ttir_ops_ttmetal.py
@@ -9,17 +9,15 @@

from ttmlir.test_utils import (
compile_as_mlir_module,
translate_ttnn_to_flatbuffer,
ttir_to_ttnn,
translate_ttmetal_to_flatbuffer,
ttmetal_to_flatbuffer,
ttir_to_ttmetal,
)
from ttmlir.ttir_builder import Operand, TTIRBuilder

system_desc_path = os.getenv("SYSTEM_DESC_PATH", "")


@translate_ttmetal_to_flatbuffer(output_file_name="test_exp.ttm")
@ttmetal_to_flatbuffer(output_file_name="test_exp.ttm")
@ttir_to_ttmetal(
output_file_name="test_exp.mlir", system_desc_path=f"{system_desc_path}"
)
@@ -28,7 +26,7 @@ def test_exp_ttmetal(in0: Operand, builder: TTIRBuilder):
return builder.exp(in0)


@translate_ttmetal_to_flatbuffer(output_file_name="test_add.ttm")
@ttmetal_to_flatbuffer(output_file_name="test_add.ttm")
@ttir_to_ttmetal(
output_file_name="test_add.mlir", system_desc_path=f"{system_desc_path}"
)
@@ -37,7 +35,7 @@ def test_add_ttmetal(in0: Operand, in1: Operand, builder: TTIRBuilder):
return builder.add(in0, in1)


@translate_ttmetal_to_flatbuffer(output_file_name="test_multiply.ttm")
@ttmetal_to_flatbuffer(output_file_name="test_multiply.ttm")
@ttir_to_ttmetal(
output_file_name="test_multiply.mlir", system_desc_path=f"{system_desc_path}"
)
@@ -46,7 +44,7 @@ def test_multiply_ttmetal(in0: Operand, in1: Operand, builder: TTIRBuilder):
return builder.multiply(in0, in1)


@translate_ttmetal_to_flatbuffer(output_file_name="test_arbitrary_op_chain.ttm")
@ttmetal_to_flatbuffer(output_file_name="test_arbitrary_op_chain.ttm")
@ttir_to_ttmetal(
output_file_name="test_arbitrary_op_chain.mlir",
system_desc_path=f"{system_desc_path}",