From 282a371567c9766b97bc66f2d4aea914377d28bc Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 13 Nov 2024 21:01:18 +0000 Subject: [PATCH 01/22] Added maybe_downcast & hardened TT Attrs and Types to include better support --- include/ttmlir-c/TTAttrs.h | 8 + lib/CAPI/TTAttrs.cpp | 30 +++ python/TTModule.cpp | 499 +++++++++++++++++++++++++++++++------ python/TTNNModule.cpp | 90 ++++++- 4 files changed, 543 insertions(+), 84 deletions(-) diff --git a/include/ttmlir-c/TTAttrs.h b/include/ttmlir-c/TTAttrs.h index 9b31b36ed6..cdaa67c185 100644 --- a/include/ttmlir-c/TTAttrs.h +++ b/include/ttmlir-c/TTAttrs.h @@ -75,6 +75,14 @@ MLIR_CAPI_EXPORTED MlirAttribute ttmlirTTOperandConstraintArrayAttrGet( MlirContext ctx, uint32_t *OperandConstraints, size_t OperandConstraintsSize); +MLIR_CAPI_EXPORTED MlirAttribute ttmlirTTTileSizeAttrGet(MlirContext ctx, + int64_t y, int64_t x); + +MLIR_CAPI_EXPORTED MlirAttribute ttmlirTTChipPhysicalCoresAttrGet( + MlirContext ctx, MlirAttribute *worker, size_t workerSize, + MlirAttribute *dram, size_t dramSize, MlirAttribute *eth, size_t ethSize, + MlirAttribute *eth_inactive, size_t eth_inactiveSize); + #ifdef __cplusplus } #endif diff --git a/lib/CAPI/TTAttrs.cpp b/lib/CAPI/TTAttrs.cpp index 8f2949852d..b4bdc0e31a 100644 --- a/lib/CAPI/TTAttrs.cpp +++ b/lib/CAPI/TTAttrs.cpp @@ -182,4 +182,34 @@ ttmlirTTOperandConstraintArrayAttrGet(MlirContext ctx, return wrap(ArrayAttr::get(unwrap(ctx), operandConstraintsArray)); } +MlirAttribute ttmlirTTTileSizeAttrGet(MlirContext ctx, int64_t y, int64_t x) { + return wrap(TileSizeAttr::get(unwrap(ctx), y, x)); +} + +MlirAttribute ttmlirTTChipPhysicalCoresAttrGet( + MlirContext ctx, MlirAttribute *worker, size_t workerSize, + MlirAttribute *dram, size_t dramSize, MlirAttribute *eth, size_t ethSize, + MlirAttribute *eth_inactive, size_t eth_inactiveSize) { + std::vector workerVec, dramVec, ethVec, ethInactiveVec; + for (size_t i = 0; i < workerSize; i++) { + workerVec.push_back(mlir::cast(unwrap(worker[i]))); + } + + for (size_t i = 0; i < dramSize; i++) { + dramVec.push_back(mlir::cast(unwrap(dram[i]))); + } + + for (size_t i = 0; i < ethSize; i++) { + ethVec.push_back(mlir::cast(unwrap(eth[i]))); + } + + for (size_t i = 0; i < eth_inactiveSize; i++) { + ethInactiveVec.push_back( + mlir::cast(unwrap(eth_inactive[i]))); + } + + return wrap(ChipPhysicalCoresAttr::get(unwrap(ctx), workerVec, dramVec, + ethVec, ethInactiveVec)); +} + } // namespace mlir::tt diff --git a/python/TTModule.cpp b/python/TTModule.cpp index 9cdc49f278..65120faa76 100644 --- a/python/TTModule.cpp +++ b/python/TTModule.cpp @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 #include +#include #include #include "ttmlir/Bindings/Python/TTMLIRModule.h" @@ -11,7 +12,12 @@ #include "mlir/CAPI/IR.h" #include "ttmlir/Dialect/TT/IR/TTOpsTypes.h" -#include "ttmlir/Target/Common/Target.h" + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcovered-switch-default" +#include "ttmlir/Target/Common/types_generated.h" +#pragma clang diagnostic pop + #include "ttmlir/Utils.h" namespace mlir::ttmlir::python { @@ -108,26 +114,77 @@ void populateTTModule(py::module &m) { [](MlirContext ctx, std::vector shape) { return wrap(tt::GridAttr::get(unwrap(ctx), shape)); }) - .def_property_readonly("shape", [](tt::GridAttr const &ga) { - return std::vector(ga.getShape().begin(), ga.getShape().end()); - }); + .def_static( + "maybe_downcast", + [](MlirAttribute attr) -> std::variant { + auto res = mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else 
+ return py::none(); + }) + .def_property_readonly( + "shape", [](tt::GridAttr const &ga) { return ga.getShape().vec(); }); py::class_(m, "ChipCapabilityAttr") - .def_static("get", [](MlirContext ctx, uint32_t chipCapability) { - return wrap(tt::ChipCapabilityAttr::get( - unwrap(ctx), static_cast(chipCapability))); - }); + .def_static( + "get", + [](MlirContext ctx, uint32_t chipCapability) { + return wrap(tt::ChipCapabilityAttr::get( + unwrap(ctx), static_cast(chipCapability))); + }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = + mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly("capability_as_int", + [](tt::ChipCapabilityAttr self) { + return static_cast(self.getValue()); + }); py::class_(m, "ArchAttr") - .def_static("get", [](MlirContext ctx, uint32_t arch) { - return wrap( - tt::ArchAttr::get(unwrap(ctx), static_cast(arch))); + .def_static("get", + [](MlirContext ctx, uint32_t arch) { + return wrap(tt::ArchAttr::get(unwrap(ctx), + static_cast(arch))); + }) + .def_static( + "maybe_downcast", + [](MlirAttribute attr) -> std::variant { + auto res = mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly("arch_as_int", [](tt::ArchAttr self) { + return static_cast(self.getValue()); }); py::class_(m, "DataTypeAttr") - .def_static("get", [](MlirContext ctx, uint16_t *supportedDataTypes) { - return wrap(tt::DataTypeAttr::get( - unwrap(ctx), static_cast(*supportedDataTypes))); + .def_static( + "get", + [](MlirContext ctx, uint16_t *supportedDataTypes) { + return wrap(tt::DataTypeAttr::get( + unwrap(ctx), static_cast(*supportedDataTypes))); + }) + .def_static( + "maybe_downcast", + [](MlirAttribute attr) -> std::variant { + auto res = mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly("data_type_as_int", [](tt::DataTypeAttr self) { + return static_cast(self.getValue()); }); py::class_(m, "ChipDescAttr") @@ -152,84 +209,304 @@ void populateTTModule(py::module &m) { mlir::cast(unwrap(supportedDataTypes)), mlir::cast(unwrap(supportedTileSizes)), numCBs)); - }); + }) + .def_static( + "maybe_downcast", + [](MlirAttribute attr) -> std::variant { + auto res = mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly("usable_l1_size", + &tt::ChipDescAttr::getUsableL1Size) + .def_property_readonly("usable_dram_channel_size", + &tt::ChipDescAttr::getUsableDramChannelSize) + .def_property_readonly("arch", &tt::ChipDescAttr::getArch) + .def_property_readonly( + "grid", [](tt::ChipDescAttr self) { return self.getGrid().vec(); }) + .def_property_readonly("l1_size", &tt::ChipDescAttr::getL1Size) + .def_property_readonly("num_dram_channels", + &tt::ChipDescAttr::getNumDramChannels) + .def_property_readonly("dram_channel_size", + &tt::ChipDescAttr::getDramChannelSize) + .def_property_readonly("noc_l1_address_align_bytes", + &tt::ChipDescAttr::getNocL1AddressAlignBytes) + .def_property_readonly("pcie_address_align_bytes", + &tt::ChipDescAttr::getPcieAddressAlignBytes) + .def_property_readonly("noc_dram_address_align_bytes", + &tt::ChipDescAttr::getNocDRAMAddressAlignBytes) + .def_property_readonly("l1_unreserved_base", + &tt::ChipDescAttr::getL1UnreservedBase) + .def_property_readonly("erisc_l1_unreserved_base", + &tt::ChipDescAttr::getEriscL1UnreservedBase) + .def_property_readonly("dram_unreserved_base", + 
&tt::ChipDescAttr::getDramUnreservedBase) + .def_property_readonly("dram_unreserved_end", + &tt::ChipDescAttr::getDramUnreservedEnd) + .def_property_readonly("chip_physical_cores", + &tt::ChipDescAttr::getChipPhysicalCores) + .def_property_readonly("supported_data_types", + [](tt::ChipDescAttr self) { + return self.getSupportedDataTypes().vec(); + }) + .def_property_readonly("supported_tile_sizes", + [](tt::ChipDescAttr self) { + return self.getSupportedTileSizes().vec(); + }) + .def_property_readonly("num_cbs", &tt::ChipDescAttr::getNumCBs); + + py::class_(m, "TileSizeAttr") + .def_static("get", + [](MlirContext ctx, int64_t y, int64_t x) { + return wrap(tt::TileSizeAttr::get(unwrap(ctx), y, x)); + }) + .def_static( + "maybe_downcast", + [](MlirAttribute attr) -> std::variant { + auto res = mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly("y", &tt::TileSizeAttr::getY) + .def_property_readonly("x", &tt::TileSizeAttr::getX); + + py::class_(m, "ChipPhysicalCoresAttr") + .def_static("get", + [](MlirContext ctx, std::vector worker, + std::vector dram, + std::vector eth, + std::vector eth_inactive) { + return wrap(tt::ChipPhysicalCoresAttr::get( + unwrap(ctx), worker, dram, eth, eth_inactive)); + }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = + mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly( + "worker", + [](tt::ChipPhysicalCoresAttr self) { return self.getWorker().vec(); }) + .def_property_readonly( + "dram", + [](tt::ChipPhysicalCoresAttr self) { return self.getDram().vec(); }) + .def_property_readonly( + "eth", + [](tt::ChipPhysicalCoresAttr self) { return self.getEth().vec(); }) + .def_property_readonly("eth_inactive", + [](tt::ChipPhysicalCoresAttr self) { + return self.getEthInactive().vec(); + }); py::class_(m, "ChipCoordAttr") - .def_static("get", [](MlirContext ctx, unsigned rack, unsigned shelf, - unsigned y, unsigned x) { - return wrap(tt::ChipCoordAttr::get(unwrap(ctx), rack, shelf, y, x)); - }); + .def_static("get", + [](MlirContext ctx, unsigned rack, unsigned shelf, unsigned y, + unsigned x) { + return wrap( + tt::ChipCoordAttr::get(unwrap(ctx), rack, shelf, y, x)); + }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly("rack", &tt::ChipCoordAttr::getRack) + .def_property_readonly("shelf", &tt::ChipCoordAttr::getShelf) + .def_property_readonly("y", &tt::ChipCoordAttr::getY) + .def_property_readonly("x", &tt::ChipCoordAttr::getX); py::class_(m, "ChipChannelAttr") - .def_static("get", [](MlirContext ctx, unsigned deviceId0, - std::vector ethernetCoreCoord0, - unsigned deviceId1, - std::vector ethernetCoreCoord1) { - return wrap(tt::ChipChannelAttr::get(unwrap(ctx), deviceId0, - ethernetCoreCoord0, deviceId1, - ethernetCoreCoord1)); - }); + .def_static( + "get", + [](MlirContext ctx, unsigned deviceId0, + std::vector ethernetCoreCoord0, unsigned deviceId1, + std::vector ethernetCoreCoord1) { + return wrap(tt::ChipChannelAttr::get(unwrap(ctx), deviceId0, + ethernetCoreCoord0, deviceId1, + ethernetCoreCoord1)); + }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = + mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly("device_id0", 
&tt::ChipChannelAttr::getDeviceId0) + .def_property_readonly("ethernet_core_coord0", + [](tt::ChipChannelAttr self) { + return self.getEthernetCoreCoord0().vec(); + }) + .def_property_readonly("device_id1", &tt::ChipChannelAttr::getDeviceId1) + .def_property_readonly("ethernet_core_coord1", + [](tt::ChipChannelAttr self) { + return self.getEthernetCoreCoord1().vec(); + }); py::class_(m, "SystemDescAttr") .def_static("get_default", [](MlirContext ctx) { return wrap(tt::SystemDescAttr::getDefault(unwrap(ctx))); }) - .def_static("get", [](MlirContext ctx, - std::vector chipDescs, - std::vector chipDescIndices, - std::vector chipCapabilities, - std::vector chipCoords, - std::vector chipChannels) { - std::vector chipDescsUnwrapped; - for (auto chipDesc : chipDescs) { - chipDescsUnwrapped.push_back( - mlir::cast(unwrap(chipDesc))); - } - std::vector chipCapabilitiesUnwrapped; - for (auto chipCapability : chipCapabilities) { - chipCapabilitiesUnwrapped.push_back( - mlir::cast(unwrap(chipCapability))); - } - std::vector chipCoordsUnwrapped; - for (auto chipCoord : chipCoords) { - chipCoordsUnwrapped.push_back( - mlir::cast(unwrap(chipCoord))); - } - std::vector chipChannelsUnwrapped; - for (auto chipChannel : chipChannels) { - chipChannelsUnwrapped.push_back( - mlir::cast(unwrap(chipChannel))); - } - return wrap(tt::SystemDescAttr::get( - unwrap(ctx), chipDescsUnwrapped, chipDescIndices, - chipCapabilitiesUnwrapped, chipCoordsUnwrapped, - chipChannelsUnwrapped)); + .def_static( + "get", + [](MlirContext ctx, std::vector chipDescs, + std::vector chipDescIndices, + std::vector chipCapabilities, + std::vector chipCoords, + std::vector chipChannels) { + std::vector chipDescsUnwrapped; + for (auto chipDesc : chipDescs) { + chipDescsUnwrapped.push_back( + mlir::cast(unwrap(chipDesc))); + } + std::vector chipCapabilitiesUnwrapped; + for (auto chipCapability : chipCapabilities) { + chipCapabilitiesUnwrapped.push_back( + mlir::cast(unwrap(chipCapability))); + } + std::vector chipCoordsUnwrapped; + for (auto chipCoord : chipCoords) { + chipCoordsUnwrapped.push_back( + mlir::cast(unwrap(chipCoord))); + } + std::vector chipChannelsUnwrapped; + for (auto chipChannel : chipChannels) { + chipChannelsUnwrapped.push_back( + mlir::cast(unwrap(chipChannel))); + } + return wrap(tt::SystemDescAttr::get( + unwrap(ctx), chipDescsUnwrapped, chipDescIndices, + chipCapabilitiesUnwrapped, chipCoordsUnwrapped, + chipChannelsUnwrapped)); + }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly( + "chip_descs", + [](tt::SystemDescAttr self) { return self.getChipDescs().vec(); }) + .def_property_readonly("chip_desc_indices", + [](tt::SystemDescAttr self) { + return self.getChipDescIndices().vec(); + }) + .def_property_readonly("chip_capabilities", + [](tt::SystemDescAttr self) { + return self.getChipCapabilities().vec(); + }) + .def_property_readonly( + "chip_coords", + [](tt::SystemDescAttr self) { return self.getChipCoords().vec(); }) + .def_property_readonly("chip_channels", [](tt::SystemDescAttr self) { + return self.getChipChannels().vec(); }); py::class_(m, "MemorySpaceAttr") - .def_static("get", [](MlirContext ctx, uint32_t memorySpace) { - return wrap(tt::MemorySpaceAttr::get( - unwrap(ctx), static_cast(memorySpace))); - }); + .def_static( + "get", + [](MlirContext ctx, uint32_t memorySpace) { + return wrap(tt::MemorySpaceAttr::get( + unwrap(ctx), 
static_cast(memorySpace))); + }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = + mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly("memory_space_as_int", + [](tt::MemorySpaceAttr self) { + return static_cast(self.getValue()); + }); py::class_(m, "OOBValAttr") - .def_static("get", [](MlirContext ctx, uint32_t oobVal) { - return wrap( - tt::OOBValAttr::get(unwrap(ctx), static_cast(oobVal))); + .def_static("get", + [](MlirContext ctx, uint32_t oobVal) { + return wrap(tt::OOBValAttr::get( + unwrap(ctx), static_cast(oobVal))); + }) + .def_static( + "maybe_downcast", + [](MlirAttribute attr) -> std::variant { + auto res = mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly("oob_val_as_int", [](tt::OOBValAttr self) { + return static_cast(self.getValue()); }); py::class_(m, "TensorMemoryLayoutAttr") - .def_static("get", [](MlirContext ctx, uint32_t memLayout) { - return wrap(tt::TensorMemoryLayoutAttr::get( - unwrap(ctx), static_cast(memLayout))); - }); + .def_static( + "get", + [](MlirContext ctx, uint32_t memLayout) { + return wrap(tt::TensorMemoryLayoutAttr::get( + unwrap(ctx), static_cast(memLayout))); + }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = mlir::dyn_cast( + unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly("mem_layout_as_int", + [](tt::TensorMemoryLayoutAttr self) { + return static_cast(self.getValue()); + }); py::class_(m, "IteratorTypeAttr") - .def_static("get", [](MlirContext ctx, uint32_t iteratorType) { - return wrap(tt::IteratorTypeAttr::get( - unwrap(ctx), static_cast(iteratorType))); - }); + .def_static( + "get", + [](MlirContext ctx, uint32_t iteratorType) { + return wrap(tt::IteratorTypeAttr::get( + unwrap(ctx), static_cast(iteratorType))); + }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = + mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly("iterator_type_as_int", + [](tt::IteratorTypeAttr self) { + return static_cast(self.getValue()); + }); py::class_(m, "OperandConstraintAttr") .def_static("get", @@ -238,16 +515,45 @@ void populateTTModule(py::module &m) { unwrap(ctx), static_cast(operandConstraint))); }) - .def_static("get", [](MlirContext ctx, - std::vector attributesArray) { - return ::ttmlir::utils::wrapArrayOfMlirAttributesAsAttribute( - ctx, attributesArray); - }); + .def_static( + "get", + [](MlirContext ctx, std::vector attributesArray) { + return ::ttmlir::utils::wrapArrayOfMlirAttributesAsAttribute( + ctx, attributesArray); + }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = + mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly("operand_constraint_as_int", + [](tt::OperandConstraintAttr self) { + return static_cast(self.getValue()); + }); py::class_(m, "DeviceType") - .def_static("get", [](MlirContext ctx, MlirAttribute deviceAttr) { - return wrap(tt::DeviceType::get( - unwrap(ctx), mlir::cast(unwrap(deviceAttr)))); + .def_static( + "get", + [](MlirContext ctx, MlirAttribute deviceAttr) { + return wrap(tt::DeviceType::get( + unwrap(ctx), mlir::cast(unwrap(deviceAttr)))); + }) + .def_static( + "maybe_downcast", + [](MlirType type) -> std::variant { + auto res = mlir::dyn_cast(unwrap(type)); + if 
(res) + return res; + else + return py::none(); + }) + .def_property_readonly("device_attr", [](tt::DeviceType const &self) { + return self.getDesc(); }); py::class_(m, "DeviceAttr") @@ -270,9 +576,28 @@ void populateTTModule(py::module &m) { unwrap(workerGridMapping)), unwrap(l1Map), unwrap(dramMap), meshShape, chipIds)); }) - .def("unwrap", [](MlirAttribute const &self) { - return mlir::cast(unwrap(self)); - }); + .def("unwrap", + [](MlirAttribute const &self) { + return mlir::cast(unwrap(self)); + }) + .def_property_readonly("grid_attr", &tt::DeviceAttr::getWorkerGrid) + .def_property_readonly("l1_map", &tt::DeviceAttr::getL1Map) + .def_property_readonly("dram_map", &tt::DeviceAttr::getDramMap) + .def_property_readonly( + "mesh_shape", + [](tt::DeviceAttr const &self) { return self.getMeshShape().vec(); }) + .def_property_readonly( + "chip_ids", + [](tt::DeviceAttr const &self) { return self.getChipIds().vec(); }) + .def_static( + "maybe_downcast", + [](MlirAttribute attr) -> std::variant { + auto res = mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }); py::class_(m, "TileType") .def_static("get", @@ -282,6 +607,14 @@ void populateTTModule(py::module &m) { unwrap(ctx), SmallVector{height, width}, static_cast(dataType))); }) + .def_static("maybe_downcast", + [](MlirType type) -> std::variant { + auto res = mlir::dyn_cast(unwrap(type)); + if (res) + return res; + else + return py::none(); + }) .def_property_readonly("data_type", &tt::TileType::getDataType) .def_property_readonly("shape", [](tt::TileType const &tile) { return std::vector({tile.getHeight(), tile.getWidth()}); diff --git a/python/TTNNModule.cpp b/python/TTNNModule.cpp index b8b408cb93..73c5fbc43b 100644 --- a/python/TTNNModule.cpp +++ b/python/TTNNModule.cpp @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 #include "ttmlir/Bindings/Python/TTMLIRModule.h" +#include namespace mlir::ttmlir::python { void populateTTNNModule(py::module &m) { @@ -27,13 +28,39 @@ void populateTTNNModule(py::module &m) { offsetVec)); }, py::arg("ctx"), py::arg("grid"), - py::arg("offset") = std::vector{0, 0}); + py::arg("offset") = std::vector{0, 0}) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = + mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) + .def_property_readonly( + "offset", + [](tt::ttnn::CoreRangeAttr self) { return self.getOffset().vec(); }) + .def_property_readonly("size", [](tt::ttnn::CoreRangeAttr self) { + return self.getSize().vec(); + }); py::class_(m, "LayoutAttr") .def_static("get", [](MlirContext ctx, uint32_t layout) { return wrap(tt::ttnn::LayoutAttr::get( unwrap(ctx), static_cast(layout))); }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = + mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) .def_property_readonly("value", [](tt::ttnn::LayoutAttr self) { return static_cast(self.getValue()); }); @@ -44,6 +71,17 @@ void populateTTNNModule(py::module &m) { unwrap(ctx), static_cast( tensorMemoryLayout))); }) + .def_static( + "maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = + mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) .def_property_readonly("value", [](tt::ttnn::TensorMemoryLayoutAttr self) { return static_cast(self.getValue()); @@ -55,6 +93,16 @@ void populateTTNNModule(py::module &m) { return wrap(tt::ttnn::BufferTypeAttr::get( unwrap(ctx), 
static_cast(bufferType))); }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = + mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) .def_property_readonly("value", [](tt::ttnn::BufferTypeAttr self) { return static_cast(self.getValue()); }); @@ -64,6 +112,16 @@ void populateTTNNModule(py::module &m) { return wrap( tt::ttnn::ShardSpecAttr::get(unwrap(ctx), shardShape)); }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = + mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) .def_property_readonly("shard_shape", &tt::ttnn::ShardSpecAttr::getShardShape); py::class_(m, "MemoryConfigAttr") @@ -91,6 +149,16 @@ void populateTTNNModule(py::module &m) { unwrap(ctx), tt::ttnn::ShapeAttr::get(unwrap(ctx), shardShape)))); }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = mlir::dyn_cast( + unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) .def_property_readonly("tensor_memory_layout", &tt::ttnn::MemoryConfigAttr::getTensorMemoryLayout) .def_property_readonly("buffer_type", @@ -102,6 +170,16 @@ void populateTTNNModule(py::module &m) { [](MlirContext ctx, std::vector shape) { return wrap(tt::ttnn::ShapeAttr::get(unwrap(ctx), shape)); }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = + mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) .def_property_readonly("shape", [](tt::ttnn::ShapeAttr self) { return std::vector(self.getShape().begin(), self.getShape().end()); @@ -112,6 +190,16 @@ void populateTTNNModule(py::module &m) { return wrap( tt::ttnn::MeshShapeAttr::get(unwrap(ctx), y, x)); }) + .def_static("maybe_downcast", + [](MlirAttribute attr) + -> std::variant { + auto res = + mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }) .def_property_readonly("y", &tt::ttnn::MeshShapeAttr::getY) .def_property_readonly("x", &tt::ttnn::MeshShapeAttr::getX); } From 677b0d629de068462685eebfeb09e785d57cbf69 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Thu, 14 Nov 2024 20:20:03 +0000 Subject: [PATCH 02/22] Removed manual maybe_downcast, added tt_class --- include/ttmlir/Bindings/Python/TTMLIRModule.h | 31 +++ python/CMakeLists.txt | 1 + python/TTModule.cpp | 212 ++---------------- python/TTNNModule.cpp | 104 ++------- 4 files changed, 69 insertions(+), 279 deletions(-) diff --git a/include/ttmlir/Bindings/Python/TTMLIRModule.h b/include/ttmlir/Bindings/Python/TTMLIRModule.h index e0d089edd9..d6418d02a2 100644 --- a/include/ttmlir/Bindings/Python/TTMLIRModule.h +++ b/include/ttmlir/Bindings/Python/TTMLIRModule.h @@ -21,9 +21,40 @@ #include "ttmlir/RegisterAll.h" #include "llvm/Support/CommandLine.h" +#include + namespace py = pybind11; namespace mlir::ttmlir::python { + +template +py::class_ tt_attribute_class(py::module &m, const char *class_name) { + py::class_ cls(m, class_name); + cls.def_static("maybe_downcast", + [](MlirAttribute attr) -> std::variant { + auto res = mlir::dyn_cast(unwrap(attr)); + if (res) + return res; + else + return py::none(); + }); + return cls; +} + +template +py::class_ tt_type_class(py::module &m, const char *class_name) { + py::class_ cls(m, class_name); + cls.def_static("maybe_downcast", + [](MlirType type) -> std::variant { + auto res = mlir::dyn_cast(unwrap(type)); + if (res) + return res; + else + return py::none(); + }); + return cls; +} + void 
populateTTModule(py::module &m); void populateTTIRModule(py::module &m); void populateTTKernelModule(py::module &m); diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 7309463628..e43cb858d4 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -102,6 +102,7 @@ declare_mlir_python_extension(TTMLIRPythonExtensions.Main set(TTMLIR_PYTHON_SOURCES MLIRPythonSources.Core + MLIRPythonSources.Dialects.affine MLIRPythonSources.Dialects.arith MLIRPythonSources.Dialects.func MLIRPythonSources.Dialects.tensor diff --git a/python/TTModule.cpp b/python/TTModule.cpp index 65120faa76..6ce9d074bf 100644 --- a/python/TTModule.cpp +++ b/python/TTModule.cpp @@ -22,7 +22,7 @@ namespace mlir::ttmlir::python { void populateTTModule(py::module &m) { - py::class_(m, "LayoutAttr") + tt_attribute_class(m, "LayoutAttr") .def_static("get", [](MlirContext ctx, MlirType rankedTensorType, uint32_t memorySpaceValue, MlirAttribute grid, @@ -109,85 +109,48 @@ void populateTTModule(py::module &m) { return static_cast(la.getMemLayout()); }); - py::class_(m, "GridAttr") + tt_attribute_class(m, "GridAttr") .def_static("get", [](MlirContext ctx, std::vector shape) { return wrap(tt::GridAttr::get(unwrap(ctx), shape)); }) - .def_static( - "maybe_downcast", - [](MlirAttribute attr) -> std::variant { - auto res = mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly( "shape", [](tt::GridAttr const &ga) { return ga.getShape().vec(); }); - py::class_(m, "ChipCapabilityAttr") + tt_attribute_class(m, "ChipCapabilityAttr") .def_static( "get", [](MlirContext ctx, uint32_t chipCapability) { return wrap(tt::ChipCapabilityAttr::get( unwrap(ctx), static_cast(chipCapability))); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = - mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("capability_as_int", [](tt::ChipCapabilityAttr self) { return static_cast(self.getValue()); }); - py::class_(m, "ArchAttr") + tt_attribute_class(m, "ArchAttr") .def_static("get", [](MlirContext ctx, uint32_t arch) { return wrap(tt::ArchAttr::get(unwrap(ctx), static_cast(arch))); }) - .def_static( - "maybe_downcast", - [](MlirAttribute attr) -> std::variant { - auto res = mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("arch_as_int", [](tt::ArchAttr self) { return static_cast(self.getValue()); }); - py::class_(m, "DataTypeAttr") + tt_attribute_class(m, "DataTypeAttr") .def_static( "get", [](MlirContext ctx, uint16_t *supportedDataTypes) { return wrap(tt::DataTypeAttr::get( unwrap(ctx), static_cast(*supportedDataTypes))); }) - .def_static( - "maybe_downcast", - [](MlirAttribute attr) -> std::variant { - auto res = mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("data_type_as_int", [](tt::DataTypeAttr self) { return static_cast(self.getValue()); }); - py::class_(m, "ChipDescAttr") + tt_attribute_class(m, "ChipDescAttr") .def_static( "get", [](MlirContext ctx, MlirAttribute arch, std::vector grid, @@ -210,15 +173,6 @@ void populateTTModule(py::module &m) { mlir::cast(unwrap(supportedTileSizes)), numCBs)); }) - .def_static( - "maybe_downcast", - [](MlirAttribute attr) -> std::variant { - auto res = mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("usable_l1_size", &tt::ChipDescAttr::getUsableL1Size) 
.def_property_readonly("usable_dram_channel_size", @@ -257,24 +211,15 @@ void populateTTModule(py::module &m) { }) .def_property_readonly("num_cbs", &tt::ChipDescAttr::getNumCBs); - py::class_(m, "TileSizeAttr") + tt_attribute_class(m, "TileSizeAttr") .def_static("get", [](MlirContext ctx, int64_t y, int64_t x) { return wrap(tt::TileSizeAttr::get(unwrap(ctx), y, x)); }) - .def_static( - "maybe_downcast", - [](MlirAttribute attr) -> std::variant { - auto res = mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("y", &tt::TileSizeAttr::getY) .def_property_readonly("x", &tt::TileSizeAttr::getX); - py::class_(m, "ChipPhysicalCoresAttr") + tt_attribute_class(m, "ChipPhysicalCoresAttr") .def_static("get", [](MlirContext ctx, std::vector worker, std::vector dram, @@ -283,16 +228,6 @@ void populateTTModule(py::module &m) { return wrap(tt::ChipPhysicalCoresAttr::get( unwrap(ctx), worker, dram, eth, eth_inactive)); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = - mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly( "worker", [](tt::ChipPhysicalCoresAttr self) { return self.getWorker().vec(); }) @@ -307,28 +242,19 @@ void populateTTModule(py::module &m) { return self.getEthInactive().vec(); }); - py::class_(m, "ChipCoordAttr") + tt_attribute_class(m, "ChipCoordAttr") .def_static("get", [](MlirContext ctx, unsigned rack, unsigned shelf, unsigned y, unsigned x) { return wrap( tt::ChipCoordAttr::get(unwrap(ctx), rack, shelf, y, x)); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("rack", &tt::ChipCoordAttr::getRack) .def_property_readonly("shelf", &tt::ChipCoordAttr::getShelf) .def_property_readonly("y", &tt::ChipCoordAttr::getY) .def_property_readonly("x", &tt::ChipCoordAttr::getX); - py::class_(m, "ChipChannelAttr") + tt_attribute_class(m, "ChipChannelAttr") .def_static( "get", [](MlirContext ctx, unsigned deviceId0, @@ -338,16 +264,6 @@ void populateTTModule(py::module &m) { ethernetCoreCoord0, deviceId1, ethernetCoreCoord1)); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = - mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("device_id0", &tt::ChipChannelAttr::getDeviceId0) .def_property_readonly("ethernet_core_coord0", [](tt::ChipChannelAttr self) { @@ -359,7 +275,7 @@ void populateTTModule(py::module &m) { return self.getEthernetCoreCoord1().vec(); }); - py::class_(m, "SystemDescAttr") + tt_attribute_class(m, "SystemDescAttr") .def_static("get_default", [](MlirContext ctx) { return wrap(tt::SystemDescAttr::getDefault(unwrap(ctx))); @@ -396,15 +312,6 @@ void populateTTModule(py::module &m) { chipCapabilitiesUnwrapped, chipCoordsUnwrapped, chipChannelsUnwrapped)); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly( "chip_descs", [](tt::SystemDescAttr self) { return self.getChipDescs().vec(); }) @@ -423,92 +330,53 @@ void populateTTModule(py::module &m) { return self.getChipChannels().vec(); }); - py::class_(m, "MemorySpaceAttr") + tt_attribute_class(m, "MemorySpaceAttr") .def_static( "get", [](MlirContext ctx, uint32_t memorySpace) 
{ return wrap(tt::MemorySpaceAttr::get( unwrap(ctx), static_cast(memorySpace))); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = - mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("memory_space_as_int", [](tt::MemorySpaceAttr self) { return static_cast(self.getValue()); }); - py::class_(m, "OOBValAttr") + tt_attribute_class(m, "OOBValAttr") .def_static("get", [](MlirContext ctx, uint32_t oobVal) { return wrap(tt::OOBValAttr::get( unwrap(ctx), static_cast(oobVal))); }) - .def_static( - "maybe_downcast", - [](MlirAttribute attr) -> std::variant { - auto res = mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("oob_val_as_int", [](tt::OOBValAttr self) { return static_cast(self.getValue()); }); - py::class_(m, "TensorMemoryLayoutAttr") + tt_attribute_class(m, "TensorMemoryLayoutAttr") .def_static( "get", [](MlirContext ctx, uint32_t memLayout) { return wrap(tt::TensorMemoryLayoutAttr::get( unwrap(ctx), static_cast(memLayout))); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = mlir::dyn_cast( - unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("mem_layout_as_int", [](tt::TensorMemoryLayoutAttr self) { return static_cast(self.getValue()); }); - py::class_(m, "IteratorTypeAttr") + tt_attribute_class(m, "IteratorTypeAttr") .def_static( "get", [](MlirContext ctx, uint32_t iteratorType) { return wrap(tt::IteratorTypeAttr::get( unwrap(ctx), static_cast(iteratorType))); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = - mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("iterator_type_as_int", [](tt::IteratorTypeAttr self) { return static_cast(self.getValue()); }); - py::class_(m, "OperandConstraintAttr") + tt_attribute_class(m, "OperandConstraintAttr") .def_static("get", [](MlirContext ctx, uint32_t operandConstraint) { return wrap(tt::OperandConstraintAttr::get( @@ -521,42 +389,23 @@ void populateTTModule(py::module &m) { return ::ttmlir::utils::wrapArrayOfMlirAttributesAsAttribute( ctx, attributesArray); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = - mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("operand_constraint_as_int", [](tt::OperandConstraintAttr self) { return static_cast(self.getValue()); }); - py::class_(m, "DeviceType") + tt_type_class(m, "DeviceType") .def_static( "get", [](MlirContext ctx, MlirAttribute deviceAttr) { return wrap(tt::DeviceType::get( unwrap(ctx), mlir::cast(unwrap(deviceAttr)))); }) - .def_static( - "maybe_downcast", - [](MlirType type) -> std::variant { - auto res = mlir::dyn_cast(unwrap(type)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("device_attr", [](tt::DeviceType const &self) { return self.getDesc(); }); - py::class_(m, "DeviceAttr") + tt_attribute_class(m, "DeviceAttr") .def_static("from_system_desc", [](MlirContext ctx, MlirAttribute systemDesc, std::vector meshShape) { @@ -586,20 +435,11 @@ void populateTTModule(py::module &m) { .def_property_readonly( "mesh_shape", [](tt::DeviceAttr const &self) { return self.getMeshShape().vec(); }) - .def_property_readonly( - "chip_ids", - [](tt::DeviceAttr const &self) { return self.getChipIds().vec(); }) - .def_static( 
- "maybe_downcast", - [](MlirAttribute attr) -> std::variant { - auto res = mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }); + .def_property_readonly("chip_ids", [](tt::DeviceAttr const &self) { + return self.getChipIds().vec(); + }); - py::class_(m, "TileType") + tt_type_class(m, "TileType") .def_static("get", [](MlirContext ctx, std::int64_t height, std::int64_t width, uint32_t dataType) { @@ -607,14 +447,6 @@ void populateTTModule(py::module &m) { unwrap(ctx), SmallVector{height, width}, static_cast(dataType))); }) - .def_static("maybe_downcast", - [](MlirType type) -> std::variant { - auto res = mlir::dyn_cast(unwrap(type)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("data_type", &tt::TileType::getDataType) .def_property_readonly("shape", [](tt::TileType const &tile) { return std::vector({tile.getHeight(), tile.getWidth()}); diff --git a/python/TTNNModule.cpp b/python/TTNNModule.cpp index 73c5fbc43b..fcef42eb11 100644 --- a/python/TTNNModule.cpp +++ b/python/TTNNModule.cpp @@ -8,7 +8,7 @@ namespace mlir::ttmlir::python { void populateTTNNModule(py::module &m) { - py::class_(m, "CoreRangeAttr") + tt_attribute_class(m, "CoreRangeAttr") .def_static("get", [](MlirContext ctx, std::vector offset, std::vector size) { @@ -29,102 +29,56 @@ void populateTTNNModule(py::module &m) { }, py::arg("ctx"), py::arg("grid"), py::arg("offset") = std::vector{0, 0}) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = - mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly( "offset", [](tt::ttnn::CoreRangeAttr self) { return self.getOffset().vec(); }) .def_property_readonly("size", [](tt::ttnn::CoreRangeAttr self) { return self.getSize().vec(); }); - py::class_(m, "LayoutAttr") + + tt_attribute_class(m, "LayoutAttr") .def_static("get", [](MlirContext ctx, uint32_t layout) { return wrap(tt::ttnn::LayoutAttr::get( unwrap(ctx), static_cast(layout))); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = - mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("value", [](tt::ttnn::LayoutAttr self) { return static_cast(self.getValue()); }); - py::class_(m, "TensorMemoryLayoutAttr") + + tt_attribute_class(m, + "TensorMemoryLayoutAttr") .def_static("get", [](MlirContext ctx, uint32_t tensorMemoryLayout) { return wrap(tt::ttnn::TensorMemoryLayoutAttr::get( unwrap(ctx), static_cast( tensorMemoryLayout))); }) - .def_static( - "maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = - mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("value", [](tt::ttnn::TensorMemoryLayoutAttr self) { return static_cast(self.getValue()); }); - py::class_(m, "BufferTypeAttr") + tt_attribute_class(m, "BufferTypeAttr") .def_static( "get", [](MlirContext ctx, uint32_t bufferType) { return wrap(tt::ttnn::BufferTypeAttr::get( unwrap(ctx), static_cast(bufferType))); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = - mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("value", [](tt::ttnn::BufferTypeAttr self) { return static_cast(self.getValue()); }); - py::class_(m, "ShardSpecAttr") + + tt_attribute_class(m, "ShardSpecAttr") .def_static("get", [](MlirContext ctx, tt::ttnn::ShapeAttr shardShape) { 
return wrap( tt::ttnn::ShardSpecAttr::get(unwrap(ctx), shardShape)); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = - mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("shard_shape", &tt::ttnn::ShardSpecAttr::getShardShape); - py::class_(m, "MemoryConfigAttr") + + tt_attribute_class(m, "MemoryConfigAttr") .def_static("get", [](MlirContext ctx, tt::ttnn::TensorMemoryLayoutAttr tensorMemoryLayoutAttr, @@ -149,57 +103,29 @@ void populateTTNNModule(py::module &m) { unwrap(ctx), tt::ttnn::ShapeAttr::get(unwrap(ctx), shardShape)))); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = mlir::dyn_cast( - unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("tensor_memory_layout", &tt::ttnn::MemoryConfigAttr::getTensorMemoryLayout) .def_property_readonly("buffer_type", &tt::ttnn::MemoryConfigAttr::getBufferType) .def_property_readonly("shard_spec", &tt::ttnn::MemoryConfigAttr::getShardSpec); - py::class_(m, "ShapeAttr") + + tt_attribute_class(m, "ShapeAttr") .def_static("get", [](MlirContext ctx, std::vector shape) { return wrap(tt::ttnn::ShapeAttr::get(unwrap(ctx), shape)); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = - mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("shape", [](tt::ttnn::ShapeAttr self) { return std::vector(self.getShape().begin(), self.getShape().end()); }); - py::class_(m, "MeshShapeAttr") + + tt_attribute_class(m, "MeshShapeAttr") .def_static("get", [](MlirContext ctx, int64_t y, int64_t x) { return wrap( tt::ttnn::MeshShapeAttr::get(unwrap(ctx), y, x)); }) - .def_static("maybe_downcast", - [](MlirAttribute attr) - -> std::variant { - auto res = - mlir::dyn_cast(unwrap(attr)); - if (res) - return res; - else - return py::none(); - }) .def_property_readonly("y", &tt::ttnn::MeshShapeAttr::getY) .def_property_readonly("x", &tt::ttnn::MeshShapeAttr::getX); } From f71f5474e329a3a0994051dd4d0042a822546f46 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Thu, 14 Nov 2024 20:25:14 +0000 Subject: [PATCH 03/22] Removed redundant imports --- python/TTModule.cpp | 8 +------- python/TTNNModule.cpp | 1 - 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/python/TTModule.cpp b/python/TTModule.cpp index 6ce9d074bf..6876268621 100644 --- a/python/TTModule.cpp +++ b/python/TTModule.cpp @@ -3,7 +3,6 @@ // SPDX-License-Identifier: Apache-2.0 #include -#include #include #include "ttmlir/Bindings/Python/TTMLIRModule.h" @@ -12,12 +11,7 @@ #include "mlir/CAPI/IR.h" #include "ttmlir/Dialect/TT/IR/TTOpsTypes.h" - -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wcovered-switch-default" -#include "ttmlir/Target/Common/types_generated.h" -#pragma clang diagnostic pop - +#include "ttmlir/Target/Common/Target.h" #include "ttmlir/Utils.h" namespace mlir::ttmlir::python { diff --git a/python/TTNNModule.cpp b/python/TTNNModule.cpp index fcef42eb11..24bd05c8f9 100644 --- a/python/TTNNModule.cpp +++ b/python/TTNNModule.cpp @@ -3,7 +3,6 @@ // SPDX-License-Identifier: Apache-2.0 #include "ttmlir/Bindings/Python/TTMLIRModule.h" -#include namespace mlir::ttmlir::python { void populateTTNNModule(py::module &m) { From 5ee502af514e44b9483c0e29c72d1ef939269198 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Thu, 14 Nov 2024 21:43:44 +0000 Subject: [PATCH 04/22] Lint Fixes --- 
include/ttmlir/Bindings/Python/TTMLIRModule.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/include/ttmlir/Bindings/Python/TTMLIRModule.h b/include/ttmlir/Bindings/Python/TTMLIRModule.h index d6418d02a2..5f2d4e134d 100644 --- a/include/ttmlir/Bindings/Python/TTMLIRModule.h +++ b/include/ttmlir/Bindings/Python/TTMLIRModule.h @@ -33,10 +33,10 @@ py::class_ tt_attribute_class(py::module &m, const char *class_name) { cls.def_static("maybe_downcast", [](MlirAttribute attr) -> std::variant { auto res = mlir::dyn_cast(unwrap(attr)); - if (res) + if (res) { return res; - else - return py::none(); + } + return py::none(); }); return cls; } @@ -47,10 +47,10 @@ py::class_ tt_type_class(py::module &m, const char *class_name) { cls.def_static("maybe_downcast", [](MlirType type) -> std::variant { auto res = mlir::dyn_cast(unwrap(type)); - if (res) + if (res) { return res; - else - return py::none(); + } + return py::none(); }); return cls; } From 87244a01f5632793364bca26b3d82044f75224cc Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Fri, 15 Nov 2024 22:54:24 +0000 Subject: [PATCH 05/22] new MLIR module for parsing TTNN modules --- include/ttmlir-c/TTAttrs.h | 3 + lib/CAPI/TTAttrs.cpp | 4 + python/TTModule.cpp | 25 +- python/ttmlir/dialects/ttnn.py | 1 + tools/explorer/CMakeLists.txt | 2 +- .../tt_adapter/src/tt_adapter/main.py | 4 +- .../tt_adapter/src/tt_adapter/mlir.py | 509 ++++++++++++++++++ .../tt_adapter/src/tt_adapter/utils.py | 3 +- 8 files changed, 543 insertions(+), 8 deletions(-) create mode 100644 tools/explorer/tt_adapter/src/tt_adapter/mlir.py diff --git a/include/ttmlir-c/TTAttrs.h b/include/ttmlir-c/TTAttrs.h index cdaa67c185..bfad958c98 100644 --- a/include/ttmlir-c/TTAttrs.h +++ b/include/ttmlir-c/TTAttrs.h @@ -83,6 +83,9 @@ MLIR_CAPI_EXPORTED MlirAttribute ttmlirTTChipPhysicalCoresAttrGet( MlirAttribute *dram, size_t dramSize, MlirAttribute *eth, size_t ethSize, MlirAttribute *eth_inactive, size_t eth_inactiveSize); +MLIR_CAPI_EXPORTED MlirAttribute ttmlirTTCoreCoordAttrGet(MlirContext ctx, + int64_t y, int64_t x); + #ifdef __cplusplus } #endif diff --git a/lib/CAPI/TTAttrs.cpp b/lib/CAPI/TTAttrs.cpp index b4bdc0e31a..b20ef4faa2 100644 --- a/lib/CAPI/TTAttrs.cpp +++ b/lib/CAPI/TTAttrs.cpp @@ -212,4 +212,8 @@ MlirAttribute ttmlirTTChipPhysicalCoresAttrGet( ethVec, ethInactiveVec)); } +MlirAttribute ttmlirTTCoreCoordAttrGet(MlirContext ctx, int64_t y, int64_t x) { + return wrap(CoreCoordAttr::get(unwrap(ctx), y, x)); +} + } // namespace mlir::tt diff --git a/python/TTModule.cpp b/python/TTModule.cpp index 6876268621..55630f76b7 100644 --- a/python/TTModule.cpp +++ b/python/TTModule.cpp @@ -90,7 +90,8 @@ void populateTTModule(py::module &m) { return static_cast(la.getOobVal()); }) .def_property_readonly("grid_attr", &tt::LayoutAttr::getGrid) - .def_property_readonly("memref", &tt::LayoutAttr::getMemref) + .def_property_readonly( + "memref", [](tt::LayoutAttr self) { return wrap(self.getMemref()); }) .def_property_readonly("memory_space", &tt::LayoutAttr::getMemorySpace) .def_property_readonly("memory_space_as_int", [](tt::LayoutAttr la) { @@ -99,6 +100,8 @@ void populateTTModule(py::module &m) { }) .def_property_readonly("shard_shape", &tt::LayoutAttr::getShardShape) .def_property_readonly("memory_layout", &tt::LayoutAttr::getMemLayout) + .def_property_readonly( + "linear", [](tt::LayoutAttr self) { return wrap(self.getLinear()); }) .def_property_readonly("memory_layout_as_int", [](tt::LayoutAttr la) { return static_cast(la.getMemLayout()); }); @@ 
-236,6 +239,14 @@ void populateTTModule(py::module &m) { return self.getEthInactive().vec(); }); + tt_attribute_class(m, "CoreCoordAttr") + .def_static("get", + [](MlirContext ctx, int64_t y, int64_t x) { + return wrap(tt::CoreCoordAttr::get(unwrap(ctx), y, x)); + }) + .def_property_readonly("y", &tt::CoreCoordAttr::getY) + .def_property_readonly("x", &tt::CoreCoordAttr::getX); + tt_attribute_class(m, "ChipCoordAttr") .def_static("get", [](MlirContext ctx, unsigned rack, unsigned shelf, unsigned y, @@ -424,8 +435,11 @@ void populateTTModule(py::module &m) { return mlir::cast(unwrap(self)); }) .def_property_readonly("grid_attr", &tt::DeviceAttr::getWorkerGrid) - .def_property_readonly("l1_map", &tt::DeviceAttr::getL1Map) - .def_property_readonly("dram_map", &tt::DeviceAttr::getDramMap) + .def_property_readonly( + "l1_map", [](tt::DeviceAttr self) { return wrap(self.getL1Map()); }) + .def_property_readonly( + "dram_map", + [](tt::DeviceAttr self) { return wrap(self.getDramMap()); }) .def_property_readonly( "mesh_shape", [](tt::DeviceAttr const &self) { return self.getMeshShape().vec(); }) @@ -441,7 +455,10 @@ void populateTTModule(py::module &m) { unwrap(ctx), SmallVector{height, width}, static_cast(dataType))); }) - .def_property_readonly("data_type", &tt::TileType::getDataType) + .def_property_readonly("data_type_as_int", + [](tt::TileType self) { + return static_cast(self.getDataType()); + }) .def_property_readonly("shape", [](tt::TileType const &tile) { return std::vector({tile.getHeight(), tile.getWidth()}); }); diff --git a/python/ttmlir/dialects/ttnn.py b/python/ttmlir/dialects/ttnn.py index d81f58111a..659938cf66 100644 --- a/python/ttmlir/dialects/ttnn.py +++ b/python/ttmlir/dialects/ttnn.py @@ -3,4 +3,5 @@ # SPDX-License-Identifier: Apache-2.0 from ._ttnn_ops_gen import * +from ._ttnn_enum_gen import * from .._mlir_libs._ttmlir import register_dialect, ttnn_ir as ir diff --git a/tools/explorer/CMakeLists.txt b/tools/explorer/CMakeLists.txt index 7ad0791b87..941cead2bb 100644 --- a/tools/explorer/CMakeLists.txt +++ b/tools/explorer/CMakeLists.txt @@ -17,7 +17,7 @@ ExternalProject_Add( add_custom_target(explorer COMMENT "Building tt-explorer... ${TTMLIR_BIN_DIR}" - COMMAND pip install ${CMAKE_CURRENT_SOURCE_DIR}/tt_adapter + COMMAND pip install -e ${CMAKE_CURRENT_SOURCE_DIR}/tt_adapter COMMAND pip install ${CMAKE_CURRENT_SOURCE_DIR}/model-explorer/src/model-explorer/src/server/package DEPENDS TTMLIRPythonModules model-explorer ttrt diff --git a/tools/explorer/tt_adapter/src/tt_adapter/main.py b/tools/explorer/tt_adapter/src/tt_adapter/main.py index 2bb3ece81a..3605cd0f5b 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/main.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/main.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 from typing import Dict import model_explorer -from . import ttir, runner, utils +from . 
import ttir, runner, utils, mlir import dataclasses import enum @@ -46,7 +46,7 @@ def convert( module = utils.parse_mlir_file(model_path) # Convert TTIR to Model Explorer Graphs and Display/Return - graph = ttir.ttir_to_graph(module) + graph = mlir.build_graph(module) return {"graphs": [graph]} def execute( diff --git a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py new file mode 100644 index 0000000000..825d9b2da0 --- /dev/null +++ b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py @@ -0,0 +1,509 @@ +# SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC +# +# SPDX-License-Identifier: Apache-2.0 +# Utility library for parsing MLIR + +from collections import defaultdict +from model_explorer import graph_builder + +from ttmlir.dialects import tt, ttnn, ttir +from ttmlir import ir + + +def get_loc_str(loc): + try: + res = str(loc).split('"')[1] + except: + res = "unknown" + return res + + +class AttrHandler: + ATTR_HANDLERS = {} + + @staticmethod + def default_parser(attr): + return graph_builder.KeyValue(key=attr.name, value=str(attr.attr)) + + @staticmethod + def parse_attr(attr): + # Need to parse the attr and return the relevant KV data into + if attr.name in AttrHandler.ATTR_HANDLERS: + return AttrHandler.ATTR_HANDLERS[attr.name](attr.attr) + else: + # Unknown Attr Type, return default parser + return AttrHandler.default_parser(attr) + + @staticmethod + def register_handler(attr_name): + def decorator(handler): + AttrHandler.ATTR_HANDLERS[attr_name] = handler + return handler + + return decorator + + +@AttrHandler.register_handler("tt.device") +def parse_tt_device(attr): + device = tt.ir.DeviceAttr.maybe_downcast(attr) + result = [] + result.append( + graph_builder.KeyValue( + key="device_chip_ids", value=", ".join(map(str, device.chip_ids)) + ) + ) + result.append( + graph_builder.KeyValue( + key="device_grid_shape", value=str(device.grid_attr.shape) + ) + ) + if device.mesh_shape: + result.append( + graph_builder.KeyValue( + key="device_mesh_shape", value=str(device.mesh_shape) + ) + ) + result.append(graph_builder.KeyValue(key="device_l1_map", value=str(device.l1_map))) + result.append( + graph_builder.KeyValue(key="device_dram_map", value=str(device.dram_map)) + ) + return result + + +@AttrHandler.register_handler("tt.system_desc") +def parse_tt_system_desc(attr): + system_desc = tt.ir.SystemDescAttr.maybe_downcast(attr) + result = [] + for i, chip_desc, chip_coord, chip_capability in zip( + system_desc.chip_desc_indices, + system_desc.chip_descs, + system_desc.chip_coords, + system_desc.chip_capabilities, + ): + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-arch", value=str(tt.Arch(chip_desc.arch.arch_as_int)) + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-capability", + value=str(tt.ChipCapability(chip_capability.capability_as_int)), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-coord", + value="x".join( + map( + str, + (chip_coord.rack, chip_coord.shelf, chip_coord.y, chip_coord.x), + ) + ), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-dram-channel-size", + value=str(chip_desc.dram_channel_size), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-dram-unreserved-base", + value=str(chip_desc.dram_unreserved_base), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-dram-unreserved-end", + value=str(chip_desc.dram_unreserved_end), + ) + ) + result.append( + graph_builder.KeyValue( + 
key=f"chip#{i}-erisc-l1-unreserved-size", + value=str(chip_desc.erisc_l1_unreserved_base), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-grid", value="x".join(map(str, chip_desc.grid)) + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-l1-size", value=str(chip_desc.l1_size) + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-l1-unreserved-base", + value=str(chip_desc.l1_unreserved_base), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-noc-dram-address-align-bytes", + value=str(chip_desc.noc_dram_address_align_bytes), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-noc-l1-address-align-bytes", + value=str(chip_desc.noc_l1_address_align_bytes), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-num-cbs", value=str(chip_desc.num_cbs) + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-num-dram-channels", + value=str(chip_desc.num_dram_channels), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-pcie-address-align-bytes", + value=str(chip_desc.pcie_address_align_bytes), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-usable-dram-channel-size", + value=str(chip_desc.usable_dram_channel_size), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-usable-l1-size", value=str(chip_desc.usable_l1_size) + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-supported-data-types", + value=", ".join( + [str(tt.DataType(dt)) for dt in chip_desc.supported_data_types] + ), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-supported-tile-sizes", + value=", ".join( + [ + "x".join(map(str, (tsize.y, tsize.x))) + for tsize in chip_desc.supported_data_types + ] + ), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-dram-core-coords", + value=", ".join( + [ + "x".join(map(str, (coord.y, coord.x))) + for coord in chip_desc.chip_physical_cores.dram + ] + ), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-eth-core-coords", + value=", ".join( + [ + "x".join(map(str, (coord.y, coord.x))) + for coord in chip_desc.chip_physical_cores.eth + ] + ), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-eth-inactive-core-coords", + value=", ".join( + [ + "x".join(map(str, (coord.y, coord.x))) + for coord in chip_desc.chip_physical_cores.eth_inactive + ] + ), + ) + ) + result.append( + graph_builder.KeyValue( + key=f"chip#{i}-worker-core-coords", + value=", ".join( + [ + "x".join(map(str, (coord.y, coord.x))) + for coord in chip_desc.chip_physical_cores.worker + ] + ), + ) + ) + return result + + +@AttrHandler.register_handler("mesh_shape") +def parse_mesh_shape(attr): + mesh_shape = ttnn.ir.MeshShapeAttr.maybe_downcast(attr) + return [ + graph_builder.KeyValue( + key="mesh_shape", value="x".join(map(str, (mesh_shape.y, mesh_shape.x))) + ) + ] + + +@AttrHandler.register_handler("layout") +def parse_layout(attr): + # This is for parsing TTNN Layouts (Enum) + layout = ttnn.ir.LayoutAttr.maybe_downcast(attr) + return [graph_builder.KeyValue(key="layout", value=str(ttnn.Layout(layout.value)))] + + +@AttrHandler.register_handler("memory_config") +def parse_memory_config(attr): + memory_config = ttnn.ir.MemoryConfigAttr.maybe_downcast(attr) + result = [] + result.append( + graph_builder.KeyValue( + key="buffer-type", + value=str(ttnn.BufferType(memory_config.buffer_type.value)), + ) + ) + result.append( + graph_builder.KeyValue( + key="shard-shape", + 
value="x".join(map(str, memory_config.shard_spec.shard_shape.shape)), + ) + ) + result.append( + graph_builder.KeyValue( + key="tensor-memory-layout", + value=str( + ttnn.TensorMemoryLayout(memory_config.tensor_memory_layout.value) + ), + ) + ) + return result + + +@AttrHandler.register_handler("force") +def parse_force(attr): + return [graph_builder.KeyValue(key="force", value=str(attr.value))] + + +@AttrHandler.register_handler("dtype") +def parse_dtype(attr): + dtype = tt.ir.DataTypeAttr.maybe_downcast(attr) + return [ + graph_builder.KeyValue( + key="dtype", value=str(tt.DataType(dtype.data_type_as_int)) + ) + ] + + +@AttrHandler.register_handler("shape") +def parse_shape(attr): + shape = ttnn.ir.ShapeAttr.maybe_downcast(attr) + return [graph_builder.KeyValue(key="shape", value="x".join(map(str, shape.shape)))] + + +@AttrHandler.register_handler("operandSegmentSizes") +def parse_operandSegmentSizes(attr): + return [graph_builder.KeyValue(key="operandSegmentSizes", value=str(list(attr)))] + + +@AttrHandler.register_handler("dimension") +def parse_dimension(attr): + return [graph_builder.KeyValue(key="dimension", value=str(attr.value))] + + +@AttrHandler.register_handler("tt.layout") +def parse_tt_layout(attr): + layout = tt.ir.LayoutAttr.maybe_downcast(attr) + result = [] + result.append(graph_builder.KeyValue(key="linear", value=str(layout.linear))) + result.append( + graph_builder.KeyValue( + key="memory_space", value=str(tt.MemorySpace(layout.memory_space_as_int)) + ) + ) + result.append( + graph_builder.KeyValue( + key="memory_layout", + value=str(tt.TensorMemoryLayout(layout.memory_layout_as_int)), + ) + ) + result.append( + graph_builder.KeyValue( + key="grid_shape", value="x".join(map(str, layout.grid_attr.shape)) + ) + ) + result.append( + graph_builder.KeyValue(key="memref_shape", value=str(layout.memref.shape)) + ) + result.append( + graph_builder.KeyValue(key="memref_rank", value=str(layout.memref.rank)) + ) + tile_type = tt.ir.TileType.maybe_downcast(layout.memref.element_type) + if tile_type is not None: + result.append( + graph_builder.KeyValue( + key="tile_datatype", value=str(tt.DataType(tile_type.data_type_as_int)) + ) + ) + result.append( + graph_builder.KeyValue( + key="tile_shape", value="x".join(map(str, tile_type.shape)) + ) + ) + return result + + +class OpHandler: + def __init__(self, op): + self.op = op + self.attrs = [] + + def get_id(self, names: defaultdict): + name = get_loc_str(self.op.location) + name_num = names[name] + id = name + "__" + str(name_num) + names[name] += 1 + return id + + @staticmethod + def get_namespace(op): + name = get_loc_str(op.location) + if op.parent and op.parent.name != "builtin.module": + return OpHandler.get_namespace(op.parent) + "/" + name + return name + + def get_attributes(self): + # Parse Op Attributes themselves + for attr in self.op.attributes: + self.attrs.extend(AttrHandler.parse_attr(attr)) + + def make_graph_node(self, name_dict): + self.get_attributes() + return graph_builder.GraphNode( + id=self.get_id(name_dict), + label=self.op.name, + namespace=OpHandler.get_namespace(self.op), + attrs=self.attrs, + ) + + def make_constant_node(self, name_dict, constant_name): + return graph_builder.GraphNode( + id=self.get_id(name_dict), + label=constant_name, + namespace=OpHandler.get_namespace(self.op), + ) + + +EMPTY_OPS = [ + "ttnn.empty", + "tensor.empty", +] + +FILTERED_OPS = [ + "ttnn.deallocate", + "ttnn.get_device", +] + + +def build_graph(module): + name_dict = defaultdict(int) + output_connections = defaultdict(int) 
+ graph = graph_builder.Graph(id="tt-graph") + + op_to_graph_node = {} + + module_op = OpHandler(module.operation) + graph.nodes.append(module_op.make_graph_node(name_dict)) + + for op in module.body.operations: + append_later = [] + for region in op.regions: + for block in region.blocks: + for op in block.operations: + # Create all the nodes and constants in the first pass. + operation = OpHandler(op) + graph_node = operation.make_graph_node(name_dict) + + if op.name in "tensor.empty": + append_later.append(graph_node) + elif op.name not in FILTERED_OPS: + graph.nodes.append(graph_node) + + op_to_graph_node[op] = graph_node + + for operand in op.operands: + if operand.owner == block and operand not in op_to_graph_node: + # This is a constant and we need to create a node for it. + operand_node = operation.make_constant_node( + name_dict, operand.get_name() + ) + graph.nodes.append(operand_node) + op_to_graph_node[operand] = operand_node + + # This puts the node at the far right when viewing, which is a bit more consistent with it being the last operand. + for node in append_later: + graph.nodes.append(node) + + for op in block.operations: + # Create all the edges in the second pass. + for operand_index, operand in enumerate(op.operands): + if operand.owner == block: + source_node = op_to_graph_node[operand] + else: + source_node = op_to_graph_node[operand.owner] + + target_node = op_to_graph_node[op] + + target_node.incomingEdges.append( + graph_builder.IncomingEdge( + sourceNodeId=source_node.id, + sourceNodeOutputId=output_connections[source_node.id], + targetNodeInputId=operand_index, + ) + ) + + output_attrs = [] + if isinstance(operand.type, ir.RankedTensorType): + output_attrs = [ + graph_builder.KeyValue( + key="shape", value=str(operand.type.shape) + ), + graph_builder.KeyValue( + key="dtype", value=str(operand.type.element_type) + ), + graph_builder.KeyValue( + key="rank", value=str(operand.type.rank) + ), + ] + if hasattr(operand.type, "encoding"): + output_attrs.extend( + AttrHandler.parse_attr( + operand.type.encoding.get_named("tt.layout") + ) + ) + source_node.outputsMetadata.append( + graph_builder.MetadataItem( + id=str(output_connections[source_node.id]), + attrs=[ + graph_builder.KeyValue( + key="__tensor_tag", value=target_node.label + ), + ] + + output_attrs, + ) + ) + output_connections[source_node.id] += 1 + + return graph diff --git a/tools/explorer/tt_adapter/src/tt_adapter/utils.py b/tools/explorer/tt_adapter/src/tt_adapter/utils.py index fe68d89ac5..5a0a9299cf 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/utils.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/utils.py @@ -9,5 +9,6 @@ def parse_mlir_file(model_path): ttmlir.dialects.ttkernel.register_dialect(ctx) ttmlir.dialects.ttir.register_dialect(ctx) ttmlir.dialects.tt.register_dialect(ctx) - module = ttmlir.ir.Module.parse("".join(model_file.readlines()), ctx) + ttmlir.dialects.ttnn.register_dialect(ctx) + module = ttmlir.ir.Module.parse(model_file.read(), ctx) return module From 97e6fbd16dd2c4dbbec8d4c02ea6e5bf493838bf Mon Sep 17 00:00:00 2001 From: Vraj Prajapati Date: Mon, 18 Nov 2024 21:53:16 +0000 Subject: [PATCH 06/22] Added TTNNLayout Support + Fixes --- include/ttmlir-c/TTNNAttrs.h | 5 ++ lib/CAPI/TTNNAttrs.cpp | 10 ++++ python/TTNNModule.cpp | 23 ++++++++ .../tt_adapter/src/tt_adapter/mlir.py | 58 ++++++++++++++++--- 4 files changed, 88 insertions(+), 8 deletions(-) diff --git a/include/ttmlir-c/TTNNAttrs.h b/include/ttmlir-c/TTNNAttrs.h index a7f5a8170d..ea3e333c2d 100644 ---
a/include/ttmlir-c/TTNNAttrs.h +++ b/include/ttmlir-c/TTNNAttrs.h @@ -5,6 +5,7 @@ #ifndef TTMLIR_C_TTNNATTRS_H #define TTMLIR_C_TTNNATTRS_H +#include "mlir-c/AffineMap.h" #include "ttmlir-c/Dialects.h" #ifdef __cplusplus @@ -44,6 +45,10 @@ MLIR_CAPI_EXPORTED MlirAttribute ttmlirTTNNMeshShapeAttrGet(MlirContext ctx, int64_t y, int64_t x); +MLIR_CAPI_EXPORTED MlirAttribute ttmlirTTNNTTNNLayoutAttrGet( + MlirContext ctx, MlirAffineMap linear, MlirAttribute grid, MlirType memref, + unsigned memLayout); + #ifdef __cplusplus } #endif diff --git a/lib/CAPI/TTNNAttrs.cpp b/lib/CAPI/TTNNAttrs.cpp index 0fb1066cb8..677f22fb42 100644 --- a/lib/CAPI/TTNNAttrs.cpp +++ b/lib/CAPI/TTNNAttrs.cpp @@ -69,4 +69,14 @@ MlirAttribute ttmlirTTNNMeshShapeAttrGet(MlirContext ctx, int64_t y, return wrap(MeshShapeAttr::get(unwrap(ctx), y, x)); } +MlirAttribute ttmlirTTNNTTNNLayoutAttrGet(MlirContext ctx, MlirAffineMap linear, + MlirAttribute grid, MlirType memref, + unsigned memLayout) { + mlir::AffineMap affineMap = mlir::AffineMap::getFromOpaquePointer(linear.ptr); + return wrap(TTNNLayoutAttr::get(unwrap(ctx), affineMap, + mlir::cast(unwrap(grid)), + mlir::cast(unwrap(memref)), + static_cast(memLayout))); +} + } // namespace mlir::tt::ttnn diff --git a/python/TTNNModule.cpp b/python/TTNNModule.cpp index 24bd05c8f9..11e47982da 100644 --- a/python/TTNNModule.cpp +++ b/python/TTNNModule.cpp @@ -2,6 +2,7 @@ // // SPDX-License-Identifier: Apache-2.0 +#include "mlir/CAPI/AffineMap.h" #include "ttmlir/Bindings/Python/TTMLIRModule.h" namespace mlir::ttmlir::python { @@ -127,5 +128,27 @@ void populateTTNNModule(py::module &m) { }) .def_property_readonly("y", &tt::ttnn::MeshShapeAttr::getY) .def_property_readonly("x", &tt::ttnn::MeshShapeAttr::getX); + + tt_attribute_class(m, "TTNNLayoutAttr") + .def_static("get", + [](MlirContext ctx, MlirAffineMap linear, MlirAttribute grid, + MlirType memref, unsigned memLayout) { + return wrap(tt::ttnn::TTNNLayoutAttr::get( + unwrap(ctx), mlir::cast(unwrap(linear)), + mlir::cast(unwrap(grid)), + mlir::cast(unwrap(memref)), + static_cast(memLayout))); + }) + .def_property_readonly( + "linear", + [](tt::ttnn::TTNNLayoutAttr self) { return wrap(self.getLinear()); }) + .def_property_readonly("grid_attr", &tt::ttnn::TTNNLayoutAttr::getGrid) + .def_property_readonly( + "memref", + [](tt::ttnn::TTNNLayoutAttr self) { return wrap(self.getMemref()); }) + .def_property_readonly( + "memory_layout_as_int", [](tt::ttnn::TTNNLayoutAttr self) { + return static_cast(self.getMemLayout()); + }); } } // namespace mlir::ttmlir::python diff --git a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py index 825d9b2da0..98bbc6e5c9 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py @@ -23,7 +23,7 @@ class AttrHandler: @staticmethod def default_parser(attr): - return graph_builder.KeyValue(key=attr.name, value=str(attr.attr)) + return [graph_builder.KeyValue(key=attr.name, value=str(attr.attr))] @staticmethod def parse_attr(attr): @@ -186,7 +186,10 @@ def parse_tt_system_desc(attr): graph_builder.KeyValue( key=f"chip#{i}-supported-data-types", value=", ".join( - [str(tt.DataType(dt)) for dt in chip_desc.supported_data_types] + [ + str(tt.DataType(dt.data_type_as_int)) + for dt in chip_desc.supported_data_types + ] ), ) ) @@ -196,7 +199,7 @@ def parse_tt_system_desc(attr): value=", ".join( [ "x".join(map(str, (tsize.y, tsize.x))) - for tsize in chip_desc.supported_data_types + for tsize in 
chip_desc.supported_tile_sizes ] ), ) @@ -365,6 +368,37 @@ def parse_tt_layout(attr): return result +@AttrHandler.register_handler("ttnn_layout") +def parse_ttnn_ttnn_layout(attr): + layout = ttnn.ir.TTNNLayoutAttr.maybe_downcast(attr) + result = [] + result.append(graph_builder.KeyValue(key="linear", value=str(layout.linear))) + result.append( + graph_builder.KeyValue( + key="memory_layout", + value=str(ttnn.TensorMemoryLayout(layout.memory_layout_as_int)), + ) + ) + result.append( + graph_builder.KeyValue( + key="grid_shape", value="x".join(map(str, layout.grid_attr.shape)) + ) + ) + result.append( + graph_builder.KeyValue(key="memref_shape", value=str(layout.memref.shape)) + ) + result.append( + graph_builder.KeyValue(key="memref_rank", value=str(layout.memref.rank)) + ) + buffer_attr = ttnn.ir.BufferTypeAttr.maybe_downcast(layout.memref.memory_space) + result.append( + graph_builder.KeyValue( + key="memref_memory_space", value=str(ttnn.BufferType(buffer_attr.value)) + ) + ) + return result + + class OpHandler: def __init__(self, op): self.op = op @@ -436,7 +470,7 @@ def build_graph(module): operation = OpHandler(op) graph_node = operation.make_graph_node(name_dict) - if op.name in "tensor.empty": + if op.name in EMPTY_OPS: append_later.append(graph_node) elif op.name not in FILTERED_OPS: graph.nodes.append(graph_node) @@ -488,11 +522,19 @@ def build_graph(module): ), ] if hasattr(operand.type, "encoding"): - output_attrs.extend( - AttrHandler.parse_attr( - operand.type.encoding.get_named("tt.layout") + if "ttnn_layout" in str(operand.type.encoding): + output_attrs.extend( + AttrHandler.parse_attr( + operand.type.encoding.get_named("ttnn_layout") + ) + ) + else: + # Parse as a standard layout + output_attrs.extend( + AttrHandler.parse_attr( + operand.type.encoding.get_named("tt.layout") + ) ) - ) source_node.outputsMetadata.append( graph_builder.MetadataItem( id=str(output_connections[source_node.id]), From d634d38e8c75723891f14240289c9c2668147395 Mon Sep 17 00:00:00 2001 From: Vraj Prajapati Date: Tue, 19 Nov 2024 17:39:41 +0000 Subject: [PATCH 07/22] editable on Debug, minor fixes --- tools/explorer/CMakeLists.txt | 2 +- tools/explorer/tt_adapter/src/tt_adapter/mlir.py | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/tools/explorer/CMakeLists.txt b/tools/explorer/CMakeLists.txt index 941cead2bb..44613b2671 100644 --- a/tools/explorer/CMakeLists.txt +++ b/tools/explorer/CMakeLists.txt @@ -17,7 +17,7 @@ ExternalProject_Add( add_custom_target(explorer COMMENT "Building tt-explorer... 
${TTMLIR_BIN_DIR}" - COMMAND pip install -e ${CMAKE_CURRENT_SOURCE_DIR}/tt_adapter + COMMAND pip install $<$:-e> ${CMAKE_CURRENT_SOURCE_DIR}/tt_adapter COMMAND pip install ${CMAKE_CURRENT_SOURCE_DIR}/model-explorer/src/model-explorer/src/server/package DEPENDS TTMLIRPythonModules model-explorer ttrt diff --git a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py index 98bbc6e5c9..73cffcfb96 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py @@ -313,6 +313,8 @@ def parse_dtype(attr): @AttrHandler.register_handler("shape") def parse_shape(attr): shape = ttnn.ir.ShapeAttr.maybe_downcast(attr) + if not shape: + return [graph_builder.KeyValue(key="shape", value=str(attr))] return [graph_builder.KeyValue(key="shape", value="x".join(map(str, shape.shape)))] @@ -411,11 +413,11 @@ def get_id(self, names: defaultdict): names[name] += 1 return id - @staticmethod - def get_namespace(op): + def get_namespace(self, parent_op=None): + op = self.op if not parent_op else parent_op name = get_loc_str(op.location) if op.parent and op.parent.name != "builtin.module": - return OpHandler.get_namespace(op.parent) + "/" + name + return self.get_namespace(op.parent) + "/" + name return name def get_attributes(self): @@ -428,7 +430,7 @@ def make_graph_node(self, name_dict): return graph_builder.GraphNode( id=self.get_id(name_dict), label=self.op.name, - namespace=OpHandler.get_namespace(self.op), + namespace=self.get_namespace(), attrs=self.attrs, ) @@ -436,7 +438,7 @@ def make_constant_node(self, name_dict, constant_name): return graph_builder.GraphNode( id=self.get_id(name_dict), label=constant_name, - namespace=OpHandler.get_namespace(self.op), + namespace=self.get_namespace(), ) @@ -521,7 +523,7 @@ def build_graph(module): key="rank", value=str(operand.type.rank) ), ] - if hasattr(operand.type, "encoding"): + if hasattr(operand.type, "encoding") and operand.type.encoding: if "ttnn_layout" in str(operand.type.encoding): output_attrs.extend( AttrHandler.parse_attr( From c9f972c86e003bb7e587cb8aec2b789c99f2d583 Mon Sep 17 00:00:00 2001 From: Vraj Prajapati Date: Tue, 19 Nov 2024 20:58:13 +0000 Subject: [PATCH 08/22] Use check-ttmlir to generate test-cases for tt-explorer --- .github/workflows/build-and-test.yml | 15 +++++++++++++++ tools/explorer/test/run_tests.py | 8 +++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index ade377c06a..65c2680b17 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -504,6 +504,21 @@ jobs: source env/activate cmake --build ${{ steps.strings.outputs.build-output-dir }} -- explorer + - name: Build ttrt + shell: bash + run: | + source env/activate + cmake --build ${{ steps.strings.outputs.build-output-dir }} -- ttrt + + - name: Generate tests + shell: bash + run: | + source env/activate + ttrt query --save-artifacts + export SYSTEM_DESC_PATH="${GITHUB_WORKSPACE}/ttrt-artifacts/system_desc.ttsys" + cmake --build ${{ steps.strings.outputs.build-output-dir }} -- check-ttmlir + export TT_EXPLORER_GENERATED_TEST_DIR=${{ steps.strings.outputs.build-output-dir }}/test/ttmlir/Silicon/TTNN + - name: Run tt-explorer tests shell: bash run: | diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index ceff14ae0a..46872c7bf9 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -8,6 +8,7 
@@ import multiprocessing import pytest import glob +import os HOST = "localhost" PORT = 8002 @@ -20,11 +21,16 @@ "test/ttmlir/Silicon/TTNN/optimizer/mnist_sharding_tiled.mlir", ] +if "TT_EXPLORER_GENERATED_TEST_DIR" in os.environ: + TEST_LOAD_MODEL_PATHS.append( + os.environ["TT_EXPLORER_GENERATED_TEST_DIR"] + "/**/*.mlir" + ) + def get_test_files(paths): files = [] for path in paths: - files.extend(glob.glob(path)) + files.extend(glob.glob(path, recursive=True)) return files From d46388163d7669c88285bc10e4408d0a23263f74 Mon Sep 17 00:00:00 2001 From: Vraj Prajapati Date: Tue, 19 Nov 2024 22:12:25 +0000 Subject: [PATCH 09/22] Attempted to fix tt-explorer job --- .github/workflows/build-and-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 65c2680b17..800b8ecc02 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -517,10 +517,10 @@ jobs: ttrt query --save-artifacts export SYSTEM_DESC_PATH="${GITHUB_WORKSPACE}/ttrt-artifacts/system_desc.ttsys" cmake --build ${{ steps.strings.outputs.build-output-dir }} -- check-ttmlir - export TT_EXPLORER_GENERATED_TEST_DIR=${{ steps.strings.outputs.build-output-dir }}/test/ttmlir/Silicon/TTNN - name: Run tt-explorer tests shell: bash run: | source env/activate + export TT_EXPLORER_GENERATED_TEST_DIR=${{ steps.strings.outputs.build-output-dir }}/test/ttmlir/Silicon/TTNN pytest tools/explorer/test/run_tests.py From 611375be989d6b44208fec74141d7030599e278e Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Mon, 9 Dec 2024 19:22:29 +0000 Subject: [PATCH 10/22] Fixes + Moved tests to tests/Explorer --- .github/workflows/build-and-test.yml | 14 -------------- CMakeLists.txt | 1 + docs/src/tt-explorer.md | 9 ++++----- .../ttmlir/Explorer}/forward_and_backward.mlir | 0 .../ttmlir/Explorer}/linear_autoencoder.mlir | 0 .../Explorer}/open_llama_3b_single_layer.mlir | 0 tools/explorer/CMakeLists.txt | 8 +++++++- tools/explorer/test/run_tests.py | 2 +- 8 files changed, 13 insertions(+), 21 deletions(-) rename {tools/explorer/test/models => test/ttmlir/Explorer}/forward_and_backward.mlir (100%) rename {tools/explorer/test/models => test/ttmlir/Explorer}/linear_autoencoder.mlir (100%) rename {tools/explorer/test/models => test/ttmlir/Explorer}/open_llama_3b_single_layer.mlir (100%) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 017899bd86..f87c009f5c 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -640,20 +640,6 @@ jobs: source env/activate cmake --build ${{ steps.strings.outputs.build-output-dir }} -- explorer - - name: Build ttrt - shell: bash - run: | - source env/activate - cmake --build ${{ steps.strings.outputs.build-output-dir }} -- ttrt - - - name: Generate tests - shell: bash - run: | - source env/activate - ttrt query --save-artifacts - export SYSTEM_DESC_PATH="${GITHUB_WORKSPACE}/ttrt-artifacts/system_desc.ttsys" - cmake --build ${{ steps.strings.outputs.build-output-dir }} -- check-ttmlir - - name: Run tt-explorer tests shell: bash run: | diff --git a/CMakeLists.txt b/CMakeLists.txt index bebff7a0fd..41f928c1fd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -9,6 +9,7 @@ option(TT_RUNTIME_ENABLE_PERF_TRACE "Enable performance mode" OFF) option(TTMLIR_ENABLE_RUNTIME "Enable runtime" OFF) option(TTMLIR_ENABLE_STABLEHLO "Enable StableHLO support" OFF) option(TTMLIR_ENABLE_OP_MODEL "Enable OpModel support" OFF) 
+option(TT_EXPLORER_EDITABLE "Enable editable install mode for explorer" OFF) if (TTMLIR_ENABLE_STABLEHLO) add_compile_definitions(TTMLIR_ENABLE_STABLEHLO) diff --git a/docs/src/tt-explorer.md b/docs/src/tt-explorer.md index a8073d25be..831a7fd9ef 100644 --- a/docs/src/tt-explorer.md +++ b/docs/src/tt-explorer.md @@ -5,12 +5,11 @@ Welcome to the tt-explorer wiki! The Wiki will serve as a source for documentati ## Quick Start TT-Explorer is made to be as painless as possible, as such the installation on top of the pre-existing [`tt-mlir`](https://github.com/tenstorrent/tt-mlir) project is as minimal as possible. -1. Build `tt-mlir` +1. Build `tt-mlir`, add the `-DTT_EXPLORER_EDITABLE=ON` flag to the cmake build to install the `tt-explorer` package in editable mode. 2. Run `source env/activate` to be in `tt-mlir` virtualenv for the following steps -3. Install [`tt-adapter`](https://github.com/vprajapati-tt/tt-adapter) using `pip install -e .` in tt-adapter root directory. -4. Install `tt-explorer` using `pip install -e .` in tt-explorer root directory -5. Run `tt-explorer` in terminal to start tt-explorer instance. (Refer to CLI section in API for specifics) -6. Ensure server has started in `tt-explorer` shell instance (check for message below) +3. Install the explorer tool by building the `explorer` target using `cmake --build build -- explorer` +4. Run `tt-explorer` in terminal to start tt-explorer instance. (Refer to CLI section in API for specifics) +5. Ensure server has started in `tt-explorer` shell instance (check for message below) ```sh Starting Model Explorer server at: http://localhost:8080 diff --git a/tools/explorer/test/models/forward_and_backward.mlir b/test/ttmlir/Explorer/forward_and_backward.mlir similarity index 100% rename from tools/explorer/test/models/forward_and_backward.mlir rename to test/ttmlir/Explorer/forward_and_backward.mlir diff --git a/tools/explorer/test/models/linear_autoencoder.mlir b/test/ttmlir/Explorer/linear_autoencoder.mlir similarity index 100% rename from tools/explorer/test/models/linear_autoencoder.mlir rename to test/ttmlir/Explorer/linear_autoencoder.mlir diff --git a/tools/explorer/test/models/open_llama_3b_single_layer.mlir b/test/ttmlir/Explorer/open_llama_3b_single_layer.mlir similarity index 100% rename from tools/explorer/test/models/open_llama_3b_single_layer.mlir rename to test/ttmlir/Explorer/open_llama_3b_single_layer.mlir diff --git a/tools/explorer/CMakeLists.txt b/tools/explorer/CMakeLists.txt index 44613b2671..ea0f7e95cb 100644 --- a/tools/explorer/CMakeLists.txt +++ b/tools/explorer/CMakeLists.txt @@ -15,9 +15,15 @@ ExternalProject_Add( INSTALL_COMMAND "" ) +set(PIP_EDITABLE_FLAG "") + +if (TT_EXPLORER_EDITABLE) + set(PIP_EDITABLE_FLAG "-e") +endif() + add_custom_target(explorer COMMENT "Building tt-explorer... 
${TTMLIR_BIN_DIR}" - COMMAND pip install $<$:-e> ${CMAKE_CURRENT_SOURCE_DIR}/tt_adapter + COMMAND pip install ${PIP_EDITABLE_FLAG} ${CMAKE_CURRENT_SOURCE_DIR}/tt_adapter COMMAND pip install ${CMAKE_CURRENT_SOURCE_DIR}/model-explorer/src/model-explorer/src/server/package DEPENDS TTMLIRPythonModules model-explorer ttrt diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index 46872c7bf9..d4e64fb46f 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -15,7 +15,7 @@ COMMAND_URL = "http://" + HOST + ":" + str(PORT) + "/apipost/v1/send_command" TEST_LOAD_MODEL_PATHS = [ "test/ttmlir/Dialect/TTNN/optimizer/mnist_sharding.mlir", - "tools/explorer/test/models/*.mlir", + "test/ttmlir/Explorer/*.mlir", ] TEST_EXECUTE_MODEL_PATHS = [ "test/ttmlir/Silicon/TTNN/optimizer/mnist_sharding_tiled.mlir", From a8ec9d2407d52fbe2ac34ebd34f9b13fe2c38946 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Mon, 9 Dec 2024 20:11:08 +0000 Subject: [PATCH 11/22] Shifted to visualize_from_config and added LIT check --- .../ttmlir/Explorer/{ => load}/forward_and_backward.mlir | 3 +++ test/ttmlir/Explorer/{ => load}/linear_autoencoder.mlir | 2 ++ .../Explorer/{ => load}/open_llama_3b_single_layer.mlir | 2 ++ tools/explorer/run.py | 2 +- tools/explorer/test/run_tests.py | 9 +++++++-- 5 files changed, 15 insertions(+), 3 deletions(-) rename test/ttmlir/Explorer/{ => load}/forward_and_backward.mlir (98%) rename test/ttmlir/Explorer/{ => load}/linear_autoencoder.mlir (99%) rename test/ttmlir/Explorer/{ => load}/open_llama_3b_single_layer.mlir (99%) diff --git a/test/ttmlir/Explorer/forward_and_backward.mlir b/test/ttmlir/Explorer/load/forward_and_backward.mlir similarity index 98% rename from test/ttmlir/Explorer/forward_and_backward.mlir rename to test/ttmlir/Explorer/load/forward_and_backward.mlir index 3f0b8f781d..2efcaf2ed0 100644 --- a/test/ttmlir/Explorer/forward_and_backward.mlir +++ b/test/ttmlir/Explorer/load/forward_and_backward.mlir @@ -1,3 +1,6 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s +// Need to ensure that model is valid MLIR module + module @SimpleModel attributes {} { func.func @forward(%arg0: tensor<1x784xf32> {ttir.name = "input_1"}, %arg1: tensor<10x784xf32> {ttir.name = "linear.weight"}, %arg2: tensor<10xf32> {ttir.name = "linear.bias"}) -> (tensor<1x10xf32> {ttir.name = "SimpleModel_472.output_softmax_1495"}) { %0 = tensor.empty() : tensor<784x10xf32> diff --git a/test/ttmlir/Explorer/linear_autoencoder.mlir b/test/ttmlir/Explorer/load/linear_autoencoder.mlir similarity index 99% rename from test/ttmlir/Explorer/linear_autoencoder.mlir rename to test/ttmlir/Explorer/load/linear_autoencoder.mlir index 8d7defc535..4a2a2bd5b3 100644 --- a/test/ttmlir/Explorer/linear_autoencoder.mlir +++ b/test/ttmlir/Explorer/load/linear_autoencoder.mlir @@ -1,3 +1,5 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s + module @LinearAE attributes {} { func.func @forward(%arg0: tensor<1x784xf32> {ttir.name = "input_1"}, %arg1: tensor<784x128xf32> {ttir.name = "encoder_lin1.weight"}, %arg2: tensor<128xf32> {ttir.name = "encoder_lin1.bias"}, %arg3: tensor<128x64xf32> {ttir.name = "encoder_lin2.weight"}, %arg4: tensor<64xf32> {ttir.name = "encoder_lin2.bias"}, %arg5: tensor<64x12xf32> {ttir.name = "encoder_lin3.weight"}, %arg6: tensor<12xf32> {ttir.name = "encoder_lin3.bias"}, %arg7: tensor<12x3xf32> {ttir.name = "encoder_lin4.weight"}, %arg8: tensor<3xf32> {ttir.name = "encoder_lin4.bias"}, %arg9: tensor<3x12xf32> {ttir.name = 
"decoder_lin1.weight"}, %arg10: tensor<12xf32> {ttir.name = "decoder_lin1.bias"}, %arg11: tensor<12x64xf32> {ttir.name = "decoder_lin2.weight"}, %arg12: tensor<64xf32> {ttir.name = "decoder_lin2.bias"}, %arg13: tensor<64x128xf32> {ttir.name = "decoder_lin3.weight"}, %arg14: tensor<128xf32> {ttir.name = "decoder_lin3.bias"}, %arg15: tensor<128x784xf32> {ttir.name = "decoder_lin4.weight"}, %arg16: tensor<784xf32> {ttir.name = "decoder_lin4.bias"}) -> (tensor<1x784xf32> {ttir.name = "LinearAE.output_add_29"}) { %0 = tensor.empty() : tensor<1x128xf32> diff --git a/test/ttmlir/Explorer/open_llama_3b_single_layer.mlir b/test/ttmlir/Explorer/load/open_llama_3b_single_layer.mlir similarity index 99% rename from test/ttmlir/Explorer/open_llama_3b_single_layer.mlir rename to test/ttmlir/Explorer/load/open_llama_3b_single_layer.mlir index 5e17dc39e9..e45f7ae321 100644 --- a/test/ttmlir/Explorer/open_llama_3b_single_layer.mlir +++ b/test/ttmlir/Explorer/load/open_llama_3b_single_layer.mlir @@ -1,3 +1,5 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s + #any_device = #tt.operand_constraint #loc = loc("LlamaForCausalLM":0:0) #system_desc = #tt.system_desc<[{role = host, target_triple = "x86_64-pc-linux-gnu"}], [{arch = , grid = 8x8, l1_size = 1499136, num_dram_channels = 12, dram_channel_size = 1073741824, noc_l1_address_align_bytes = 16, pcie_address_align_bytes = 32, noc_dram_address_align_bytes = 32, l1_unreserved_base = 1024, erisc_l1_unreserved_base = 1024, dram_unreserved_base = 1024, dram_unreserved_end = 1073741824, physical_cores = {worker = [ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 1x0, 1x1, 1x2, 1x3, 1x4, 1x5, 1x6, 1x7, 2x0, 2x1, 2x2, 2x3, 2x4, 2x5, 2x6, 2x7, 3x0, 3x1, 3x2, 3x3, 3x4, 3x5, 3x6, 3x7, 4x0, 4x1, 4x2, 4x3, 4x4, 4x5, 4x6, 4x7, 5x0, 5x1, 5x2, 5x3, 5x4, 5x5, 5x6, 5x7, 6x0, 6x1, 6x2, 6x3, 6x4, 6x5, 6x6, 6x7, 7x0, 7x1, 7x2, 7x3, 7x4, 7x5, 7x6, 7x7] dram = [ 8x0, 9x0, 10x0, 8x1, 9x1, 10x1, 8x2, 9x2, 10x2, 8x3, 9x3, 10x3]}, supported_data_types = [, , , , , , , , , , , ], supported_tile_sizes = [ 4x16, 16x16, 32x16, 4x32, 16x32, 32x32], num_cbs = 32}], [0], [3 : i32], [ 0x0x0x0]> diff --git a/tools/explorer/run.py b/tools/explorer/run.py index 6e1f8efc4f..9800fff4c6 100755 --- a/tools/explorer/run.py +++ b/tools/explorer/run.py @@ -9,4 +9,4 @@ # TODO(odjuricic): Hack to make our extension default for .mlir files. # This can be handled better when we switch to our model-explorer fork. 
model_explorer.extension_manager.ExtensionManager.BUILTIN_ADAPTER_MODULES = [] -model_explorer.visualize(extensions=["tt_adapter"]) +model_explorer.visualize_from_config(extensions=["tt_adapter"], no_open_in_browser=True) diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index d4e64fb46f..120e7c698a 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -53,8 +53,13 @@ def execute_command(model_path, settings): @pytest.fixture(scope="function", autouse=True) def start_server(request): server_thread = multiprocessing.Process( - target=model_explorer.visualize, - kwargs={"extensions": ["tt_adapter"], "host": HOST, "port": PORT}, + target=model_explorer.visualize_from_config, + kwargs={ + "extensions": ["tt_adapter"], + "host": HOST, + "port": PORT, + "no_open_in_browser": True, + }, ) server_thread.start() From 4263949c12ce31d86ac1c5e171c93934bd1982e7 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Tue, 10 Dec 2024 17:11:21 +0000 Subject: [PATCH 12/22] Upload test from other workflow to explorer workflow --- .github/workflows/build-and-test.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index f87c009f5c..a8030f5550 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -317,6 +317,12 @@ jobs: ln -sf ${{ steps.strings.outputs.install-output-dir }} ${{ steps.strings.outputs.build-output-dir }} llvm-lit -sv ${{ steps.strings.outputs.build-output-dir }}/test + - name: Upload tests + uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.build.runs-on }}-tests + path: ${{ steps.strings.outputs.build-output-dir }}/test + - name: Run functional tests shell: bash if: matrix.build.enable_perf == 'OFF' @@ -640,6 +646,11 @@ jobs: source env/activate cmake --build ${{ steps.strings.outputs.build-output-dir }} -- explorer + - name: Download Tests + uses: actions/download-artifact@v4 + with: + name: ${{ matrix.build.runs-on }}-test + - name: Run tt-explorer tests shell: bash run: | From c72ce193d2f46ebebdca7ef9fea790036eaa6efa Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Tue, 10 Dec 2024 17:21:36 +0000 Subject: [PATCH 13/22] Upload test from build workflow instead of run --- .github/workflows/build-and-test.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index a8030f5550..45589d9c53 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -158,6 +158,12 @@ jobs: cmake --build ${{ steps.strings.outputs.build-output-dir }} -- check-ttmlir cp build/test/report.xml ${{ steps.strings.outputs.test_report_path }} + - name: Upload tests + uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.build.runs-on }}-tests + path: ${{ steps.strings.outputs.build-output-dir }}/test + - name: Upload Test Report uses: actions/upload-artifact@v4 with: @@ -317,12 +323,6 @@ jobs: ln -sf ${{ steps.strings.outputs.install-output-dir }} ${{ steps.strings.outputs.build-output-dir }} llvm-lit -sv ${{ steps.strings.outputs.build-output-dir }}/test - - name: Upload tests - uses: actions/upload-artifact@v4 - with: - name: ${{ matrix.build.runs-on }}-tests - path: ${{ steps.strings.outputs.build-output-dir }}/test - - name: Run functional tests shell: bash if: matrix.build.enable_perf == 'OFF' From 09f1570b21b802847438e372da6db718e3e82b29 Mon Sep 17 00:00:00 2001 From: 
vprajapati-tt Date: Tue, 10 Dec 2024 18:03:37 +0000 Subject: [PATCH 14/22] Changed artifact name --- .github/workflows/build-and-test.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 45589d9c53..18129e3622 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -161,7 +161,7 @@ jobs: - name: Upload tests uses: actions/upload-artifact@v4 with: - name: ${{ matrix.build.runs-on }}-tests + name: ${{ matrix.build.runs-on }}-${{ matrix.build.name }}-tests path: ${{ steps.strings.outputs.build-output-dir }}/test - name: Upload Test Report @@ -649,7 +649,7 @@ jobs: - name: Download Tests uses: actions/download-artifact@v4 with: - name: ${{ matrix.build.runs-on }}-test + name: ${{ matrix.build.runs-on }}-${{ matrix.build.name }}-test - name: Run tt-explorer tests shell: bash From d11367e2f5d5441246b1b043003149f002912039 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Thu, 12 Dec 2024 16:27:43 +0000 Subject: [PATCH 15/22] Fixed artifact name -- again --- .github/workflows/build-and-test.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 18129e3622..3bcf88a050 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -161,7 +161,7 @@ jobs: - name: Upload tests uses: actions/upload-artifact@v4 with: - name: ${{ matrix.build.runs-on }}-${{ matrix.build.name }}-tests + name: ${{ matrix.build.name }}-tests path: ${{ steps.strings.outputs.build-output-dir }}/test - name: Upload Test Report @@ -649,7 +649,7 @@ jobs: - name: Download Tests uses: actions/download-artifact@v4 with: - name: ${{ matrix.build.runs-on }}-${{ matrix.build.name }}-test + name: ${{ matrix.build.name }}-test - name: Run tt-explorer tests shell: bash From 499e011c46fe0f5620c550ffb2b5366a70aa69a6 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Mon, 13 Jan 2025 17:45:18 +0000 Subject: [PATCH 16/22] Updated PR + Fixed artifact name --- .github/actions/build-tt-mlir-action/action.yaml | 11 +++++++++++ .github/workflows/build-and-test.yml | 8 +++++++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/.github/actions/build-tt-mlir-action/action.yaml b/.github/actions/build-tt-mlir-action/action.yaml index 104d233b52..f8eb67ba8d 100644 --- a/.github/actions/build-tt-mlir-action/action.yaml +++ b/.github/actions/build-tt-mlir-action/action.yaml @@ -105,12 +105,23 @@ runs: working-directory: ${{ inputs.install-output-dir }} run: tar cvf artifact.tar . + - name: Archive Build Directory + shell: bash + working-directory: ${{ inputs.build-output-dir }} + run: tar cvf artifact.tar . 
+ - name: Upload Install Folder uses: actions/upload-artifact@v4 with: name: install-artifacts-${{ inputs.build-name }} path: ${{ inputs.install-output-dir }}/artifact.tar + - name: Upload Build Folder + uses: actions/upload-artifact@v4 + with: + name: build-artifacts-${{ inputs.build-name }} + path: ${{ inputs.build-output-dir }}/artifact.tar + - name: Get Latest Tag and Version shell: bash run: | diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 0ddfd00609..8e3790074d 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -584,7 +584,13 @@ jobs: - name: Download Tests uses: actions/download-artifact@v4 with: - name: ${{ matrix.build.name }}-test + name: build-artifact-${{ matrix.build.name }} + path: ${{ steps.strings.outputs.build-output-dir }} + + - name: 'Untar build directory' + shell: bash + working-directory: ${{ steps.strings.outputs.install-output-dir }} + run: tar xvf artifact.tar - name: Run tt-explorer tests shell: bash From 1b60bcc6730ca6888531c7a77e0d5ce5be222203 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Mon, 13 Jan 2025 17:55:11 +0000 Subject: [PATCH 17/22] Small fixes --- test/ttmlir/Explorer/load/open_llama_3b_single_layer.mlir | 2 ++ tools/explorer/test/run_tests.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/test/ttmlir/Explorer/load/open_llama_3b_single_layer.mlir b/test/ttmlir/Explorer/load/open_llama_3b_single_layer.mlir index 97731870ba..ac97371031 100644 --- a/test/ttmlir/Explorer/load/open_llama_3b_single_layer.mlir +++ b/test/ttmlir/Explorer/load/open_llama_3b_single_layer.mlir @@ -1,3 +1,5 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s + #loc = loc("LlamaForCausalLM":0:0) #system_desc = #tt.system_desc<[{role = host, target_triple = "x86_64-pc-linux-gnu"}], [{arch = , grid = 8x8, l1_size = 1499136, num_dram_channels = 12, dram_channel_size = 1073741824, noc_l1_address_align_bytes = 16, pcie_address_align_bytes = 32, noc_dram_address_align_bytes = 32, l1_unreserved_base = 1024, erisc_l1_unreserved_base = 1024, dram_unreserved_base = 1024, dram_unreserved_end = 1073741824, physical_cores = {worker = [ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 1x0, 1x1, 1x2, 1x3, 1x4, 1x5, 1x6, 1x7, 2x0, 2x1, 2x2, 2x3, 2x4, 2x5, 2x6, 2x7, 3x0, 3x1, 3x2, 3x3, 3x4, 3x5, 3x6, 3x7, 4x0, 4x1, 4x2, 4x3, 4x4, 4x5, 4x6, 4x7, 5x0, 5x1, 5x2, 5x3, 5x4, 5x5, 5x6, 5x7, 6x0, 6x1, 6x2, 6x3, 6x4, 6x5, 6x6, 6x7, 7x0, 7x1, 7x2, 7x3, 7x4, 7x5, 7x6, 7x7] dram = [ 8x0, 9x0, 10x0, 8x1, 9x1, 10x1, 8x2, 9x2, 10x2, 8x3, 9x3, 10x3]}, supported_data_types = [, , , , , , , , , , , ], supported_tile_sizes = [ 4x16, 16x16, 32x16, 4x32, 16x32, 32x32], num_cbs = 32}], [0], [3 : i32], [ 0x0x0x0]> module @LlamaForCausalLM attributes {tt.system_desc = #system_desc} { diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index 9e03fd71a5..9f48ed0988 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -15,7 +15,7 @@ COMMAND_URL = "http://" + HOST + ":" + str(PORT) + "/apipost/v1/send_command" TEST_LOAD_MODEL_PATHS = [ "test/ttmlir/Dialect/TTNN/optimizer/mnist_sharding.mlir", - "test/ttmlir/Explorer/*.mlir", + "test/ttmlir/Explorer/**/*.mlir", ] MNIST_SHARDING_PATH = "test/ttmlir/Silicon/TTNN/optimizer/mnist_sharding.mlir" TEST_EXECUTE_MODEL_PATHS = [ From 6cf26f748ab57ef42f93ad169e0ec82632851216 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Mon, 13 Jan 2025 18:53:57 +0000 Subject: [PATCH 18/22] Remove race condition --- 
.github/workflows/build-and-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 8e3790074d..9ae3ca36ea 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -513,7 +513,7 @@ jobs: pytest -ssv runtime/test/python/ttnn/test_runtime_api.py build-and-test-explorer: - needs: build-image + needs: build-ttmlir timeout-minutes: 60 strategy: fail-fast: false From 8b2349cde6082822c2914382a1071ed1023f2899 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Tue, 21 Jan 2025 17:16:34 +0000 Subject: [PATCH 19/22] Needs list --- .github/workflows/build-and-test.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 9ae3ca36ea..c83a5bc01b 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -513,7 +513,9 @@ jobs: pytest -ssv runtime/test/python/ttnn/test_runtime_api.py build-and-test-explorer: - needs: build-ttmlir + needs: + - build-image + - build-ttmlir timeout-minutes: 60 strategy: fail-fast: false From f028538d7d61c2a7429caf703a67ae29971688ac Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 22 Jan 2025 17:48:40 +0000 Subject: [PATCH 20/22] Silly Spelling Error --- .github/workflows/build-and-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 6f603c8e4d..5749d28722 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -606,7 +606,7 @@ jobs: - name: Download Tests uses: actions/download-artifact@v4 with: - name: build-artifact-${{ matrix.build.name }} + name: build-artifacts-${{ matrix.build.name }} path: ${{ steps.strings.outputs.build-output-dir }} - name: 'Untar build directory' From 02b77613f08f8f254d4c46aefed54f78f08d1351 Mon Sep 17 00:00:00 2001 From: Vraj Prajapati Date: Wed, 22 Jan 2025 19:15:47 +0000 Subject: [PATCH 21/22] Another stupid mistake --- .github/workflows/build-and-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 5749d28722..bdaa6d3b14 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -611,7 +611,7 @@ jobs: - name: 'Untar build directory' shell: bash - working-directory: ${{ steps.strings.outputs.install-output-dir }} + working-directory: ${{ steps.strings.outputs.build-output-dir }} run: tar xvf artifact.tar - name: Run tt-explorer tests From bc0d5344c67c326a138d549a2ca84c3e23e2cd43 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Mon, 3 Feb 2025 18:05:17 +0000 Subject: [PATCH 22/22] Fixed failing Tests + added TTIR tests --- test/ttmlir/Explorer/load/llama_attention.mlir | 2 ++ tools/explorer/test/run_tests.py | 13 +++++++------ tools/explorer/tt_adapter/src/tt_adapter/mlir.py | 14 +++++++++----- 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/test/ttmlir/Explorer/load/llama_attention.mlir b/test/ttmlir/Explorer/load/llama_attention.mlir index a5fc0e0cb7..8bf2eb349c 100644 --- a/test/ttmlir/Explorer/load/llama_attention.mlir +++ b/test/ttmlir/Explorer/load/llama_attention.mlir @@ -1,3 +1,5 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s + module @SelfAttention attributes {tt.system_desc = #tt.system_desc<[{role = host, target_triple = "x86_64-pc-linux-gnu"}], [{arch 
= , grid = 8x8, l1_size = 1499136, num_dram_channels = 12, dram_channel_size = 1073741824, noc_l1_address_align_bytes = 16, pcie_address_align_bytes = 32, noc_dram_address_align_bytes = 32, l1_unreserved_base = 1024, erisc_l1_unreserved_base = 1024, dram_unreserved_base = 1024, dram_unreserved_end = 1073741824, physical_cores = {worker = [ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 1x0, 1x1, 1x2, 1x3, 1x4, 1x5, 1x6, 1x7, 2x0, 2x1, 2x2, 2x3, 2x4, 2x5, 2x6, 2x7, 3x0, 3x1, 3x2, 3x3, 3x4, 3x5, 3x6, 3x7, 4x0, 4x1, 4x2, 4x3, 4x4, 4x5, 4x6, 4x7, 5x0, 5x1, 5x2, 5x3, 5x4, 5x5, 5x6, 5x7, 6x0, 6x1, 6x2, 6x3, 6x4, 6x5, 6x6, 6x7, 7x0, 7x1, 7x2, 7x3, 7x4, 7x5, 7x6, 7x7] dram = [ 8x0, 9x0, 10x0, 8x1, 9x1, 10x1, 8x2, 9x2, 10x2, 8x3, 9x3, 10x3]}, supported_data_types = [, , , , , , , , , , , ], supported_tile_sizes = [ 4x16, 16x16, 32x16, 4x32, 16x32, 32x32], num_cbs = 32}], [0], [3 : i32], [ 0x0x0x0]>} { func.func @forward(%arg0: tensor<1x12x3200xf32> {ttir.name = "hidden_states_1"}, %arg1: tensor<1x1x12x12xf32> {ttir.name = "attention_mask"}, %arg2: tensor<1x12xf32> {ttir.name = "position_ids"}, %arg3: tensor<1x50x1xf32> {ttir.name = "input_0_unsqueeze_12"}, %arg4: tensor<1x32x50x100xf32> {ttir.name = "dc.input_tensor.index_25.2"}, %arg5: tensor<1xf32> {ttir.name = "input_1_multiply_26"}, %arg6: tensor<1x32x50x100xf32> {ttir.name = "dc.input_tensor.index_27.2"}, %arg7: tensor<1x32x50x100xf32> {ttir.name = "dc.input_tensor.index_39.2"}, %arg8: tensor<1xf32> {ttir.name = "input_1_multiply_40"}, %arg9: tensor<1x32x50x100xf32> {ttir.name = "dc.input_tensor.index_41.2"}, %arg10: tensor<1xf32> {ttir.name = "input_1_multiply_49"}, %arg11: tensor<3200x3200xf32> {ttir.name = "model.q_proj.weight"}, %arg12: tensor<3200x3200xf32> {ttir.name = "model.k_proj.weight"}, %arg13: tensor<3200x3200xf32> {ttir.name = "model.v_proj.weight"}, %arg14: tensor<3200x3200xf32> {ttir.name = "model.o_proj.weight"}) -> (tensor<1x12x3200xf32> {ttir.name = "SelfAttention.output_reshape_67"}) { %0 = tensor.empty() : tensor<12x3200xf32> diff --git a/tools/explorer/test/run_tests.py b/tools/explorer/test/run_tests.py index 9f48ed0988..e41be643b2 100644 --- a/tools/explorer/test/run_tests.py +++ b/tools/explorer/test/run_tests.py @@ -16,6 +16,7 @@ TEST_LOAD_MODEL_PATHS = [ "test/ttmlir/Dialect/TTNN/optimizer/mnist_sharding.mlir", "test/ttmlir/Explorer/**/*.mlir", + "test/ttmlir/Silicon/TTNN/**/*.mlir", ] MNIST_SHARDING_PATH = "test/ttmlir/Silicon/TTNN/optimizer/mnist_sharding.mlir" TEST_EXECUTE_MODEL_PATHS = [ @@ -156,7 +157,7 @@ def test_load_model(model_path): @pytest.mark.parametrize("model_path", get_test_files(TEST_EXECUTE_MODEL_PATHS)) def test_execute_model(model_path): execute_command_and_wait( - model_path, {"optimizationPolicy": "DF Sharding"}, timeout=60 + model_path, {"optimizationPolicy": "DF Sharding"}, timeout=300 ) convert_command_and_assert(model_path) @@ -165,7 +166,7 @@ def test_execute_mnist_l1_interleaved(): execute_command_and_wait( MNIST_SHARDING_PATH, {"optimizationPolicy": "Greedy L1 Interleaved"}, - timeout=60, + timeout=300, ) convert_command_and_assert(MNIST_SHARDING_PATH) @@ -174,7 +175,7 @@ def test_execute_mnist_optimizer_disabled(): execute_command_and_wait( MNIST_SHARDING_PATH, {"optimizationPolicy": "Optimizer Disabled"}, - timeout=60, + timeout=300, ) convert_command_and_assert(MNIST_SHARDING_PATH) @@ -195,7 +196,7 @@ def test_execute_mnist_with_overrides(): execute_command_and_wait( MNIST_SHARDING_PATH, {"optimizationPolicy": "DF Sharding", "overrides": overrides}, - timeout=60, + timeout=300, ) 
convert_command_and_assert(MNIST_SHARDING_PATH) @@ -204,7 +205,7 @@ def test_execute_and_check_perf_data_exists(): execute_command_and_wait( MNIST_SHARDING_PATH, {"optimizationPolicy": "DF Sharding"}, - timeout=60, + timeout=300, ) result = convert_command_and_assert(MNIST_SHARDING_PATH) assert "perf_data" in result["graphs"][0] @@ -215,5 +216,5 @@ def test_execute_model_invalid_policy(): execute_command_and_wait( TEST_EXECUTE_MODEL_PATHS[0], {"optimizationPolicy": "Invalid Policy"}, - timeout=60, + timeout=300, ) diff --git a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py index 7abe46bd4d..86695e9773 100644 --- a/tools/explorer/tt_adapter/src/tt_adapter/mlir.py +++ b/tools/explorer/tt_adapter/src/tt_adapter/mlir.py @@ -30,7 +30,7 @@ class AttrHandler: @staticmethod def default_parser(attr): - return [graph_builder.KeyValue(key=attr.name, value=str(attr.attr))] + return [graph_builder.KeyValue(key=str(attr.name), value=str(attr.attr))] @staticmethod def parse_attr(attr): @@ -515,7 +515,11 @@ def get_attributes(self): # Add output tensor attributes to the op itself if self.op.results: - output_tensor = self.op.result + # Examples like the pooling op contain more than 1 result tensor + # Since the output of a pool op is currently the same shape, we don't have to add any extra logic + # In the future we may have to obfuscate with output_shape_1, etc... + # For now let's just set the output_tensor to the first result + output_tensor = list(self.op.results)[0] output_attrs = [] if isinstance(output_tensor.type, ir.RankedTensorType): output_attrs = [ @@ -549,7 +553,7 @@ def get_attributes(self): def make_graph_node(self): return graph_builder.GraphNode( id=self.id, - label=self.op.name, + label=str(self.op.name), namespace=self.get_namespace(), attrs=self.get_attributes(), ) @@ -557,7 +561,7 @@ def make_graph_node(self): def make_constant_node(self, constant_name): return graph_builder.GraphNode( id=self._create_unique_id(), - label=constant_name, + label=str(constant_name), namespace=self.get_namespace(), ) @@ -690,7 +694,7 @@ def build_graph(module, perf_trace=None): id=str(output_connections[source_node.id]), attrs=[ graph_builder.KeyValue( - key="__tensor_tag", value=target_node.label + key="__tensor_tag", value=str(target_node.label) ), ] + output_attrs,