diff --git a/include/ttmlir/Conversion/TTNNToEmitC/Utils.h b/include/ttmlir/Conversion/TTNNToEmitC/Utils.h
index a2644728a6..548c4ee3e0 100644
--- a/include/ttmlir/Conversion/TTNNToEmitC/Utils.h
+++ b/include/ttmlir/Conversion/TTNNToEmitC/Utils.h
@@ -62,15 +62,10 @@ emitc::OpaqueAttr convertArrayAttrToSpan(Builder &builder, ArrayAttr attr);
 //
 emitc::OpaqueAttr createStdNullopt(Builder &builder);
 
-// Helper enum to differentiate between ttnn::Shape and ttnn::SimpleShape
-//
-enum class ShapeType { SimpleShape = 0, Shape = 1 };
-
-// Create emitc::CallOpaqueOp to ttnn::SimpleShape (Shape) constructor
+// Create emitc::CallOpaqueOp to ttnn::Shape constructor
 //
 emitc::CallOpaqueOp createShapeOp(ConversionPatternRewriter &rewriter,
-                                  ttnn::ShapeAttr shapeAttr, Location loc,
-                                  ShapeType shapeType = ShapeType::SimpleShape);
+                                  ttnn::ShapeAttr shapeAttr, Location loc);
 
 // Create ttnn::MemoryConfig and return emitc::CallOpaqueOp
 //
diff --git a/lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp b/lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
index 5238be6f0b..eaac7f2b99 100644
--- a/lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
+++ b/lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
@@ -568,11 +568,10 @@ class RepeatOpConversionPattern
                   ConversionPatternRewriter &rewriter) const override {
     ttnn::ShapeAttr repeatDims = repeatOp.getRepeatDimsAttr();
 
-    // Create ttnn::SimpleShape() call
+    // Create ttnn::Shape() call
     //
     emitc::CallOpaqueOp shapeOp = ttnn_to_emitc::utils::createShapeOp(
-        rewriter, repeatDims, repeatOp.getLoc(),
-        ttnn_to_emitc::utils::ShapeType::SimpleShape);
+        rewriter, repeatDims, repeatOp.getLoc());
 
     // Create operands vector
     //
@@ -843,7 +842,7 @@ class EmptyOpConversionPattern
       getDeviceOp = currGetDeviceOp;
     });
 
-    // Create ttnn::SimpleShape() call.
+    // Create ttnn::Shape() call.
     //
     emitc::CallOpaqueOp shapeOp = ttnn_to_emitc::utils::createShapeOp(
         rewriter, shapeAttr, srcOp.getLoc());
@@ -865,7 +864,7 @@ class EmptyOpConversionPattern
     // Create ArrayAttr object holding attributes and pointers to operands.
     //
     ArrayAttr arrayAttr = rewriter.getArrayAttr({
-        rewriter.getIndexAttr(0), // ttnn::SimpleShape
+        rewriter.getIndexAttr(0), // ttnn::Shape
         ttnn_to_emitc::utils::convertDType(rewriter, dataTypeAttr),
         ttnn_to_emitc::utils::convertLayoutAttr(rewriter, layoutAttr),
         rewriter.getIndexAttr(1), // ttnn::Device
@@ -910,7 +909,7 @@ class OnesOpConversionPattern
     // Attrs (like shape) need to be instantiated into objects before being
     // passed to the op. Therefore:
     //
-    // We first create a ttnn::SimpleShape object (SSA) by calling
+    // We first create a ttnn::Shape object (SSA) by calling
     // createShapeOp() and add it to the operands vector, but also add an
     // IndexAttr in ArrayAttr to reference it (this is an EmitC mechanism that
     // allows for combining Attrs and Values when calling an OpaqueOp). All the
@@ -919,7 +918,7 @@ class OnesOpConversionPattern
     // IndexAttr. If they are present, we create the object and pass it to the
     // op. If not, we pass std::nullopt.
 
-    // Create ttnn::SimpleShape() call
+    // Create ttnn::Shape() call
     //
     emitc::CallOpaqueOp shapeOp = ttnn_to_emitc::utils::createShapeOp(
         rewriter, srcOp.getShapeAttr(), srcOp.getLoc());
@@ -935,7 +934,7 @@ class OnesOpConversionPattern
     //
     size_t operandIndex = 0;
    ArrayAttr arrayAttr = rewriter.getArrayAttr({
-        rewriter.getIndexAttr(operandIndex++), // ttnn::SimpleShape
+        rewriter.getIndexAttr(operandIndex++), // ttnn::Shape
         srcOp.getDtype().has_value()
             ? ttnn_to_emitc::utils::convertDType(rewriter, srcOp.getDtypeAttr())
             : ttnn_to_emitc::utils::createStdNullopt(
diff --git a/lib/Conversion/TTNNToEmitC/Utils.cpp b/lib/Conversion/TTNNToEmitC/Utils.cpp
index 564357bfdc..1c31465724 100644
--- a/lib/Conversion/TTNNToEmitC/Utils.cpp
+++ b/lib/Conversion/TTNNToEmitC/Utils.cpp
@@ -193,10 +193,8 @@ emitc::OpaqueAttr createStdNullopt(Builder &builder) {
 }
 
 emitc::CallOpaqueOp createShapeOp(ConversionPatternRewriter &rewriter,
-                                  ttnn::ShapeAttr shapeAttr, Location loc,
-                                  ShapeType shapeType) {
-  llvm::StringRef shapeTypeStr =
-      shapeType == ShapeType::SimpleShape ? "ttnn::SimpleShape" : "ttnn::Shape";
+                                  ttnn::ShapeAttr shapeAttr, Location loc) {
+  llvm::StringRef shapeTypeStr = "ttnn::Shape";
 
   return rewriter.create<emitc::CallOpaqueOp>(
       loc, emitc::OpaqueType::get(rewriter.getContext(), shapeTypeStr),
diff --git a/lib/OpModel/TTNN/Conversion.cpp b/lib/OpModel/TTNN/Conversion.cpp
index fa4e116b6b..b31a454b0a 100644
--- a/lib/OpModel/TTNN/Conversion.cpp
+++ b/lib/OpModel/TTNN/Conversion.cpp
@@ -38,13 +38,13 @@ getDataType(const mlir::tt::ttnn::TTNNLayoutAttr layout) {
   }
 }
 
-::ttnn::SimpleShape getSimpleShape(const ::llvm::ArrayRef<int64_t> shape) {
+::ttnn::Shape getShape(const ::llvm::ArrayRef<int64_t> shape) {
   ::tt::tt_metal::SmallVector<uint32_t> small_vector_shape;
   for (const auto &dim : shape) {
     small_vector_shape.push_back(static_cast<uint32_t>(dim));
   }
 
-  return ::ttnn::SimpleShape(small_vector_shape);
+  return ::ttnn::Shape(small_vector_shape);
 }
 
 const std::array<uint32_t, 2>
@@ -147,7 +147,7 @@ getTensorLayout(const mlir::tt::ttnn::TTNNLayoutAttr &layout) {
 
 ::ttnn::TensorSpec getTensorSpec(const ::llvm::ArrayRef<int64_t> shape,
                                  const mlir::tt::ttnn::TTNNLayoutAttr &layout) {
-  return ::ttnn::TensorSpec(getSimpleShape(shape), getTensorLayout(layout));
+  return ::ttnn::TensorSpec(getShape(shape), getTensorLayout(layout));
 }
 
 } // namespace conversion
diff --git a/lib/OpModel/TTNN/Conversion.hpp b/lib/OpModel/TTNN/Conversion.hpp
index 137bc180d3..be2a070802 100644
--- a/lib/OpModel/TTNN/Conversion.hpp
+++ b/lib/OpModel/TTNN/Conversion.hpp
@@ -14,7 +14,7 @@ namespace conversion {
 ::tt::tt_metal::DataType
 getDataType(const mlir::tt::ttnn::TTNNLayoutAttr layout);
 
-::ttnn::SimpleShape getSimpleShape(const ::llvm::ArrayRef<int64_t> shape);
+::ttnn::Shape getShape(const ::llvm::ArrayRef<int64_t> shape);
 
 const std::array<uint32_t, 2>
 getShardShape(const mlir::tt::ttnn::TTNNLayoutAttr &layout);
diff --git a/runtime/lib/ttnn/operations/ccl/mesh_shard.cpp b/runtime/lib/ttnn/operations/ccl/mesh_shard.cpp
index b7d8153a7c..60e3558e06 100644
--- a/runtime/lib/ttnn/operations/ccl/mesh_shard.cpp
+++ b/runtime/lib/ttnn/operations/ccl/mesh_shard.cpp
@@ -21,12 +21,12 @@ void FullToShardShape(const ::ttnn::Tensor &input, ::ttnn::Tensor &out,
         *::ttnn::distributed::replicate_tensor_to_mesh_mapper(meshDevice));
   } else {
     DEBUG_ASSERT(
-        input.get_shape().rank() > 1,
+        input.get_logical_shape().rank() > 1,
Tensor rank=", - input.get_shape().rank()); + input.get_logical_shape().rank()); auto rowMesh = static_cast(shardShape[0]); auto colMesh = static_cast(shardShape[1]); - int lastDim = input.get_shape().rank() - 1; + int lastDim = input.get_logical_shape().rank() - 1; ::ttnn::distributed::Shard2dConfig shard2dConfig; // last tile replicate @@ -61,7 +61,7 @@ void ShardToFullShape(const ::ttnn::Tensor &input, ::ttnn::Tensor &out, } else { auto rowMesh = static_cast(shardShape[0]); auto colMesh = static_cast(shardShape[1]); - int lastDim = input.get_shape().rank() - 1; + int lastDim = input.get_logical_shape().rank() - 1; if ((rowMesh * colMesh) == (meshDevice.num_rows() * meshDevice.num_cols())) { // Full multi-device storage concatenation diff --git a/runtime/lib/ttnn/operations/creation/empty.cpp b/runtime/lib/ttnn/operations/creation/empty.cpp index f14ececa7f..530bf8cd33 100644 --- a/runtime/lib/ttnn/operations/creation/empty.cpp +++ b/runtime/lib/ttnn/operations/creation/empty.cpp @@ -11,7 +11,7 @@ namespace tt::runtime::ttnn::operations::creation { struct EmptyTensorConfig { - ::ttnn::SimpleShape shape; + ::ttnn::Shape shape; ::ttnn::DataType dtype; ::ttnn::Layout layout; uint32_t numShards; diff --git a/runtime/lib/ttnn/operations/creation/full.cpp b/runtime/lib/ttnn/operations/creation/full.cpp index 1326e1efe2..200dc6969f 100644 --- a/runtime/lib/ttnn/operations/creation/full.cpp +++ b/runtime/lib/ttnn/operations/creation/full.cpp @@ -11,7 +11,7 @@ namespace tt::runtime::ttnn::operations::creation { struct FullTensorConfig { - ::ttnn::SimpleShape shape; + ::ttnn::Shape shape; ::ttnn::DataType dtype; ::ttnn::Layout layout; float fillValue; diff --git a/runtime/lib/ttnn/operations/creation/ones.cpp b/runtime/lib/ttnn/operations/creation/ones.cpp index f2bd6c48ad..08a04beeea 100644 --- a/runtime/lib/ttnn/operations/creation/ones.cpp +++ b/runtime/lib/ttnn/operations/creation/ones.cpp @@ -18,7 +18,7 @@ namespace tt::runtime::ttnn::operations::creation { void run(const ::tt::target::ttnn::OnesOp *op, ProgramContext &context) { ProgramTensorPool &tensorPool = context.getTensorPool(); - const ::ttnn::SimpleShape shape = ::ttnn::SimpleShape( + const ::ttnn::Shape shape = ::ttnn::Shape( ::tt::runtime::ttnn::utils::toShapeFromFBShape(*op->shape())); std::optional<::ttnn::DataType> dtype = std::optional<::ttnn::DataType>(); diff --git a/runtime/lib/ttnn/operations/data_movement/repeat.cpp b/runtime/lib/ttnn/operations/data_movement/repeat.cpp index 2fd66992e1..39bae64df6 100644 --- a/runtime/lib/ttnn/operations/data_movement/repeat.cpp +++ b/runtime/lib/ttnn/operations/data_movement/repeat.cpp @@ -13,7 +13,7 @@ void run(const ::tt::target::ttnn::RepeatOp *op, ProgramContext &context) { DEBUG_ASSERT(in.is_allocated()); const auto *fbShape = op->repeat_dims(); const std::vector repeatDims(fbShape->begin(), fbShape->end()); - ::ttnn::SimpleShape repeatDimsShape(repeatDims); + ::ttnn::Shape repeatDimsShape(repeatDims); ::ttnn::Tensor out = ::ttnn::repeat(in, repeatDimsShape); tensorPool.insert_or_assign(op->out()->global_id(), out); } diff --git a/runtime/lib/ttnn/operations/pool/maxpool2d.cpp b/runtime/lib/ttnn/operations/pool/maxpool2d.cpp index 7b53755f1d..1565e30408 100644 --- a/runtime/lib/ttnn/operations/pool/maxpool2d.cpp +++ b/runtime/lib/ttnn/operations/pool/maxpool2d.cpp @@ -19,9 +19,8 @@ template static ::ttnn::Tensor preshardForMaxPool2d(const ::tt::target::ttnn::MaxPool2dOp *op, DeviceType &device, const ::ttnn::Tensor &input) { - const ::ttnn::SimpleShape inputShape{ - 
-      ::tt::runtime::ttnn::utils::toShapeFromFBShape(
-          *op->in()->desc()->shape())};
+  const ::ttnn::Shape inputShape{::tt::runtime::ttnn::utils::toShapeFromFBShape(
+      *op->in()->desc()->shape())};
   uint32_t output_height =
       1 + (op->input_height() + 2 * op->padding_height() -
            op->dilation_height() * (op->kernel_height() - 1) - 1) /
diff --git a/runtime/lib/ttnn/runtime.cpp b/runtime/lib/ttnn/runtime.cpp
index 7c63f5f123..b661694162 100644
--- a/runtime/lib/ttnn/runtime.cpp
+++ b/runtime/lib/ttnn/runtime.cpp
@@ -78,7 +78,7 @@ createOwnedTensor(std::shared_ptr<void> data,
 
   return ::ttnn::Tensor(
       createStorage(data.get(), numElements, dataType),
-      ::ttnn::SimpleShape(shape), utils::toTTNNDataType(dataType),
+      ::ttnn::Shape(shape), utils::toTTNNDataType(dataType),
       ::ttnn::Layout::ROW_MAJOR);
 }
 
@@ -122,7 +122,7 @@ Tensor createTensor(std::shared_ptr<void> data,
   auto tensor = std::make_shared<::ttnn::Tensor>(
       createStorage(data.get(), numElements, dataType),
-      ::ttnn::SimpleShape(shape), utils::toTTNNDataType(dataType),
+      ::ttnn::Shape(shape), utils::toTTNNDataType(dataType),
       ::ttnn::Layout::ROW_MAJOR);
   return Tensor(std::static_pointer_cast<void>(tensor), nullptr,
                 DeviceRuntime::TTNN);
 }
@@ -170,7 +170,7 @@ Tensor createTensor(Device device, Layout layout,
   ::ttnn::Tensor tensor = std::visit(
       [&](auto &&device) -> ::ttnn::Tensor {
         return ::ttnn::operations::core::allocate_tensor_on_device(
-            ::ttnn::SimpleShape(shape), layoutDesc.dataType, layoutDesc.layout,
+            ::ttnn::Shape(shape), layoutDesc.dataType, layoutDesc.layout,
             &(device.get()), layoutDesc.memoryConfig);
       },
       targetDevice);
diff --git a/runtime/test/include/tt/runtime/ttnn/test/dylib.cpp b/runtime/test/include/tt/runtime/ttnn/test/dylib.cpp
index c2232a56eb..374a1b464e 100644
--- a/runtime/test/include/tt/runtime/ttnn/test/dylib.cpp
+++ b/runtime/test/include/tt/runtime/ttnn/test/dylib.cpp
@@ -105,8 +105,9 @@ bool compareOuts(std::vector<Tensor> &lhs, std::vector<Tensor> &rhs) {
     LOG_ASSERT(lhsTensor->get_dtype() == rhsTensor->get_dtype(),
               "DType: ", static_cast<int>(lhsTensor->get_dtype()), ", ",
               static_cast<int>(rhsTensor->get_dtype()));
-    LOG_ASSERT(lhsTensor->get_shape() == rhsTensor->get_shape(),
-               "Shape: ", lhsTensor->get_shape(), ", ", rhsTensor->get_shape());
+    LOG_ASSERT(lhsTensor->get_logical_shape() == rhsTensor->get_logical_shape(),
+               "Shape: ", lhsTensor->get_logical_shape(), ", ",
+               rhsTensor->get_logical_shape());
     LOG_ASSERT(lhsTensor->get_layout() == rhsTensor->get_layout(),
               "Layout: ", static_cast<int>(lhsTensor->get_layout()), ", ",
               static_cast<int>(rhsTensor->get_layout()));
diff --git a/test/unittests/OpModel/TTNN/Conversion/TestConversion.cpp b/test/unittests/OpModel/TTNN/Conversion/TestConversion.cpp
index d38857ec7d..2444666b3c 100644
--- a/test/unittests/OpModel/TTNN/Conversion/TestConversion.cpp
+++ b/test/unittests/OpModel/TTNN/Conversion/TestConversion.cpp
@@ -13,14 +13,14 @@ class MlirToTtnnConversion : public OpModelFixture {};
 //================================================================================
 // getDataType
 //================================================================================
-class MlirToTtnnConversionSimpleShape
+class MlirToTtnnConversionShape
     : public MlirToTtnnConversion,
       public testing::WithParamInterface<mlir::SmallVector<int64_t>> {};
 
-TEST_P(MlirToTtnnConversionSimpleShape, SimpleShape) {
+TEST_P(MlirToTtnnConversionShape, Shape) {
   const auto &tensorShape = GetParam();
   const auto &shape =
-      mlir::tt::op_model::ttnn::conversion::getSimpleShape(tensorShape);
+      mlir::tt::op_model::ttnn::conversion::getShape(tensorShape);
 
   EXPECT_EQ(shape.size(), tensorShape.size());
   for (size_t i = 0; i < shape.size(); ++i) {
@@ -29,13 +29,13 @@
 }
 
 INSTANTIATE_TEST_SUITE_P(
-    ToSimpleShape, MlirToTtnnConversionSimpleShape,
+    ToShape, MlirToTtnnConversionShape,
     ::testing::Values(mlir::SmallVector<int64_t>{64, 32},
                       mlir::SmallVector<int64_t>{64, 32, 128},
                       mlir::SmallVector<int64_t>{64, 32, 128, 256}));
 
 //================================================================================
-// getSimpleShape
+// getShape
 //================================================================================
 class MlirToTtnnConversionDataType
     : public MlirToTtnnConversion,
@@ -501,13 +501,13 @@ TEST_F(MlirToTtnnConversion, TensorSpec) {
     const auto layout =
         CreateTiledLayout(tensorShape, mlir::tt::ttnn::BufferType::L1,
                           mlir::tt::ttnn::TensorMemoryLayout::BlockSharded);
-    const auto ttnnSimpleShape =
-        mlir::tt::op_model::ttnn::conversion::getSimpleShape(tensorShape);
+    const auto ttnnShape =
+        mlir::tt::op_model::ttnn::conversion::getShape(tensorShape);
     const auto ttnnLayout =
         mlir::tt::op_model::ttnn::conversion::getTensorLayout(layout);
     const auto tensorSpec = mlir::tt::op_model::ttnn::conversion::getTensorSpec(
         tensorShape, layout);
 
-    EXPECT_EQ(tensorSpec.logical_shape().volume(), ttnnSimpleShape.volume());
+    EXPECT_EQ(tensorSpec.logical_shape().volume(), ttnnShape.volume());
     EXPECT_EQ(tensorSpec.page_config().get_layout(), tt::tt_metal::Layout::TILE);
   }
@@ -516,13 +516,13 @@ TEST_F(MlirToTtnnConversion, TensorSpec) {
     const auto layout =
         CreateRowMajorLayout(tensorShape, mlir::tt::ttnn::BufferType::L1,
                              mlir::tt::ttnn::TensorMemoryLayout::BlockSharded);
-    const auto ttnnSimpleShape =
-        mlir::tt::op_model::ttnn::conversion::getSimpleShape(tensorShape);
+    const auto ttnnShape =
+        mlir::tt::op_model::ttnn::conversion::getShape(tensorShape);
     const auto ttnnLayout =
         mlir::tt::op_model::ttnn::conversion::getTensorLayout(layout);
     const auto tensorSpec = mlir::tt::op_model::ttnn::conversion::getTensorSpec(
         tensorShape, layout);
-    EXPECT_EQ(tensorSpec.logical_shape().volume(), ttnnSimpleShape.volume());
+    EXPECT_EQ(tensorSpec.logical_shape().volume(), ttnnShape.volume());
     EXPECT_EQ(tensorSpec.page_config().get_layout(),
               tt::tt_metal::Layout::ROW_MAJOR);
   }
diff --git a/third_party/CMakeLists.txt b/third_party/CMakeLists.txt
index b487b059b9..f96e516a39 100644
--- a/third_party/CMakeLists.txt
+++ b/third_party/CMakeLists.txt
@@ -1,6 +1,6 @@
 include(ExternalProject)
 
-set(TT_METAL_VERSION "9d69fb143bac50983dff914c5348539d0a7d2021")
+set(TT_METAL_VERSION "f8fe02d9f591f4c7f929131630ce5147cfe88f8e")
 
 if ("$ENV{ARCH_NAME}" STREQUAL "grayskull")
   set(ARCH_NAME "grayskull")
diff --git a/tools/ttnn-standalone/ttnn-standalone.cpp b/tools/ttnn-standalone/ttnn-standalone.cpp
index 8be0a466ac..d03dec08b5 100644
--- a/tools/ttnn-standalone/ttnn-standalone.cpp
+++ b/tools/ttnn-standalone/ttnn-standalone.cpp
@@ -13,7 +13,7 @@ ttnn::Tensor add(ttnn::Tensor v1, ttnn::Tensor v2) {
   ttnn::Tensor v8 = ttnn::to_device(v2, v3, v7);
   ttnn::Tensor v9 = ttnn::to_layout(v8, ttnn::Layout::TILE, std::nullopt, std::nullopt, static_cast<::ttnn::IDevice *>(nullptr));
   ttnn::deallocate(v8, false);
-  ttnn::SimpleShape v10 = ttnn::SimpleShape(tt::tt_metal::LegacyShape({32, 32, }));
+  ttnn::Shape v10 = ttnn::Shape(tt::tt_metal::LegacyShape({32, 32, }));
   ttnn::MemoryConfig v11 = ttnn::MemoryConfig(ttnn::TensorMemoryLayout::INTERLEAVED, ttnn::BufferType::DRAM);
   ttnn::Tensor v12 = ttnn::empty(v10, ttnn::DataType::BFLOAT16, ttnn::Layout::TILE, v3, v11);
   ttnn::Tensor v13 = ttnn::add(v6, v9, std::nullopt, std::nullopt, v12);
@@ -27,9 +27,9 @@ ttnn::Tensor add(ttnn::Tensor v1, ttnn::Tensor v2) {
 }
 
 std::tuple<ttnn::Tensor, ttnn::Tensor> createInputsFor_add() {
-  ttnn::SimpleShape v1 = ttnn::SimpleShape(tt::tt_metal::LegacyShape({32, 32, }));
+  ttnn::Shape v1 = ttnn::Shape(tt::tt_metal::LegacyShape({32, 32, }));
   ttnn::Tensor v2 = ttnn::ones(v1, ttnn::DataType::BFLOAT16, ttnn::Layout::ROW_MAJOR, std::nullopt, std::nullopt);
-  ttnn::SimpleShape v3 = ttnn::SimpleShape(tt::tt_metal::LegacyShape({32, 32, }));
+  ttnn::Shape v3 = ttnn::Shape(tt::tt_metal::LegacyShape({32, 32, }));
   ttnn::Tensor v4 = ttnn::ones(v3, ttnn::DataType::BFLOAT16, ttnn::Layout::ROW_MAJOR, std::nullopt, std::nullopt);
   return std::make_tuple(v2, v4);
 }
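
A minimal sketch of the renamed API, outside the patch itself: at the pinned tt-metal commit, ttnn::SimpleShape is gone and ttnn::Shape takes over its constructors, while Tensor::get_shape() callers move to get_logical_shape(). The constructors below are inferred from conversion::getShape() and the runtime call sites in the diff above; the include paths are assumptions, not verified against tt-metal. The call sites touched here only use rank(), which is the same for logical and padded shapes, so the swap should be behavior-preserving there.

// Hedged sketch of the post-uplift shape API -- hypothetical usage, not part
// of the patch. Include paths are guesses; constructors mirror the diff.
#include <cstdint>
#include <initializer_list>

#include "ttnn/tensor/shape/shape.hpp"  // assumed location of ttnn::Shape
#include "ttnn/tensor/tensor.hpp"       // assumed location of ttnn::Tensor

// Was: ::ttnn::SimpleShape(dims); same dims container, new type name
// (mirrors conversion::getShape() in lib/OpModel/TTNN/Conversion.cpp).
::ttnn::Shape makeShape(std::initializer_list<uint32_t> dims) {
  ::tt::tt_metal::SmallVector<uint32_t> smallVec(dims);
  return ::ttnn::Shape(smallVec);
}

// Was: t.get_shape().rank(); get_logical_shape() reports the unpadded
// (logical) dimensions, so rank-based logic like mesh_shard's carries over.
int lastDim(const ::ttnn::Tensor &t) {
  return static_cast<int>(t.get_logical_shape().rank()) - 1;
}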