diff --git a/runtime/lib/ttnn/program.cpp b/runtime/lib/ttnn/program.cpp index 17134289c..26a967a25 100644 --- a/runtime/lib/ttnn/program.cpp +++ b/runtime/lib/ttnn/program.cpp @@ -710,14 +710,7 @@ vectorToArray(const std::vector &vec) { template static ::ttnn::Tensor invoke_reshape(const ::ttnn::Tensor &tensor, const std::vector &shape) { - // TDOO #686 - figure out how to call reshape in tile layout - if (tensor.get_layout() == ::ttnn::Layout::ROW_MAJOR) { - return ::ttnn::reshape(tensor, vectorToArray(shape)); - } - - auto rowMajorTensor = untilize(tensor); - auto res = ::ttnn::reshape(rowMajorTensor, vectorToArray(shape)); - return tilize(res); + return ::ttnn::reshape(tensor, vectorToArray(shape)); } static void run(::tt::target::ttnn::ReshapeOp const *op, diff --git a/test/ttmlir/Dialect/TTNN/reshape/reshape_fail_on_dims.mlir b/test/ttmlir/Dialect/TTNN/reshape/reshape_fail_on_dims.mlir deleted file mode 100644 index 039fb31f7..000000000 --- a/test/ttmlir/Dialect/TTNN/reshape/reshape_fail_on_dims.mlir +++ /dev/null @@ -1,10 +0,0 @@ -// RUN: not ttmlir-opt --ttir-to-ttnn-backend-pipeline %s 2>&1 | FileCheck %s -// CHECK: error: 'ttir.reshape' op Shape attribute must match the output tensor shape for dimensions that are not -1 -#any_device_tile = #tt.operand_constraint -module attributes {} { - func.func @forward(%arg0: tensor<4x2x32x34xbf16>) -> tensor<2x4x32x34xbf16> { - %0 = tensor.empty() : tensor<2x4x32x34xbf16> - %1 = "ttir.reshape"(%arg0, %0) <{shape = [3: i32, 4: i32, 32: i32, 34: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x34xbf16>, tensor<2x4x32x34xbf16>) -> tensor<2x4x32x34xbf16> - return %1 : tensor<2x4x32x34xbf16> - } -} diff --git a/test/ttmlir/Dialect/TTNN/reshape/simple_reshape.mlir b/test/ttmlir/Dialect/TTNN/reshape/simple_reshape.mlir deleted file mode 100644 index e3f979a36..000000000 --- a/test/ttmlir/Dialect/TTNN/reshape/simple_reshape.mlir +++ /dev/null @@ -1,10 +0,0 @@ -// RUN: ttmlir-opt 
--ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device_tile = #tt.operand_constraint -module attributes {} { - func.func @forward(%arg0: tensor<4x2x32x34xbf16>) -> tensor<2x4x32x34xbf16> { - %0 = tensor.empty() : tensor<2x4x32x34xbf16> - // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] - %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 34: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x34xbf16>, tensor<2x4x32x34xbf16>) -> tensor<2x4x32x34xbf16> - return %1 : tensor<2x4x32x34xbf16> - } -} diff --git a/test/ttmlir/Dialect/TTNN/simple_reshape.mlir b/test/ttmlir/Dialect/TTNN/simple_reshape.mlir new file mode 100644 index 000000000..6b7c0edfe --- /dev/null +++ b/test/ttmlir/Dialect/TTNN/simple_reshape.mlir @@ -0,0 +1,10 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s +#any_device_tile = #tt.operand_constraint +module attributes {} { + func.func @forward(%arg0: tensor<4x2x32x32xbf16>) -> tensor<2x4x32x32xbf16> { + %0 = tensor.empty() : tensor<2x4x32x32xbf16> + // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] + %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 32: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x32xbf16>, tensor<2x4x32x32xbf16>) -> tensor<2x4x32x32xbf16> + return %1 : tensor<2x4x32x32xbf16> + } +} diff --git a/test/ttmlir/Silicon/TTNN/reshape/reshape_tile_aligned.mlir b/test/ttmlir/Silicon/TTNN/reshape/reshape_tile_aligned.mlir deleted file mode 100644 index a94a0bb0c..000000000 --- a/test/ttmlir/Silicon/TTNN/reshape/reshape_tile_aligned.mlir +++ /dev/null @@ -1,12 +0,0 @@ -// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir -// RUN: FileCheck %s --input-file=%t.mlir -// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn -#any_device_tile = #tt.operand_constraint -module attributes {} { - func.func @forward(%arg0: tensor<4x2x32x32xbf16>) -> tensor<4x32x2x32xbf16> { 
- %0 = tensor.empty() : tensor<4x32x2x32xbf16> - // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] - %1 = "ttir.reshape"(%arg0, %0) <{shape = [4: i32, 32: i32, 2: i32, 32: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x32xbf16>, tensor<4x32x2x32xbf16>) -> tensor<4x32x2x32xbf16> - return %1 : tensor<4x32x2x32xbf16> - } -} diff --git a/test/ttmlir/Silicon/TTNN/reshape/simple_reshape.mlir b/test/ttmlir/Silicon/TTNN/reshape/simple_reshape.mlir deleted file mode 100644 index 5bdd940a2..000000000 --- a/test/ttmlir/Silicon/TTNN/reshape/simple_reshape.mlir +++ /dev/null @@ -1,12 +0,0 @@ -// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir -// RUN: FileCheck %s --input-file=%t.mlir -// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn -#any_device_tile = #tt.operand_constraint -module attributes {} { - func.func @forward(%arg0: tensor<4x2x32x34xbf16>) -> tensor<2x4x32x34xbf16> { - %0 = tensor.empty() : tensor<2x4x32x34xbf16> - // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] - %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 34: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x34xbf16>, tensor<2x4x32x34xbf16>) -> tensor<2x4x32x34xbf16> - return %1 : tensor<2x4x32x34xbf16> - } -}