diff --git a/runtime/lib/ttnn/program.cpp b/runtime/lib/ttnn/program.cpp index 26a967a257..17134289c5 100644 --- a/runtime/lib/ttnn/program.cpp +++ b/runtime/lib/ttnn/program.cpp @@ -710,7 +710,14 @@ vectorToArray(const std::vector &vec) { template static ::ttnn::Tensor invoke_reshape(const ::ttnn::Tensor &tensor, const std::vector &shape) { - return ::ttnn::reshape(tensor, vectorToArray(shape)); + // TODO #686 - figure out how to call reshape in tile layout + if (tensor.get_layout() == ::ttnn::Layout::ROW_MAJOR) { + return ::ttnn::reshape(tensor, vectorToArray(shape)); + } + + auto rowMajorTensor = untilize(tensor); + auto res = ::ttnn::reshape(rowMajorTensor, vectorToArray(shape)); + return tilize(res); } static void run(::tt::target::ttnn::ReshapeOp const *op, diff --git a/test/ttmlir/Dialect/TTNN/reshape/reshape_fail_on_dims.mlir b/test/ttmlir/Dialect/TTNN/reshape/reshape_fail_on_dims.mlir new file mode 100644 index 0000000000..039fb31f79 --- /dev/null +++ b/test/ttmlir/Dialect/TTNN/reshape/reshape_fail_on_dims.mlir @@ -0,0 +1,10 @@ +// RUN: not ttmlir-opt --ttir-to-ttnn-backend-pipeline %s 2>&1 | FileCheck %s +// CHECK: error: 'ttir.reshape' op Shape attribute must match the output tensor shape for dimensions that are not -1 +#any_device_tile = #tt.operand_constraint +module attributes {} { + func.func @forward(%arg0: tensor<4x2x32x34xbf16>) -> tensor<2x4x32x34xbf16> { + %0 = tensor.empty() : tensor<2x4x32x34xbf16> + %1 = "ttir.reshape"(%arg0, %0) <{shape = [3: i32, 4: i32, 32: i32, 34: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x34xbf16>, tensor<2x4x32x34xbf16>) -> tensor<2x4x32x34xbf16> + return %1 : tensor<2x4x32x34xbf16> + } +} diff --git a/test/ttmlir/Dialect/TTNN/reshape/simple_reshape.mlir b/test/ttmlir/Dialect/TTNN/reshape/simple_reshape.mlir new file mode 100644 index 0000000000..e3f979a36a --- /dev/null +++ b/test/ttmlir/Dialect/TTNN/reshape/simple_reshape.mlir @@ -0,0 +1,10 @@ +// RUN: ttmlir-opt 
--ttir-to-ttnn-backend-pipeline %s | FileCheck %s +#any_device_tile = #tt.operand_constraint +module attributes {} { + func.func @forward(%arg0: tensor<4x2x32x34xbf16>) -> tensor<2x4x32x34xbf16> { + %0 = tensor.empty() : tensor<2x4x32x34xbf16> + // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] + %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 34: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x34xbf16>, tensor<2x4x32x34xbf16>) -> tensor<2x4x32x34xbf16> + return %1 : tensor<2x4x32x34xbf16> + } +} diff --git a/test/ttmlir/Dialect/TTNN/simple_reshape.mlir b/test/ttmlir/Dialect/TTNN/simple_reshape.mlir deleted file mode 100644 index 6b7c0edfe8..0000000000 --- a/test/ttmlir/Dialect/TTNN/simple_reshape.mlir +++ /dev/null @@ -1,10 +0,0 @@ -// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s| FileCheck %s -#any_device_tile = #tt.operand_constraint -module attributes {} { - func.func @forward(%arg0: tensor<4x2x32x32xbf16>) -> tensor<2x4x32x32xbf16> { - %0 = tensor.empty() : tensor<2x4x32x32xbf16> - // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] - %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 32: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x32xbf16>, tensor<2x4x32x32xbf16>) -> tensor<2x4x32x32xbf16> - return %1 : tensor<2x4x32x32xbf16> - } -} diff --git a/test/ttmlir/Silicon/TTNN/reshape/reshape_tile_aligned.mlir b/test/ttmlir/Silicon/TTNN/reshape/reshape_tile_aligned.mlir new file mode 100644 index 0000000000..a94a0bb0c7 --- /dev/null +++ b/test/ttmlir/Silicon/TTNN/reshape/reshape_tile_aligned.mlir @@ -0,0 +1,12 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir +// RUN: FileCheck %s --input-file=%t.mlir +// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn +#any_device_tile = #tt.operand_constraint +module attributes {} { + func.func @forward(%arg0: tensor<4x2x32x32xbf16>) -> 
tensor<4x32x2x32xbf16> { + %0 = tensor.empty() : tensor<4x32x2x32xbf16> + // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] + %1 = "ttir.reshape"(%arg0, %0) <{shape = [4: i32, 32: i32, 2: i32, 32: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x32xbf16>, tensor<4x32x2x32xbf16>) -> tensor<4x32x2x32xbf16> + return %1 : tensor<4x32x2x32xbf16> + } +} diff --git a/test/ttmlir/Silicon/TTNN/reshape/simple_reshape.mlir b/test/ttmlir/Silicon/TTNN/reshape/simple_reshape.mlir new file mode 100644 index 0000000000..5bdd940a27 --- /dev/null +++ b/test/ttmlir/Silicon/TTNN/reshape/simple_reshape.mlir @@ -0,0 +1,12 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir +// RUN: FileCheck %s --input-file=%t.mlir +// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn +#any_device_tile = #tt.operand_constraint +module attributes {} { + func.func @forward(%arg0: tensor<4x2x32x34xbf16>) -> tensor<2x4x32x34xbf16> { + %0 = tensor.empty() : tensor<2x4x32x34xbf16> + // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] + %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 34: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x34xbf16>, tensor<2x4x32x34xbf16>) -> tensor<2x4x32x34xbf16> + return %1 : tensor<2x4x32x34xbf16> + } +}