From ed1125f6f94b1bf1a053b7bd892d5587aa28865b Mon Sep 17 00:00:00 2001
From: Muhammad Asif Manzoor
Date: Tue, 5 Nov 2024 01:54:05 +0000
Subject: [PATCH] StableHLO runtime tests

---
 .../Silicon/StableHLO/binary/add_op.mlir      | 15 ++++
 .../Silicon/StableHLO/binary/compare_op.mlir  | 37 ++++++++++
 .../Silicon/StableHLO/binary/concat_op.mlir   | 70 +++++++++++++++++++
 .../Silicon/StableHLO/binary/divide_op.mlir   | 13 ++++
 .../Silicon/StableHLO/binary/logical_op.mlir  | 27 +++++++
 .../Silicon/StableHLO/binary/maximum_op.mlir  | 12 ++++
 .../Silicon/StableHLO/binary/minimum_op.mlir  | 12 ++++
 .../Silicon/StableHLO/binary/multiply_op.mlir | 15 ++++
 .../StableHLO/binary/remainder_op.mlir        | 13 ++++
 .../Silicon/StableHLO/binary/subtract_op.mlir | 13 ++++
 .../Silicon/StableHLO/broadcast_op.mlir       | 13 ++++
 .../Silicon/StableHLO/composite_op.mlir       | 19 +++++
 .../StableHLO/constant/constant_bf16.mlir     | 22 ++++++
 .../StableHLO/constant/constant_bool.mlir     | 17 +++++
 .../StableHLO/constant/constant_f16.mlir      | 22 ++++++
 .../StableHLO/constant/constant_f32.mlir      | 22 ++++++
 .../StableHLO/constant/constant_i16.mlir      | 22 ++++++
 .../StableHLO/constant/constant_i32.mlir      | 22 ++++++
 .../StableHLO/constant/constant_i64.mlir      | 22 ++++++
 .../StableHLO/constant/constant_i8.mlir       | 22 ++++++
 test/ttmlir/Silicon/StableHLO/conv2d_op.mlir  | 21 ++++++
 test/ttmlir/Silicon/StableHLO/convert_op.mlir | 22 ++++++
 .../Silicon/StableHLO/dot_general_op.mlir     | 12 ++++
 .../Silicon/StableHLO/maxpool2d_op.mlir       | 16 +++++
 .../Silicon/StableHLO/mnist_inference.mlir    | 41 +++++++++++
 .../Silicon/StableHLO/reduce_add_op.mlir      | 12 ++++
 .../Silicon/StableHLO/reduce_maximum_op.mlir  | 12 ++++
 test/ttmlir/Silicon/StableHLO/reshape_op.mlir | 12 ++++
 .../Silicon/StableHLO/scalar_add_op.mlir      | 12 ++++
 test/ttmlir/Silicon/StableHLO/sign_op.mlir    | 13 ++++
 test/ttmlir/Silicon/StableHLO/slice_op.mlir   | 17 +++++
 .../Silicon/StableHLO/unary/absolute_op.mlir  | 13 ++++
 .../Silicon/StableHLO/unary/cbrt_op.mlir      | 13 ++++
 .../Silicon/StableHLO/unary/ceil_op.mlir      | 12 ++++
 .../Silicon/StableHLO/unary/cosine_op.mlir    | 12 ++++
 .../unary/exponential_minus_one_op.mlir       | 13 ++++
 .../StableHLO/unary/exponential_op.mlir       | 13 ++++
 .../Silicon/StableHLO/unary/floor_op.mlir     | 13 ++++
 .../Silicon/StableHLO/unary/isfinite_op.mlir  | 13 ++++
 .../StableHLO/unary/log_plus_one_op.mlir      | 13 ++++
 .../Silicon/StableHLO/unary/negate_op.mlir    | 13 ++++
 .../Silicon/StableHLO/unary/rsqrt_op.mlir     | 13 ++++
 .../Silicon/StableHLO/unary/sine_op.mlir      | 12 ++++
 .../Silicon/StableHLO/unary/sqrt_op.mlir      | 13 ++++
 .../Silicon/StableHLO/unary/transpose_op.mlir | 12 ++++
 45 files changed, 798 insertions(+)
 create mode 100644 test/ttmlir/Silicon/StableHLO/binary/add_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/binary/compare_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/binary/concat_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/binary/divide_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/binary/logical_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/binary/maximum_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/binary/minimum_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/binary/multiply_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/binary/remainder_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/binary/subtract_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/broadcast_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/composite_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/constant/constant_bf16.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/constant/constant_bool.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/constant/constant_f16.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/constant/constant_f32.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/constant/constant_i16.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/constant/constant_i32.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/constant/constant_i64.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/constant/constant_i8.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/conv2d_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/convert_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/dot_general_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/maxpool2d_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/mnist_inference.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/reduce_add_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/reduce_maximum_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/reshape_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/scalar_add_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/sign_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/slice_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/absolute_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/cbrt_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/ceil_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/cosine_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/exponential_minus_one_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/exponential_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/floor_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/isfinite_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/log_plus_one_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/negate_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/rsqrt_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/sine_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/sqrt_op.mlir
 create mode 100644 test/ttmlir/Silicon/StableHLO/unary/transpose_op.mlir

diff --git a/test/ttmlir/Silicon/StableHLO/binary/add_op.mlir b/test/ttmlir/Silicon/StableHLO/binary/add_op.mlir
new file mode 100644
index 0000000000..2ecea027a9
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/binary/add_op.mlir
@@ -0,0 +1,15 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>
+
+module @jit_eltwise_add attributes {} {
+  func.func public @test_add(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.add %arg0, %arg1 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/binary/compare_op.mlir b/test/ttmlir/Silicon/StableHLO/binary/compare_op.mlir
new file mode 100644
index 0000000000..e9716401dd
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/binary/compare_op.mlir
@@ -0,0 +1,37 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_eltwise_compare attributes {} {
+  func.func public @test_eq(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
+    %0 = stablehlo.compare EQ, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
+    return %0 : tensor<64x128xi1>
+  }
+
+  func.func public @test_ne(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
+    %0 = stablehlo.compare NE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
+    return %0 : tensor<64x128xi1>
+  }
+
+  func.func public @test_ge(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
+    %0 = stablehlo.compare GE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
+    return %0 : tensor<64x128xi1>
+  }
+
+  func.func public @test_gt(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
+    %0 = stablehlo.compare GT, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
+    return %0 : tensor<64x128xi1>
+  }
+
+  func.func public @test_le(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
+    %0 = stablehlo.compare LE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
+    return %0 : tensor<64x128xi1>
+  }
+
+  func.func public @test_lt(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> {
+    %0 = stablehlo.compare LT, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1>
+    return %0 : tensor<64x128xi1>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/binary/concat_op.mlir b/test/ttmlir/Silicon/StableHLO/binary/concat_op.mlir
new file mode 100644
index 0000000000..005c8b646a
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/binary/concat_op.mlir
@@ -0,0 +1,70 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_concat attributes {} {
+  func.func public @test_concat(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> {
+    %0 = "stablehlo.concatenate"(%arg0, %arg1) {
+      dimension = 1 : i64
+    } : (tensor<32x32xf32>, tensor<32x64xf32>) -> tensor<32x96xf32>
+    return %0 : tensor<32x96xf32>
+  }
+
+  func.func public @test_concat_2(%arg0: tensor<3x2xi64>, %arg1: tensor<1x2xi64>) -> tensor<4x2xi64> {
+    %0 = "stablehlo.concatenate"(%arg0, %arg1) {
+      dimension = 0 : i64
+    } : (tensor<3x2xi64>, tensor<1x2xi64>) -> tensor<4x2xi64>
+    return %0 : tensor<4x2xi64>
+  }
+
+  func.func public @test_concat_3(%arg0: tensor<4x3xf32>, %arg1: tensor<4x5xf32>) -> tensor<4x8xf32> {
+    %0 = "stablehlo.concatenate"(%arg0, %arg1) {
+      dimension = 1 : i64
+    } : (tensor<4x3xf32>, tensor<4x5xf32>) -> tensor<4x8xf32>
+    return %0 : tensor<4x8xf32>
+  }
+
+  func.func public @test_concat_4(%arg0: tensor<128x64xf32>, %arg1: tensor<128x96xf32>) -> tensor<128x160xf32> {
+    %0 = "stablehlo.concatenate"(%arg0, %arg1) {
+      dimension = 1 : i64
+    } : (tensor<128x64xf32>, tensor<128x96xf32>) -> tensor<128x160xf32>
+    return %0 : tensor<128x160xf32>
+  }
+
+  func.func public @test_concat_5(%arg0: tensor<256x512xi64>, %arg1: tensor<256x256xi64>) -> tensor<256x768xi64> {
+    %0 = "stablehlo.concatenate"(%arg0, %arg1) {
+      dimension = 1 : i64
+    } : (tensor<256x512xi64>, tensor<256x256xi64>) -> tensor<256x768xi64>
+    return %0 : tensor<256x768xi64>
+  }
+
+  func.func public @test_concat_6(%arg0: tensor<64x32xf64>, %arg1: tensor<64x64xf64>) -> tensor<64x96xf64> {
+    %0 = "stablehlo.concatenate"(%arg0, %arg1) {
+      dimension = 1 : i64
+    } : (tensor<64x32xf64>, tensor<64x64xf64>) -> tensor<64x96xf64>
+    return %0 : tensor<64x96xf64>
+  }
+
+  func.func public @test_concat_7(%arg0: tensor<1000x128xi32>, %arg1: tensor<500x128xi32>) -> tensor<1500x128xi32> {
+    %0 = "stablehlo.concatenate"(%arg0, %arg1) {
+      dimension = 0 : i64
+    } : (tensor<1000x128xi32>, tensor<500x128xi32>) -> tensor<1500x128xi32>
+    return %0 : tensor<1500x128xi32>
+  }
+
+  func.func public @test_concat_8(%arg0: tensor<3x2x4x5xf64>, %arg1: tensor<3x2x4x3xf64>) -> tensor<3x2x4x8xf64> {
+    %0 = "stablehlo.concatenate"(%arg0, %arg1) {
+      dimension = 3 : i64
+    } : (tensor<3x2x4x5xf64>, tensor<3x2x4x3xf64>) -> tensor<3x2x4x8xf64>
+    return %0 : tensor<3x2x4x8xf64>
+  }
+
+  func.func public @test_concat_9(%arg0: tensor<8x4x6xi32>, %arg1: tensor<8x4x2xi32>) -> tensor<8x4x8xi32> {
+    %0 = "stablehlo.concatenate"(%arg0, %arg1) {
+      dimension = 2 : i64
+    } : (tensor<8x4x6xi32>, tensor<8x4x2xi32>) -> tensor<8x4x8xi32>
+    return %0 : tensor<8x4x8xi32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/binary/divide_op.mlir b/test/ttmlir/Silicon/StableHLO/binary/divide_op.mlir
new file mode 100644
index 0000000000..44a758b843
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/binary/divide_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_divide attributes {} {
+  func.func public @test_divide(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.divide %arg0, %arg1 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/binary/logical_op.mlir b/test/ttmlir/Silicon/StableHLO/binary/logical_op.mlir
new file mode 100644
index 0000000000..75763dbeb8
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/binary/logical_op.mlir
@@ -0,0 +1,27 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_eltwise_logical attributes {} {
+  func.func public @logical_and(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> {
+    %0 = stablehlo.and %arg0, %arg1 : tensor<64x128xi1>
+    return %0 : tensor<64x128xi1>
+  }
+
+  func.func public @logical_or(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> {
+    %0 = stablehlo.or %arg0, %arg1 : tensor<64x128xi1>
+    return %0 : tensor<64x128xi1>
+  }
+
+  func.func public @logical_not(%arg0: tensor<64x128xi1>) -> tensor<64x128xi1> {
+    %0 = stablehlo.not %arg0 : tensor<64x128xi1>
+    return %0 : tensor<64x128xi1>
+  }
+
+  func.func public @logical_not_scalar(%arg0: tensor<i1>) -> tensor<i1> {
+    %0 = stablehlo.not %arg0 : tensor<i1>
+    return %0 : tensor<i1>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/binary/maximum_op.mlir b/test/ttmlir/Silicon/StableHLO/binary/maximum_op.mlir
new file mode 100644
index 0000000000..72e0bd14c7
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/binary/maximum_op.mlir
@@ -0,0 +1,12 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_eltwise_maximum attributes {} {
+  func.func public @test_maximum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.maximum %arg0, %arg1 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/binary/minimum_op.mlir b/test/ttmlir/Silicon/StableHLO/binary/minimum_op.mlir
new file mode 100644
index 0000000000..7d7f3cac46
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/binary/minimum_op.mlir
@@ -0,0 +1,12 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_eltwise_minimum attributes {} {
+  func.func public @test_minimum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.minimum %arg0, %arg1 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/binary/multiply_op.mlir b/test/ttmlir/Silicon/StableHLO/binary/multiply_op.mlir
new file mode 100644
index 0000000000..c08b966e62
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/binary/multiply_op.mlir
@@ -0,0 +1,15 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>
+
+module @jit_eltwise_multiply attributes {} {
+  func.func public @test_multiply(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
+    %0 = stablehlo.multiply %arg0, %arg1 : tensor<13x21x3xf32>
+    return %0 : tensor<13x21x3xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/binary/remainder_op.mlir b/test/ttmlir/Silicon/StableHLO/binary/remainder_op.mlir
new file mode 100644
index 0000000000..9bc6e7eb23
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/binary/remainder_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_remainder attributes {} {
+  func.func public @test_remainder(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.remainder %arg0, %arg1 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/binary/subtract_op.mlir b/test/ttmlir/Silicon/StableHLO/binary/subtract_op.mlir
new file mode 100644
index 0000000000..7064ca12ab
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/binary/subtract_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_subtract attributes {} {
+  func.func public @test_subtract(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.subtract %arg0, %arg1 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/broadcast_op.mlir b/test/ttmlir/Silicon/StableHLO/broadcast_op.mlir
new file mode 100644
index 0000000000..928bcbba49
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/broadcast_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_broadcast attributes {mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} {
+  func.func public @main(%arg0: tensor<1xf32> {mhlo.layout_mode = "default"}, %arg1: tensor<64x128xf32> {mhlo.layout_mode = "default"}) -> (tensor<64x128xf32> {jax.result_info = "", mhlo.layout_mode = "default"}) {
+    %0 = stablehlo.broadcast_in_dim %arg0, dims = [1] : (tensor<1xf32>) -> tensor<64x128xf32>
+    %1 = stablehlo.maximum %0, %arg1 : tensor<64x128xf32>
+    return %1 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/composite_op.mlir b/test/ttmlir/Silicon/StableHLO/composite_op.mlir
new file mode 100644
index 0000000000..a44ce1e65e
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/composite_op.mlir
@@ -0,0 +1,19 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_eltwise_add attributes {} {
+  func.func private @add_impl(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.add %arg0, %arg1 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+
+  func.func public @main(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %results = stablehlo.composite "jit_eltwise_add.my_add" %arg0, %arg1 {
+      decomposition = @add_impl
+    } : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    return %results : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/constant/constant_bf16.mlir b/test/ttmlir/Silicon/StableHLO/constant/constant_bf16.mlir
new file mode 100644
index 0000000000..bd438963c4
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/constant/constant_bf16.mlir
@@ -0,0 +1,22 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_constant attributes {} {
+  func.func public @test_bfloat16_scalar() -> tensor<bf16> {
+    %0 = stablehlo.constant dense<3.0> : tensor<bf16>
+    return %0 : tensor<bf16>
+  }
+
+  func.func public @test_bfloat16_empty() -> tensor<64x128xbf16> {
+    %0 = stablehlo.constant dense<0.0> : tensor<64x128xbf16>
+    return %0 : tensor<64x128xbf16>
+  }
+
+  func.func public @test_bfloat16_splat() -> tensor<64x128xbf16> {
+    %0 = stablehlo.constant dense<3.0> : tensor<64x128xbf16>
+    return %0 : tensor<64x128xbf16>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/constant/constant_bool.mlir b/test/ttmlir/Silicon/StableHLO/constant/constant_bool.mlir
new file mode 100644
index 0000000000..8fe2ceefc3
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/constant/constant_bool.mlir
@@ -0,0 +1,17 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_constant attributes {} {
+  func.func public @test_boolean_scalar() -> tensor<i1> {
+    %0 = stablehlo.constant dense<true> : tensor<i1>
+    return %0 : tensor<i1>
+  }
+
+  func.func public @test_boolean_splat() -> tensor<64xi1> {
+    %0 = stablehlo.constant dense<true> : tensor<64xi1>
+    return %0 : tensor<64xi1>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/constant/constant_f16.mlir b/test/ttmlir/Silicon/StableHLO/constant/constant_f16.mlir
new file mode 100644
index 0000000000..eebd19ed31
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/constant/constant_f16.mlir
@@ -0,0 +1,22 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_constant attributes {} {
+  func.func public @test_float16_scalar() -> tensor<f16> {
+    %0 = stablehlo.constant dense<3.0> : tensor<f16>
+    return %0 : tensor<f16>
+  }
+
+  func.func public @test_float16_empty() -> tensor<64xf16> {
+    %0 = stablehlo.constant dense<0.0> : tensor<64xf16>
+    return %0 : tensor<64xf16>
+  }
+
+  func.func public @test_float16_splat() -> tensor<64xf16> {
+    %0 = stablehlo.constant dense<3.0> : tensor<64xf16>
+    return %0 : tensor<64xf16>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/constant/constant_f32.mlir b/test/ttmlir/Silicon/StableHLO/constant/constant_f32.mlir
new file mode 100644
index 0000000000..09ce2f18a8
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/constant/constant_f32.mlir
@@ -0,0 +1,22 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_constant attributes {} {
+  func.func public @test_float_scalar() -> tensor<f32> {
+    %0 = stablehlo.constant dense<0.3> : tensor<f32>
+    return %0 : tensor<f32>
+  }
+
+  func.func public @test_float_empty() -> tensor<64xf32> {
+    %0 = stablehlo.constant dense<0.0> : tensor<64xf32>
+    return %0 : tensor<64xf32>
+  }
+
+  func.func public @test_float_splat() -> tensor<64xf32> {
+    %0 = stablehlo.constant dense<0.3> : tensor<64xf32>
+    return %0 : tensor<64xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/constant/constant_i16.mlir b/test/ttmlir/Silicon/StableHLO/constant/constant_i16.mlir
new file mode 100644
index 0000000000..1b757844d5
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/constant/constant_i16.mlir
@@ -0,0 +1,22 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_constant attributes {} {
+  func.func public @test_int16_scalar() -> tensor<i16> {
+    %0 = stablehlo.constant dense<3> : tensor<i16>
+    return %0 : tensor<i16>
+  }
+
+  func.func public @test_int16_empty() -> tensor<64x128xi16> {
+    %0 = stablehlo.constant dense<0> : tensor<64x128xi16>
+    return %0 : tensor<64x128xi16>
+  }
+
+  func.func public @test_int16_splat() -> tensor<64x128xi16> {
+    %0 = stablehlo.constant dense<3> : tensor<64x128xi16>
+    return %0 : tensor<64x128xi16>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/constant/constant_i32.mlir b/test/ttmlir/Silicon/StableHLO/constant/constant_i32.mlir
new file mode 100644
index 0000000000..db88dc2d9c
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/constant/constant_i32.mlir
@@ -0,0 +1,22 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_constant attributes {} {
+  func.func public @test_int32_scalar() -> tensor<i32> {
+    %0 = stablehlo.constant dense<3> : tensor<i32>
+    return %0 : tensor<i32>
+  }
+
+  func.func public @test_int32_empty() -> tensor<64x128xi32> {
+    %0 = stablehlo.constant dense<0> : tensor<64x128xi32>
+    return %0 : tensor<64x128xi32>
+  }
+
+  func.func public @test_int32_splat() -> tensor<64x128xi32> {
+    %0 = stablehlo.constant dense<3> : tensor<64x128xi32>
+    return %0 : tensor<64x128xi32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/constant/constant_i64.mlir b/test/ttmlir/Silicon/StableHLO/constant/constant_i64.mlir
new file mode 100644
index 0000000000..ff1ece580e
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/constant/constant_i64.mlir
@@ -0,0 +1,22 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_constant attributes {} {
+  func.func public @test_int64_scalar() -> tensor<i64> {
+    %0 = stablehlo.constant dense<3> : tensor<i64>
+    return %0 : tensor<i64>
+  }
+
+  func.func public @test_int64_empty() -> tensor<64x128xi64> {
+    %0 = stablehlo.constant dense<0> : tensor<64x128xi64>
+    return %0 : tensor<64x128xi64>
+  }
+
+  func.func public @test_int64_splat() -> tensor<64x128xi64> {
+    %0 = stablehlo.constant dense<3> : tensor<64x128xi64>
+    return %0 : tensor<64x128xi64>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/constant/constant_i8.mlir b/test/ttmlir/Silicon/StableHLO/constant/constant_i8.mlir
new file mode 100644
index 0000000000..b95106601f
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/constant/constant_i8.mlir
@@ -0,0 +1,22 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_constant attributes {} {
+  func.func public @test_int8_scalar() -> tensor<i8> {
+    %0 = stablehlo.constant dense<3> : tensor<i8>
+    return %0 : tensor<i8>
+  }
+
+  func.func public @test_int8_empty() -> tensor<64x128xi8> {
+    %0 = stablehlo.constant dense<0> : tensor<64x128xi8>
+    return %0 : tensor<64x128xi8>
+  }
+
+  func.func public @test_int8_splat() -> tensor<64x128xi8> {
+    %0 = stablehlo.constant dense<3> : tensor<64x128xi8>
+    return %0 : tensor<64x128xi8>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/conv2d_op.mlir b/test/ttmlir/Silicon/StableHLO/conv2d_op.mlir
new file mode 100644
index 0000000000..f2571c6271
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/conv2d_op.mlir
@@ -0,0 +1,21 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_convolution attributes {} {
+  func.func public @test_convolution(%arg0: tensor<1x128x128x32xf32>, %arg1: tensor<64x32x3x3xf32>) -> tensor<1x128x128x64xf32> {
+    %0 = stablehlo.convolution(%arg0, %arg1)
+      dim_numbers = [b, 0, 1, f]x[o, i, 0, 1]->[b, 0, 1, f],
+      window = {
+        stride = [1, 1],
+        pad = [[1, 1], [1, 1]],
+      } {
+        feature_group_count = 1 : i64,
+        batch_group_count = 1 : i64,
+        precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]
+      } : (tensor<1x128x128x32xf32>, tensor<64x32x3x3xf32>) -> tensor<1x128x128x64xf32>
+    return %0 : tensor<1x128x128x64xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/convert_op.mlir b/test/ttmlir/Silicon/StableHLO/convert_op.mlir
new file mode 100644
index 0000000000..af5287a3f9
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/convert_op.mlir
@@ -0,0 +1,22 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_convert attributes {} {
+  func.func public @test_convert(%arg0: tensor<64x128xf32>) -> tensor<64x128xbf16> {
+    %0 = stablehlo.convert %arg0 : (tensor<64x128xf32>) -> tensor<64x128xbf16>
+    return %0 : tensor<64x128xbf16>
+  }
+}
+
+module @jit_eltwise_add attributes {} {
+  func.func public @test_add(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xbf16> {
+    %0 = stablehlo.convert %arg0 : (tensor<64x128xf32>) -> tensor<64x128xbf16>
+    %1 = stablehlo.convert %arg1 : (tensor<64x128xf32>) -> tensor<64x128xbf16>
+    %2 = stablehlo.add %0, %1 : tensor<64x128xbf16>
+    return %2 : tensor<64x128xbf16>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/dot_general_op.mlir b/test/ttmlir/Silicon/StableHLO/dot_general_op.mlir
new file mode 100644
index 0000000000..72b6638f32
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/dot_general_op.mlir
@@ -0,0 +1,12 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_dot_general attributes {} {
+  func.func public @test_dot_general(%arg0 : tensor<16x32xf32>, %arg1 : tensor<32x8xf32>) -> tensor<16x8xf32> {
+    %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<16x32xf32>, tensor<32x8xf32>) -> tensor<16x8xf32>
+    return %0 : tensor<16x8xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/maxpool2d_op.mlir b/test/ttmlir/Silicon/StableHLO/maxpool2d_op.mlir
new file mode 100644
index 0000000000..d503729ab6
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/maxpool2d_op.mlir
@@ -0,0 +1,16 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+func.func public @test_maxpool2d(%arg0: tensor<1x128x128x32xbf16>) -> tensor<1x64x64x32xbf16> {
+  %0 = stablehlo.constant dense<0xFF80> : tensor<bf16>
+  %1 = stablehlo.broadcast_in_dim %0, dims = [] : (tensor<bf16>) -> tensor<bf16>
+  %2 = "stablehlo.reduce_window"(%arg0, %1) <{padding = dense<[[0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi64>, window_dimensions = array<i64: 1, 3, 3, 1>, window_strides = array<i64: 1, 2, 2, 1>}> ({
+  ^bb0(%arg2: tensor<bf16>, %arg3: tensor<bf16>):
+    %3 = stablehlo.maximum %arg2, %arg3 : tensor<bf16>
+    stablehlo.return %3 : tensor<bf16>
+  }) : (tensor<1x128x128x32xbf16>, tensor<bf16>) -> tensor<1x64x64x32xbf16>
+  return %2 : tensor<1x64x64x32xbf16>
+}
diff --git a/test/ttmlir/Silicon/StableHLO/mnist_inference.mlir b/test/ttmlir/Silicon/StableHLO/mnist_inference.mlir
new file mode 100644
index 0000000000..6958d7f928
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/mnist_inference.mlir
@@ -0,0 +1,41 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+// TODO: Enable when all ops are supported.
+// UNSUPPORTED: true
+module @jit_predict attributes {mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} {
+  func.func public @main(%arg0: tensor<512x784xf32> {mhlo.layout_mode = "default"}, %arg1: tensor<512xf32> {mhlo.layout_mode = "default"}, %arg2: tensor<10x512xf32> {mhlo.layout_mode = "default"}, %arg3: tensor<10xf32> {mhlo.layout_mode = "default"}, %arg4: tensor<128x784xui8> {mhlo.layout_mode = "default"}) -> (tensor<128x10xf32> {jax.result_info = "", mhlo.layout_mode = "default"}) {
+    %0 = stablehlo.convert %arg0 : tensor<512x784xf32>
+    %1 = stablehlo.convert %arg4 : (tensor<128x784xui8>) -> tensor<128x784xf32>
+    %2 = stablehlo.dot_general %0, %1, contracting_dims = [1] x [1], precision = [DEFAULT, DEFAULT] : (tensor<512x784xf32>, tensor<128x784xf32>) -> tensor<512x128xf32>
+    %3 = stablehlo.transpose %2, dims = [1, 0] : (tensor<512x128xf32>) -> tensor<128x512xf32>
+    %4 = stablehlo.broadcast_in_dim %arg1, dims = [1] : (tensor<512xf32>) -> tensor<1x512xf32>
+    %5 = stablehlo.broadcast_in_dim %4, dims = [0, 1] : (tensor<1x512xf32>) -> tensor<128x512xf32>
+    %6 = stablehlo.add %3, %5 : tensor<128x512xf32>
+    %cst = stablehlo.constant dense<0.000000e+00> : tensor<f32>
+    %7 = stablehlo.broadcast_in_dim %cst, dims = [] : (tensor<f32>) -> tensor<128x512xf32>
+    %8 = stablehlo.maximum %7, %6 : tensor<128x512xf32>
+    %9 = stablehlo.dot_general %arg2, %8, contracting_dims = [1] x [1], precision = [DEFAULT, DEFAULT] : (tensor<10x512xf32>, tensor<128x512xf32>) -> tensor<10x128xf32>
+    %10 = stablehlo.transpose %9, dims = [1, 0] : (tensor<10x128xf32>) -> tensor<128x10xf32>
+    %11 = stablehlo.broadcast_in_dim %arg3, dims = [1] : (tensor<10xf32>) -> tensor<1x10xf32>
+    %12 = stablehlo.broadcast_in_dim %11, dims = [0, 1] : (tensor<1x10xf32>) -> tensor<128x10xf32>
+    %13 = stablehlo.add %10, %12 : tensor<128x10xf32>
+    %cst_0 = stablehlo.constant dense<0xFF800000> : tensor<f32>
+    %14 = stablehlo.reduce(%13 init: %cst_0) applies stablehlo.maximum across dimensions = [1] : (tensor<128x10xf32>, tensor<f32>) -> tensor<128xf32>
+    %cst_1 = stablehlo.constant dense<0xFF800000> : tensor<f32>
+    %15 = stablehlo.broadcast_in_dim %cst_1, dims = [] : (tensor<f32>) -> tensor<128xf32>
+    %16 = stablehlo.maximum %15, %14 : tensor<128xf32>
+    %17 = stablehlo.broadcast_in_dim %16, dims = [0] : (tensor<128xf32>) -> tensor<128x1xf32>
+    %18 = stablehlo.broadcast_in_dim %17, dims = [0, 1] : (tensor<128x1xf32>) -> tensor<128x10xf32>
+    %19 = stablehlo.subtract %13, %18 : tensor<128x10xf32>
+    %20 = stablehlo.exponential %19 : tensor<128x10xf32>
+    %cst_2 = stablehlo.constant dense<0.000000e+00> : tensor<f32>
+    %21 = stablehlo.reduce(%20 init: %cst_2) applies stablehlo.add across dimensions = [1] : (tensor<128x10xf32>, tensor<f32>) -> tensor<128xf32>
+    %22 = stablehlo.broadcast_in_dim %21, dims = [0] : (tensor<128xf32>) -> tensor<128x1xf32>
+    %23 = stablehlo.broadcast_in_dim %22, dims = [0, 1] : (tensor<128x1xf32>) -> tensor<128x10xf32>
+    %24 = stablehlo.divide %20, %23 : tensor<128x10xf32>
+    return %24 : tensor<128x10xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/reduce_add_op.mlir b/test/ttmlir/Silicon/StableHLO/reduce_add_op.mlir
new file mode 100644
index 0000000000..1fcad52bd8
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/reduce_add_op.mlir
@@ -0,0 +1,12 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+// UNSUPPORTED: true
+module @jit_reduce_add attributes {} {
+  func.func public @test_reduce_add(%arg0: tensor<128x10xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> {
+    %0 = stablehlo.reduce(%arg0 init: %cst_0) applies stablehlo.add across dimensions = [1] : (tensor<128x10xf32>, tensor<f32>) -> tensor<128xf32>
+    return %0 : tensor<128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/reduce_maximum_op.mlir b/test/ttmlir/Silicon/StableHLO/reduce_maximum_op.mlir
new file mode 100644
index 0000000000..d620c8c509
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/reduce_maximum_op.mlir
@@ -0,0 +1,12 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+// UNSUPPORTED: true
+module @jit_reduce_maximum attributes {} {
+  func.func public @test_reduce_maximum(%arg0: tensor<128x10xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> {
+    %0 = stablehlo.reduce(%arg0 init: %cst_0) applies stablehlo.maximum across dimensions = [1] : (tensor<128x10xf32>, tensor<f32>) -> tensor<128xf32>
+    return %0 : tensor<128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/reshape_op.mlir b/test/ttmlir/Silicon/StableHLO/reshape_op.mlir
new file mode 100644
index 0000000000..830ba29b85
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/reshape_op.mlir
@@ -0,0 +1,12 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_module_reshape attributes {mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} {
+  func.func public @main(%arg0: tensor<1x64x64x64xf32> {mhlo.layout_mode = "default", mhlo.sharding = "{replicated}"}) -> (tensor<1x1x4096x64xf32> {jax.result_info = "", mhlo.layout_mode = "default"}) {
+    %0 = stablehlo.reshape %arg0 : (tensor<1x64x64x64xf32>) -> tensor<1x1x4096x64xf32>
+    return %0 : tensor<1x1x4096x64xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/scalar_add_op.mlir b/test/ttmlir/Silicon/StableHLO/scalar_add_op.mlir
new file mode 100644
index 0000000000..26f697464e
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/scalar_add_op.mlir
@@ -0,0 +1,12 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_eltwise_scalar_add attributes {} {
+  func.func public @test_scalar_add(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> {
+    %0 = stablehlo.add %arg0, %arg1 : tensor<f32>
+    return %0 : tensor<f32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/sign_op.mlir b/test/ttmlir/Silicon/StableHLO/sign_op.mlir
new file mode 100644
index 0000000000..8a48333be8
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/sign_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_sign attributes {} {
+  func.func public @test_sign(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.sign %arg0 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/slice_op.mlir b/test/ttmlir/Silicon/StableHLO/slice_op.mlir
new file mode 100644
index 0000000000..05d10ff4a1
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/slice_op.mlir
@@ -0,0 +1,17 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_slice attributes {} {
+  func.func @slice_op(%arg0: tensor<32x64xf32>) -> tensor<8x8xf32> {
+    %result = "stablehlo.slice"(%arg0) {
+      start_indices = array<i64: 0, 16>,
+      limit_indices = array<i64: 16, 32>,
+      strides = array<i64: 2, 2>
+    } : (tensor<32x64xf32>) -> tensor<8x8xf32>
+    return %result : tensor<8x8xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/absolute_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/absolute_op.mlir
new file mode 100644
index 0000000000..068ba4ee83
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/absolute_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_abs attributes {} {
+  func.func public @test_abs(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.abs %arg0 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/cbrt_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/cbrt_op.mlir
new file mode 100644
index 0000000000..0a3f92ccc9
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/cbrt_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_cbrt attributes {} {
+  func.func public @test_cbrt(%arg0: tensor<64x128xf64>) -> tensor<64x128xf64> {
+    %0 = stablehlo.cbrt %arg0 : tensor<64x128xf64>
+    return %0 : tensor<64x128xf64>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/ceil_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/ceil_op.mlir
new file mode 100644
index 0000000000..fb6a7ef371
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/ceil_op.mlir
@@ -0,0 +1,12 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_eltwise_ceil attributes {} {
+  func.func public @test_ceil(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.ceil %arg0 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/cosine_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/cosine_op.mlir
new file mode 100644
index 0000000000..8b90d1a7da
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/cosine_op.mlir
@@ -0,0 +1,12 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_eltwise_cosine attributes {} {
+  func.func public @test_cosine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.cosine %arg0 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/exponential_minus_one_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/exponential_minus_one_op.mlir
new file mode 100644
index 0000000000..6f75889ba9
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/exponential_minus_one_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_expm1 attributes {} {
+  func.func public @test_expm1(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.exponential_minus_one %arg0 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/exponential_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/exponential_op.mlir
new file mode 100644
index 0000000000..bb9b4c23ef
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/exponential_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_exp attributes {} {
+  func.func public @test_exp(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.exponential %arg0 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/floor_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/floor_op.mlir
new file mode 100644
index 0000000000..df7048cd52
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/floor_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_floor attributes {} {
+  func.func public @test_floor(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.floor %arg0 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/isfinite_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/isfinite_op.mlir
new file mode 100644
index 0000000000..50ed563dcf
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/isfinite_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_isfinite attributes {} {
+  func.func public @test_isfinite(%arg0: tensor<64x128xf32>) -> tensor<64x128xi1> {
+    %0 = stablehlo.is_finite %arg0 : (tensor<64x128xf32>) -> tensor<64x128xi1>
+    return %0 : tensor<64x128xi1>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/log_plus_one_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/log_plus_one_op.mlir
new file mode 100644
index 0000000000..d1d95c9809
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/log_plus_one_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_log_plus_one attributes {} {
+  func.func public @test_log_plus_one(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.log_plus_one %arg0 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/negate_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/negate_op.mlir
new file mode 100644
index 0000000000..e495f7ef61
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/negate_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_neg attributes {} {
+  func.func public @test_neg(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.negate %arg0 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/rsqrt_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/rsqrt_op.mlir
new file mode 100644
index 0000000000..a1b1c92521
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/rsqrt_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_rsqrt attributes {} {
+  func.func public @test_rsqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.rsqrt %arg0 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/sine_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/sine_op.mlir
new file mode 100644
index 0000000000..19bcec2d46
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/sine_op.mlir
@@ -0,0 +1,12 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_eltwise_sine attributes {} {
+  func.func public @test_sine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.sine %arg0 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/sqrt_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/sqrt_op.mlir
new file mode 100644
index 0000000000..7b893b8037
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/sqrt_op.mlir
@@ -0,0 +1,13 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module @jit_eltwise_sqrt attributes {} {
+  func.func public @test_sqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
+    %0 = stablehlo.sqrt %arg0 : tensor<64x128xf32>
+    return %0 : tensor<64x128xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/StableHLO/unary/transpose_op.mlir b/test/ttmlir/Silicon/StableHLO/unary/transpose_op.mlir
new file mode 100644
index 0000000000..2314dae90d
--- /dev/null
+++ b/test/ttmlir/Silicon/StableHLO/unary/transpose_op.mlir
@@ -0,0 +1,12 @@
+// REQUIRES: stablehlo
+// RUN: rm -rf %t.ttnn
+// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" | \
+// RUN: ttmlir-translate --ttnn-to-flatbuffer > %t.ttnn
+
+module @jit_transpose attributes {} {
+  func.func public @test_transpose(%arg0: tensor<64x128xf32>) -> tensor<128x64xf32> {
+    %0 = stablehlo.transpose %arg0, dims = [1, 0] : (tensor<64x128xf32>) -> tensor<128x64xf32>
+    return %0 : tensor<128x64xf32>
+  }
+}