From 4752426a89da221778e821181d4e7ccd230adabc Mon Sep 17 00:00:00 2001
From: Ashok Kumar Kannan
Date: Fri, 22 Nov 2024 10:14:25 +0000
Subject: [PATCH] Map Clip, LeakyRelu, Gelu ops in MLIR

---
 forge/csrc/passes/lower_to_mlir.cpp |  5 ++++-
 forge/forge/op/eltwise_unary.py     |  2 +-
 forge/test/mlir/test_ops.py         | 15 ++++++++++++---
 3 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/forge/csrc/passes/lower_to_mlir.cpp b/forge/csrc/passes/lower_to_mlir.cpp
index 2db10b26c..d876ae6fd 100644
--- a/forge/csrc/passes/lower_to_mlir.cpp
+++ b/forge/csrc/passes/lower_to_mlir.cpp
@@ -555,14 +555,17 @@ class MLIRGenerator
         lowering_handler_map["abs"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["add"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["cast"] = &MLIRGenerator::emit_mlir_ttforge_op;
+        lowering_handler_map["clip"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["concatenate"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["conv2d"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["cosine"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["embedding"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["equal"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["exp"] = &MLIRGenerator::emit_mlir_ttforge_op;
+        lowering_handler_map["gelu"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["greater_equal"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["greater"] = &MLIRGenerator::emit_mlir_ttforge_op;
+        lowering_handler_map["leaky_relu"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["less"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["matmul"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["max_pool2d"] = &MLIRGenerator::emit_mlir_ttforge_op;
@@ -574,6 +577,7 @@ class MLIRGenerator
         lowering_handler_map["reduce_max"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["reduce_sum"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["relu"] = &MLIRGenerator::emit_mlir_ttforge_op;
+        lowering_handler_map["remainder"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["reshape"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["sigmoid"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["sine"] = &MLIRGenerator::emit_mlir_ttforge_op;
@@ -583,7 +587,6 @@ class MLIRGenerator
         lowering_handler_map["subtract"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["transpose"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["unsqueeze"] = &MLIRGenerator::emit_mlir_ttforge_op;
-        lowering_handler_map["remainder"] = &MLIRGenerator::emit_mlir_ttforge_op;
     }
 };
 } // namespace

diff --git a/forge/forge/op/eltwise_unary.py b/forge/forge/op/eltwise_unary.py
index 365cb882e..cb4d9d84e 100644
--- a/forge/forge/op/eltwise_unary.py
+++ b/forge/forge/op/eltwise_unary.py
@@ -267,7 +267,7 @@ def LeakyRelu(name: str, operandA: Tensor, alpha: int) -> Tensor:
         Forge tensor
     """
 
-    return op("leaky_relu", name, operandA, attrs=(alpha,)).get_tensor()
+    return op("leaky_relu", name, operandA, attrs=(alpha,), parameter=alpha).get_tensor()
 
 
 def Gelu(name: str, operandA: Tensor, approximate="none") -> Tensor:

diff --git a/forge/test/mlir/test_ops.py b/forge/test/mlir/test_ops.py
index a23776ded..a132e79ef 100644
--- a/forge/test/mlir/test_ops.py
+++ b/forge/test/mlir/test_ops.py
@@ -417,7 +417,6 @@ def forward(self, x):
         (1, 32, 512, 512),
     ],
 )
-@pytest.mark.xfail(reason="Found Unsupported operations while lowering from TTForge to TTIR in forward graph")
 @pytest.mark.push
 def test_leakyrelu(shape):
     class LeakyRelu(nn.Module):
@@ -465,7 +464,6 @@ def test_layernorm(batch_size, num_channels, height, width):
         (1, 128, 4096),
     ],
 )
-@pytest.mark.xfail(reason="Found Unsupported operations while lowering from TTForge to TTIR in forward graph")
 @pytest.mark.push
 def test_gelu(shape):
     class Gelu(nn.Module):
@@ -486,9 +484,20 @@ def test_gelu(shape):
     "shape, min_val, max_val",
     [
         ((1, 1, 256, 256), 0, 1),
+        ((1, 96, 96, 24), 6.0, 0.0),
+        ((1, 1, 32, 32), -0.5, 0.5),
+        ((2, 10, 5, 20), 2.0, -1.0),
+        ((3, 3, 3, 3), -3.0, -1.0),
+        ((1, 64, 64), -0.5, 0.5),
+        ((1, 128, 128), 1.0, -1.0),
+        ((2, 2, 2), -1.0, 0.0),
+        ((32, 32), -0.2, 0.2),
+        ((3, 3), -0.5, -0.2),
+        ((4,), 0.0, 2.0),
+        ((8,), -3.0, -1.0),
     ],
 )
-@pytest.mark.xfail(reason="Found Unsupported operations while lowering from TTForge to TTIR in forward graph")
+@pytest.mark.xfail(reason="'ttnn.clamp' op input and output must have same shape")
 @pytest.mark.push
 def test_clip(shape, min_val, max_val):
     class Clip(nn.Module):
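-- 
Note (illustrative, not part of the patch): the hunks above cut the test
bodies off near the `def` line. Going by the visible test_clip skeleton, a
minimal end-to-end check for one of the newly mapped ops would look roughly
like the sketch below. forge.compile and its sample_inputs argument are
assumed from the surrounding test file; the test and module names here are
made up for illustration.

    import torch
    from torch import nn

    import forge


    def test_clip_sketch():
        class Clip(nn.Module):
            def __init__(self, min_val, max_val):
                super().__init__()
                self.min_val = min_val
                self.max_val = max_val

            def forward(self, x):
                # torch.clamp clips every element into [min_val, max_val].
                # When min_val > max_val (several cases in the parametrize
                # list above), torch sets every element to max_val, so the
                # swapped-bound cases are still well-defined inputs.
                return torch.clamp(x, self.min_val, self.max_val)

        inputs = [torch.rand(1, 1, 256, 256)]
        framework_model = Clip(0.0, 1.0)
        # Compiling through forge is what exercises the new "clip" entry in
        # lowering_handler_map; the compile API is assumed from test_ops.py.
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
        compiled_model(*inputs)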
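Note (illustrative, not part of the patch): the eltwise_unary.py change
forwards alpha twice, once in the positional attrs tuple and once as the
keyword argument parameter=alpha, presumably so the MLIR lowering can look
the attribute up by name. A hypothetical call site for the wrapper follows;
the import path and Tensor.create_from_torch constructor are assumptions
about forge's public API, and the node name string is arbitrary. (The
alpha: int annotation in the context line reads like it should be float,
since fractional negative slopes are the common case.)

    import torch

    import forge
    from forge.op import LeakyRelu

    # Wrap a torch tensor as a forge Tensor (constructor assumed).
    activations = forge.Tensor.create_from_torch(torch.rand(1, 32, 32))

    # "leaky_relu_0" is just a graph node name; alpha is the negative-slope
    # attribute that the new named-parameter plumbing carries into MLIR.
    out = LeakyRelu("leaky_relu_0", activations, alpha=0.01)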