Add support for expm1 op (#1045)
- Add an end-to-end implementation of the expm1 op
- Add StableHLO-to-TTIR-to-TTNN IR conversion for the op (sketched below)
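
For orientation, the lowering chain this commit wires up, sketched from the lit tests added below (operand-constraint and layout attributes are elided, so this is illustrative rather than verbatim IR):

    // StableHLO frontend op:
    %0 = stablehlo.exponential_minus_one %arg0 : tensor<64x128xf32>
    // --stablehlo-to-ttir-pipeline rewrites it to the TTIR op in destination-passing style:
    %1 = tensor.empty() : tensor<64x128xf32>
    %2 = "ttir.expm1"(%arg0, %1) : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
    // --ttir-to-ttnn-backend-pipeline maps that 1:1 onto "ttnn.expm1", which the
    // flatbuffer runtime dispatches to ::ttnn::expm1 via EltwiseOpType::Expm1.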
ajakovljevicTT authored Nov 1, 2024
1 parent d3ef8e5 commit f2ccda0
Showing 12 changed files with 83 additions and 0 deletions.
11 changes: 11 additions & 0 deletions include/ttmlir/Dialect/TTIR/IR/TTIROps.td
@@ -293,6 +293,17 @@ def TTIR_Log1pOp: TTIR_ElementwiseUnaryOp<"log1p"> {
}];
}

def TTIR_Expm1Op: TTIR_ElementwiseUnaryOp<"expm1"> {
let description = [{
    Performs an element-wise exponential-minus-one operation on the `operand` tensor
    and stores the result in the output tensor.

    Example:
      %a: [[0, 1], [0, 0]]
      "ttir.expm1"(%a, %out) -> %out: [[0, 1.71828], [0, 0]]
}];
}

class TTIR_ElementwiseBinaryOp<string mnemonic, list<Trait> traits = []> :
TTIR_ElementwiseOp<mnemonic, traits> {
let summary = "Eltwise binary op.";
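Like the other elementwise unary ops, ttir.expm1 is destination-passing style: it takes the input tensor plus a pre-allocated output tensor and returns the output. A minimal fully-attributed usage sketch, mirroring the lit tests added below (the 64x128 shape and the operand constraints are illustrative):

    %0 = tensor.empty() : tensor<64x128xf32>
    %1 = "ttir.expm1"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>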
11 changes: 11 additions & 0 deletions include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
@@ -256,6 +256,17 @@ def TTNN_Log1pOp: TTNN_ElementwiseUnaryOp<"log1p"> {
}];
}

def TTNN_Expm1Op: TTNN_ElementwiseUnaryOp<"expm1"> {
let description = [{
    Performs an element-wise exponential-minus-one operation on the `operand` tensor
    and stores the result in the output tensor.

    Example:
      %a: [[0, 1], [0, 0]]
      "ttnn.expm1"(%a, %out) -> %out: [[0, 1.71828], [0, 0]]
}];
}

def TTNN_AddOp : TTNN_ElementwiseBinaryOp<"add"> {
let summary = "Eltwise add.";
let description = [{
1 change: 1 addition & 0 deletions include/ttmlir/Target/TTNN/program.fbs
@@ -88,6 +88,7 @@ enum EltwiseOpType: uint32 {
Cos = 27,
Log = 28,
Log1p = 29,
Expm1 = 30
}

union EltwiseOpParams {
2 changes: 2 additions & 0 deletions lib/Conversion/StableHLOToTTIR/StableHLOToTTIRPatterns.cpp
@@ -861,6 +861,8 @@ void addElementwiseUnaryOpsConversionPatterns(MLIRContext *ctx,
mlir::stablehlo::SqrtOp, mlir::tt::ttir::SqrtOp>>(typeConverter, ctx);
patterns.add<StableHLOToTTIROpDefaultConversionPattern<
mlir::stablehlo::Log1pOp, mlir::tt::ttir::Log1pOp>>(typeConverter, ctx);
patterns.add<StableHLOToTTIROpDefaultConversionPattern<
mlir::stablehlo::Expm1Op, mlir::tt::ttir::Expm1Op>>(typeConverter, ctx);
}

void addElementwiseBinaryOpsConversionPatterns(MLIRContext *ctx,
1 change: 1 addition & 0 deletions lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
@@ -885,6 +885,7 @@ void populateTTIRToTTNNPatterns(MLIRContext *ctx, RewritePatternSet &patterns,
ElementwiseOpConversionPattern<ttir::CeilOp, ttnn::CeilOp>,
ElementwiseOpConversionPattern<ttir::SinOp, ttnn::SinOp>,
ElementwiseOpConversionPattern<ttir::CosOp, ttnn::CosOp>,
ElementwiseOpConversionPattern<ttir::Expm1Op, ttnn::Expm1Op>,
ReductionOpConversionPattern<ttir::SumOp, ttnn::SumOp>,
ReductionOpConversionPattern<ttir::MeanOp, ttnn::MeanOp>,
ReductionOpConversionPattern<ttir::MaxOp, ttnn::MaxOp>,
1 change: 1 addition & 0 deletions lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
@@ -628,6 +628,7 @@ void populateTTNNToEmitCPatterns(mlir::MLIRContext *ctx,
DefaultOpConversionPattern<ttnn::CeilOp>,
DefaultOpConversionPattern<ttnn::SinOp>,
DefaultOpConversionPattern<ttnn::CosOp>,
DefaultOpConversionPattern<ttnn::Expm1Op>,
DefaultOpConversionPattern<ttnn::LogOp>>(typeConverter, ctx);

// Eltwise binary ops
5 changes: 5 additions & 0 deletions lib/Target/TTNN/TTNNToFlatbuffer.cpp
@@ -355,6 +355,8 @@ createEltwiseOp(FlatbufferObjectCache &cache, EltwiseOp op) {
type = ::tt::target::ttnn::EltwiseOpType::Sin;
} else if constexpr (std::is_same_v<EltwiseOp, LogOp>) {
type = ::tt::target::ttnn::EltwiseOpType::Log;
} else if constexpr (std::is_same_v<EltwiseOp, Expm1Op>) {
type = ::tt::target::ttnn::EltwiseOpType::Expm1;
} else {
llvm_unreachable("unhandled EltwiseOp");
}
@@ -604,6 +606,9 @@ emitTTNNOperation(FlatbufferObjectCache &cache, Operation *op,
if (auto logOp = dyn_cast<LogOp>(op); logOp) {
return createOperation(cache, createEltwiseOp(cache, logOp), debugString);
}
if (auto expm1Op = dyn_cast<Expm1Op>(op); expm1Op) {
return createOperation(cache, createEltwiseOp(cache, expm1Op), debugString);
}
if (auto sigmoidOp = dyn_cast<SigmoidOp>(op); sigmoidOp) {
return createOperation(cache, createEltwiseOp(cache, sigmoidOp),
debugString);
4 changes: 4 additions & 0 deletions runtime/lib/ttnn/operations/eltwise/unary/unary.cpp
@@ -104,6 +104,10 @@ void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context) {
runEltwiseUnaryOP(op, tensorPool, ::ttnn::log);
break;
}
case ::tt::target::ttnn::EltwiseOpType::Expm1: {
runEltwiseUnaryOP(op, tensorPool, ::ttnn::expm1);
break;
}
default:
throw std::invalid_argument("Unsupported unary operation");
}
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module @jit_eltwise_expm1 attributes {} {
func.func public @test_expm1(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
%0 = stablehlo.exponential_minus_one %arg0 : tensor<13x21x3xf32>
// CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]]
// CHECK: [[VAL1:%[0-9]+]] = "ttir.expm1"(%arg0, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]]
return %0 : tensor<13x21x3xf32>
// CHECK: return [[VAL1]] : [[TENSOR_SIZE]]
}
}
12 changes: 12 additions & 0 deletions test/ttmlir/Dialect/TTNN/eltwise/unary/expm1/simple_expm1.mlir
@@ -0,0 +1,12 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module attributes {} {
func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
%1 = "ttir.expm1"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %{{[0-9]+}} = "ttnn.expm1"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
return %1 : tensor<64x128xf32>
// CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
}
}
14 changes: 14 additions & 0 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_expm1.mlir
@@ -0,0 +1,14 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
// RUN: FileCheck %s --input-file=%t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

func.func @expm1(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
%1 = "ttir.expm1"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %{{[0-9]+}} = "ttnn.expm1"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
return %1 : tensor<64x128xf32>
// CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
}
9 changes: 9 additions & 0 deletions test/ttmlir/Silicon/TTNN/simple_eltwise.mlir
@@ -200,3 +200,12 @@ func.func @log1p(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
return %1 : tensor<64x128xf32>
// CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
}

func.func @expm1(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
%1 = "ttir.expm1"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %{{[0-9]+}} = "ttnn.expm1"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
return %1 : tensor<64x128xf32>
// CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
}
