Softmax
mtopalovicTT committed Oct 3, 2024
1 parent be3ce1c commit 346225d
Showing 7 changed files with 6 additions and 16 deletions.
7 changes: 1 addition & 6 deletions include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
@@ -316,22 +316,17 @@ def TTNN_EmbeddingOp : TTNN_Op<"embedding"> {
   let hasVerifier = 1;
 }

-def TTNN_SoftmaxOp : TTNN_NamedDPSOp<"softmax"> {
+def TTNN_SoftmaxOp : TTNN_Op<"softmax"> {
   let summary = "Softmax op.";
   let description = [{
     Softmax operation.
   }];

   let arguments = (ins AnyRankedTensor:$input,
-                       AnyRankedTensor:$output,
                        SI32Attr: $dimension);

   let results = (outs AnyRankedTensor:$result);

-  let extraClassDeclaration = [{
-    MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }
-  }];
-
   let hasVerifier = 1;
 }

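With the DPS interface gone, ttnn.softmax takes only the input tensor and a dimension attribute, and produces its output purely as a result. A minimal sketch of the op's generic form after this commit (shape and dimension value are illustrative, borrowed from the tests below):

    %1 = "ttnn.softmax"(%arg0) <{dimension = 1 : si32}> : (tensor<512x1024xbf16>) -> tensor<512x1024xbf16>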
3 changes: 2 additions & 1 deletion lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
@@ -301,9 +301,10 @@ class SoftmaxOpConversionPattern : public OpConversionPattern<ttir::SoftmaxOp> {
   LogicalResult
   matchAndRewrite(ttir::SoftmaxOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
+    removeDpsOp(rewriter, adaptor);
     rewriter.replaceOpWithNewOp<ttnn::SoftmaxOp>(
         op, this->getTypeConverter()->convertType(op.getType()),
-        adaptor.getInput(), adaptor.getOutput(), adaptor.getDimension());
+        adaptor.getInput(), adaptor.getDimension());
     return success();
   }
 };
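The effect of the pattern, judging by the test updates below: the DPS ttir.softmax lowers to a ttnn.softmax without the output operand, and no ttnn.empty is emitted for it anymore. A before/after sketch (operand_constraints elided for brevity):

    // TTIR input, DPS form: %0 is the output operand.
    %0 = tensor.empty() : tensor<512x1024xbf16>
    %1 = "ttir.softmax"(%arg0, %0) <{dimension = 1 : si32}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16>

    // TTNN output after conversion: no output operand, no ttnn.empty.
    %1 = "ttnn.softmax"(%arg0) <{dimension = 1 : si32}> : (tensor<512x1024xbf16>) -> tensor<512x1024xbf16>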
2 changes: 1 addition & 1 deletion lib/Dialect/TTNN/IR/TTNNOps.cpp
@@ -107,7 +107,7 @@ ::mlir::LogicalResult mlir::tt::ttnn::EmbeddingOp::verify() {

 ::mlir::LogicalResult mlir::tt::ttnn::SoftmaxOp::verify() {
   ::mlir::RankedTensorType inputType = getInput().getType();
-  ::mlir::RankedTensorType outputType = getOutput().getType();
+  ::mlir::RankedTensorType outputType = getResult().getType();

   // Shapes of input and output of a softmax operation must be the same
   if (inputType.getShape() != outputType.getShape()) {
4 changes: 2 additions & 2 deletions lib/Target/TTNN/TTNNToFlatbuffer.cpp
@@ -390,8 +390,8 @@ ::flatbuffers::Offset<::tt::target::ttnn::SoftmaxOp>
 createSoftmaxOp(FlatbufferObjectCache &cache, SoftmaxOp op) {
   auto in =
       cache.at<::tt::target::TensorRef>(getOperandThroughDPSOps(op.getInput()));
-  auto out = cache.at<::tt::target::TensorRef>(
-      getOperandThroughDPSOps(op.getResult()));
+  auto out = cache.getOrCreate(op.getResult(), tensorValueToFlatbuffer,
+                               kHostAllocatedAddress, kHostAllocatedSize);
   int32_t dimension = op.getDimension();

   return ::tt::target::ttnn::CreateSoftmaxOp(*cache.fbb, in, out, dimension);
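Since the result no longer aliases a DPS operand, the serializer cannot resolve the output through getOperandThroughDPSOps; instead it creates a fresh host-allocated TensorRef for the result via cache.getOrCreate.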
2 changes: 0 additions & 2 deletions test/ttmlir/Dialect/TTNN/softmax/simple_softmax.mlir
@@ -2,12 +2,10 @@
 #any_device = #tt.operand_constraint<dram|l1|tile|any_device|any_device_tile>
 module attributes {} {
   func.func @forward(%arg0: tensor<512x1024xbf16>) -> tensor<512x1024xbf16> {
-    // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<512x1024xbf16>
     // CHECK: %[[C:.*]] = "ttnn.softmax"[[C:.*]]
     // Check for positive dimension attribute
     %1 = "ttir.softmax"(%arg0, %0) <{dimension = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16>
-    // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %2 = tensor.empty() : tensor<512x1024xbf16>
     // CHECK: %[[C:.*]] = "ttnn.softmax"[[C:.*]]
     // Check for negative dimension attribute
2 changes: 0 additions & 2 deletions test/ttmlir/Silicon/TTNN/sharded/simple_eltwise_sharded.mlir
@@ -83,12 +83,10 @@ func.func @sqrt(%arg0: tensor<224x64xf32>) -> tensor<224x64xf32> {
 }

 func.func @softmax(%arg0: tensor<224x64xbf16>) -> tensor<224x64xbf16> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xbf16>
   // CHECK: %[[C:.*]] = "ttnn.softmax"[[C:.*]]
   // Check for positive dimension attribute
   %1 = "ttir.softmax"(%arg0, %0) <{dimension = 1 : si32, operand_constraints = [#l1_block_sharded, #l1_block_sharded]}> : (tensor<224x64xbf16>, tensor<224x64xbf16>) -> tensor<224x64xbf16>
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %2 = tensor.empty() : tensor<224x64xbf16>
   // CHECK: %[[C:.*]] = "ttnn.softmax"[[C:.*]]
   // Check for negative dimension attribute
2 changes: 0 additions & 2 deletions test/ttmlir/Silicon/TTNN/simple_eltwise.mlir
@@ -106,12 +106,10 @@ func.func @rsqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
 }

 func.func @softmax(%arg0: tensor<512x1024xbf16>) -> tensor<512x1024xbf16> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<512x1024xbf16>
   // CHECK: %[[C:.*]] = "ttnn.softmax"[[C:.*]]
   // Check for positive dimension attribute
   %1 = "ttir.softmax"(%arg0, %0) <{dimension = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16>
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %2 = tensor.empty() : tensor<512x1024xbf16>
   // CHECK: %[[C:.*]] = "ttnn.softmax"[[C:.*]]
   // Check for negative dimension attribute
