[Misc] Introduce pass pipeline options/overrides. (#137)
nobradovictt authored Jul 16, 2024
1 parent 9489070 commit 907f53e
Showing 5 changed files with 54 additions and 4 deletions.
17 changes: 16 additions & 1 deletion include/ttmlir/Dialect/TTNN/Passes.h
@@ -18,7 +18,22 @@ namespace mlir::tt::ttnn {
 #define GEN_PASS_REGISTRATION
 #include "ttmlir/Dialect/TTNN/Passes.h.inc"
 
-void createTTIRToTTNNBackendPipeline(OpPassManager &pm);
+// Options for the TTIR to TTNN backend pipeline.
+//
+struct TTIRToTTNNBackendPipelineOptions
+    : public PassPipelineOptions<TTIRToTTNNBackendPipelineOptions> {
+  // If this option is true, run the GridSet pass and try to set the maximum
+  // available grid size for op execution.
+  // If this option is false, skip the GridSet pass,
+  // leaving all ops on a 1x1 grid.
+  Option<bool> gridSetPassEnabled{
+      *this, "enable-grid-set",
+      llvm::cl::desc("Determine and set max valid grid for Op execution."),
+      llvm::cl::init(true)};
+};
+
+void createTTIRToTTNNBackendPipeline(
+    OpPassManager &pm, const TTIRToTTNNBackendPipelineOptions &options);
 } // namespace mlir::tt::ttnn
 
 #endif
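
The options struct follows MLIR's standard PassPipelineOptions pattern, so it can be populated programmatically as well as parsed from the command line. A minimal sketch of a C++ caller (a hypothetical helper, not part of this commit):

#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "ttmlir/Dialect/TTNN/Passes.h"

// Hypothetical helper: build the backend pipeline with GridSet disabled,
// leaving every op on the 1x1 grid.
void buildPipelineWithoutGridSet(mlir::MLIRContext &context) {
  mlir::PassManager pm(&context);
  mlir::tt::ttnn::TTIRToTTNNBackendPipelineOptions options;
  options.gridSetPassEnabled = false;
  mlir::tt::ttnn::createTTIRToTTNNBackendPipeline(pm, options);
  // pm.run(module) would then execute the configured pipeline.
}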
8 changes: 6 additions & 2 deletions lib/Dialect/TTNN/Transforms/Passes.cpp
@@ -164,9 +164,13 @@ class ConvertTTIRToTTNN
   }
 };
 
-void createTTIRToTTNNBackendPipeline(OpPassManager &pm) {
+void createTTIRToTTNNBackendPipeline(
+    OpPassManager &pm, const TTIRToTTNNBackendPipelineOptions &options) {
   pm.addPass(mlir::tt::ttir::createTTIRLayout());
-  pm.addPass(mlir::tt::ttir::createTTIRGridSet());
+  if (options.gridSetPassEnabled) {
+    pm.addPass(mlir::tt::ttir::createTTIRGridSet());
+  }
+
   pm.addPass(createTTNNOpenDevice());
   pm.addPass(createConvertTTIRToTTNN());
 }
3 changes: 2 additions & 1 deletion lib/RegisterAll.cpp
@@ -38,7 +38,8 @@ void mlir::tt::registerAllPasses() {
       "Pipeline lowering ttir to ttmetal backend.",
       mlir::tt::ttmetal::createTTIRToTTMetalBackendPipeline);
 
-  mlir::PassPipelineRegistration<>(
+  mlir::PassPipelineRegistration<
+      mlir::tt::ttnn::TTIRToTTNNBackendPipelineOptions>(
       "ttir-to-ttnn-backend-pipeline",
       "Pipeline lowering ttir to ttnn backend.",
       mlir::tt::ttnn::createTTIRToTTNNBackendPipeline);
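
Passing the options type as the template parameter is what makes the textual options work: MLIR default-constructs the struct, parses the quoted "key=value" list into its Option fields, and forwards the populated struct to the builder, so ttmlir-opt accepts --ttir-to-ttnn-backend-pipeline="enable-grid-set=false" exactly as the new test below exercises. An equivalent lambda-based registration, sketched with a hypothetical pipeline name to avoid clashing with the real one:

// Sketch: same registration written with an explicit builder lambda.
mlir::PassPipelineRegistration<
    mlir::tt::ttnn::TTIRToTTNNBackendPipelineOptions>(
    "ttir-to-ttnn-backend-pipeline-demo",
    "Demo registration forwarding parsed options to the builder.",
    [](mlir::OpPassManager &pm,
       const mlir::tt::ttnn::TTIRToTTNNBackendPipelineOptions &options) {
      mlir::tt::ttnn::createTTIRToTTNNBackendPipeline(pm, options);
    });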
15 changes: 15 additions & 0 deletions test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline.mlir
@@ -0,0 +1,15 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module attributes {torch.debug_module_name = "_lambda", tt.system_desc = #tt.system_desc<[{arch = <wormhole_b0>, grid = <8x8>, l1_size = 1048576, num_dram_channels = 12, dram_channel_size = 1048576}], [0], [<pcie|host_mmio>], [<0, 0, 0, 0>]>} {
func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK: #layout2 = #tt.layout<(d0, d1) -> (d0, d1), undef, <8x8>, memref<8x16xf32, #l1_>>
// CHECK: %[[C:.*]] = "ttnn.open_device"[[C:.*]]
// CHECK: %[[C:.*]] = "ttnn.full"[[C:.*]]
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] -> tensor<64x128xf32, #layout2>
%1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.to_memory_config"[[C:.*]]
// CHECK: "ttnn.close_device"[[C:.*]]
return %1 : tensor<64x128xf32>
}
}
15 changes: 15 additions & 0 deletions test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline_custom_opt.mlir
@@ -0,0 +1,15 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="enable-grid-set=false" %s | FileCheck %s
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module attributes {torch.debug_module_name = "_lambda", tt.system_desc = #tt.system_desc<[{arch = <wormhole_b0>, grid = <8x8>, l1_size = 1048576, num_dram_channels = 12, dram_channel_size = 1048576}], [0], [<pcie|host_mmio>], [<0, 0, 0, 0>]>} {
func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
// CHECK: #layout1 = #tt.layout<(d0, d1) -> (d0, d1), undef, <1x1>, memref<64x128xf32, #l1_>>
// CHECK: %[[C:.*]] = "ttnn.open_device"[[C:.*]]
// CHECK: %[[C:.*]] = "ttnn.full"[[C:.*]]
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] -> tensor<64x128xf32, #layout1>
%1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.to_memory_config"[[C:.*]]
// CHECK: "ttnn.close_device"[[C:.*]]
return %1 : tensor<64x128xf32>
}
}
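
The two tests differ only in the grid baked into the resulting layout attribute, and the shard shapes in their CHECK lines follow directly from dividing the tensor across the grid (my arithmetic below, not from the commit):

// enable-grid-set=true  (default): 64x128 tensor sharded over an 8x8 grid
//   per-core shard: (64/8) x (128/8) = 8x16  ->  memref<8x16xf32, #l1_>
// enable-grid-set=false: 64x128 tensor kept on a 1x1 grid
//   per-core shard: 64x128                   ->  memref<64x128xf32, #l1_>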
