
[mlir][sparse] remove sparse encoding propagation pass. #93593

Merged: 1 commit merged into llvm:main from the remove-pass branch on May 28, 2024

Conversation

PeimingLiu (Member)

This reverts commit 4962148.

@PeimingLiu marked this pull request as ready for review on May 28, 2024 18:22
@llvmbot added the mlir:sparse (Sparse compiler in MLIR) and mlir labels on May 28, 2024
@PeimingLiu merged commit 9983592 into llvm:main on May 28, 2024
9 of 11 checks passed
@llvmbot (Member) commented on May 28, 2024

@llvm/pr-subscribers-mlir-sparse

@llvm/pr-subscribers-mlir

Author: Peiming Liu (PeimingLiu)

Changes

This reverts commit 4962148.


Full diff: https://github.com/llvm/llvm-project/pull/93593.diff

3 Files Affected:

  • (modified) mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h (-6)
  • (modified) mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td (-36)
  • (modified) mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp (-13)
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
index bb49d6c256f21..d6d038ef65bdf 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
@@ -65,12 +65,6 @@ void populateSparseAssembler(RewritePatternSet &patterns, bool directOut);
 std::unique_ptr<Pass> createSparseAssembler();
 std::unique_ptr<Pass> createSparseAssembler(bool directOut);
 
-//===----------------------------------------------------------------------===//
-// The SparseEncodingPropagation pass.
-//===----------------------------------------------------------------------===//
-
-std::unique_ptr<Pass> createSparseEncodingPropagationPass();
-
 //===----------------------------------------------------------------------===//
 // The SparseReinterpretMap pass.
 //===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
index 94c3ca60030ee..2f844cee5ff52 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
@@ -40,42 +40,6 @@ def SparseAssembler : Pass<"sparse-assembler", "ModuleOp"> {
   ];
 }
 
-def SparseEncodingPropagation : Pass<"sparse-encoding-propagation", "func::FuncOp"> {
-  let summary = "Propagate sparse tensor encodings";
-  let description = [{
-    A pass that propagates sparse tensor encodings.
-
-    Background: To avoid introducing repetitive operations, sparse tensors
-    in MLIR try to reuse tensor operations whenever available. However, most
-    tensor operations are canonicalized/transformed without the knowledge
-    of sparsity. The pass tries to propagate missing sparse encodings.
-
-    For example:
-    ```mlir
-    %s = tensor.extract_slice %input[0, 0,] [2, 1] [1, 1]
-       : tensor<2x3xf32, #sparse> to tensor<2x1xf32, #sparse>
-
-    // After rank reducing (by tensor dialect transformation)
-    %t = tensor.extract_slice %input[0, 0,] [2, 1] [1, 1]
-       : tensor<2x3xf32, #sparse> to tensor<2xf32>
-    %s = tensor.expand_shape [[0, 1]] %t
-       : tensor<2xf32> to tensor<2x1xf32, #sparse>
-
-    // After sparsity propagation
-    %t = tensor.extract_slice %input[0, 0,] [2, 1] [1, 1]
-       : tensor<2x3xf32, #sparse> to tensor<2xf32, #sparse1>
-    %s = tensor.expand_shape [[0, 1]] %t
-       : tensor<2xf32, #sparse1> to tensor<2x1xf32, #sparse>
-    ```
-  }];
-
-  let constructor = "mlir::createSparseEncodingPropagationPass()";
-  let dependentDialects = [
-    "sparse_tensor::SparseTensorDialect",
-    "tensor::TensorDialect",
-  ];
-}
-
 def SparseReinterpretMap : Pass<"sparse-reinterpret-map", "ModuleOp"> {
   let summary = "Reinterprets sparse tensor type mappings";
   let description = [{
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
index f57353b5892b5..b42d58634a36c 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
@@ -23,7 +23,6 @@
 
 namespace mlir {
 #define GEN_PASS_DEF_SPARSEASSEMBLER
-#define GEN_PASS_DEF_SPARSEENCODINGPROPAGATION
 #define GEN_PASS_DEF_SPARSEREINTERPRETMAP
 #define GEN_PASS_DEF_PRESPARSIFICATIONREWRITE
 #define GEN_PASS_DEF_SPARSIFICATIONPASS
@@ -61,14 +60,6 @@ struct SparseAssembler : public impl::SparseAssemblerBase<SparseAssembler> {
   }
 };
 
-struct SparseEncodingPropagation
-    : public impl::SparseEncodingPropagationBase<SparseEncodingPropagation> {
-  SparseEncodingPropagation() = default;
-  SparseEncodingPropagation(const SparseEncodingPropagation &pass) = default;
-
-  void runOnOperation() override {}
-};
-
 struct SparseReinterpretMap
     : public impl::SparseReinterpretMapBase<SparseReinterpretMap> {
   SparseReinterpretMap() = default;
@@ -407,10 +398,6 @@ std::unique_ptr<Pass> mlir::createSparseAssembler() {
   return std::make_unique<SparseAssembler>();
 }
 
-std::unique_ptr<Pass> mlir::createSparseEncodingPropagationPass() {
-  return std::make_unique<SparseEncodingPropagation>();
-}
-
 std::unique_ptr<Pass> mlir::createSparseReinterpretMapPass() {
   return std::make_unique<SparseReinterpretMap>();
 }
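
For context on what the revert removes, here is a minimal, hypothetical sketch of how the deleted factory could have been scheduled in a pass pipeline. It is not code from this PR: only `createSparseEncodingPropagationPass()`, the `func::FuncOp` anchor, and the empty `runOnOperation()` body come from the diff above; the pipeline function and includes are illustrative assumptions.

```cpp
// Illustrative sketch (not from this PR): scheduling the now-removed pass.
// `createSparseEncodingPropagationPass()` and the func::FuncOp anchor are
// taken from the deleted declarations above; everything else is hypothetical.
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"

static void addSparsificationPasses(mlir::PassManager &pm) {
  // The pass was declared on func::FuncOp, so nest it under the module.
  pm.addNestedPass<mlir::func::FuncOp>(
      mlir::createSparseEncodingPropagationPass());
  // Before this revert the pass was still a placeholder: its
  // runOnOperation() body was empty, so scheduling it had no effect.
}
```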

@PeimingLiu deleted the remove-pass branch on May 28, 2024 18:31
vg0204 pushed a commit to vg0204/llvm-project that referenced this pull request May 29, 2024
Labels: mlir:sparse (Sparse compiler in MLIR), mlir
Projects: None yet
3 participants