[Mlir-commits] [mlir] 30b550f - [mlir][sparse] add loop simplification to sparsification pass

Aart Bik llvmlistbot at llvm.org
Fri Sep 16 16:30:09 PDT 2022


Author: Aart Bik
Date: 2022-09-16T16:29:47-07:00
New Revision: 30b550f14c0a042d60e5e52b9c4e5ad5707db66e

URL: https://github.com/llvm/llvm-project/commit/30b550f14c0a042d60e5e52b9c4e5ad5707db66e
DIFF: https://github.com/llvm/llvm-project/commit/30b550f14c0a042d60e5e52b9c4e5ad5707db66e.diff

LOG: [mlir][sparse] add loop simplification to sparsification pass

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D134090

Added: 
    mlir/test/Dialect/SparseTensor/one_trip.mlir

Modified: 
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
index 92a8f5ee46870..6f48e134d4770 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
@@ -64,6 +64,7 @@ struct SparsificationPass
     RewritePatternSet patterns(ctx);
     populateSparsificationPatterns(patterns, options);
     vector::populateVectorToVectorCanonicalizationPatterns(patterns);
+    scf::ForOp::getCanonicalizationPatterns(patterns, ctx);
     (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
   }
 };

diff --git a/mlir/test/Dialect/SparseTensor/one_trip.mlir b/mlir/test/Dialect/SparseTensor/one_trip.mlir
new file mode 100644
index 0000000000000..9a54aef16d718
--- /dev/null
+++ b/mlir/test/Dialect/SparseTensor/one_trip.mlir
@@ -0,0 +1,34 @@
+// RUN: mlir-opt %s -sparsification -cse | FileCheck %s
+
+#Dense = #sparse_tensor.encoding<{
+  dimLevelType = [ "dense" , "dense" ]
+}>
+
+#trait_scale = {
+  indexing_maps = [
+    affine_map<(i,j) -> (i,j)>  // X (out)
+  ],
+  iterator_types = ["parallel", "parallel"],
+  doc = "X(i,j) = X(i,j) * 2.0"
+}
+
+// CHECK-LABEL: func.func @sparse_scale(
+// CHECK-SAME:    %[[VAL_0:.*]]: tensor<1x1xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }>>)
+// CHECK-DAG:     %[[VAL_1:.*]] = arith.constant 0 : index
+// CHECK-DAG:     %[[VAL_2:.*]] = arith.constant 2.000000e+00 : f32
+// CHECK:         %[[VAL_3:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<1x1xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }>> to memref<?xf32>
+// CHECK:         %[[VAL_4:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_1]]] : memref<?xf32>
+// CHECK:         %[[VAL_5:.*]] = arith.mulf %[[VAL_4]], %[[VAL_2]] : f32
+// CHECK:         memref.store %[[VAL_5]], %[[VAL_3]]{{\[}}%[[VAL_1]]] : memref<?xf32>
+// CHECK:         %[[VAL_6:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<1x1xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }>>
+// CHECK:         return %[[VAL_6]] : tensor<1x1xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }>>
+func.func @sparse_scale(%argx: tensor<1x1xf32, #Dense>) -> tensor<1x1xf32, #Dense> {
+  %c = arith.constant 2.0 : f32
+  %0 = linalg.generic #trait_scale
+    outs(%argx: tensor<1x1xf32, #Dense>) {
+      ^bb(%x: f32):
+        %1 = arith.mulf %x, %c : f32
+        linalg.yield %1 : f32
+  } -> tensor<1x1xf32, #Dense>
+  return %0 : tensor<1x1xf32, #Dense>
+}


        


More information about the Mlir-commits mailing list