[Mlir-commits] [mlir] 57416d8 - [mlir][sparse] Fix a bug in rewriting dense2dense convert op.

llvmlistbot at llvm.org llvmlistbot at llvm.org
Fri Nov 11 09:12:00 PST 2022


Author: bixia1
Date: 2022-11-11T09:11:54-08:00
New Revision: 57416d872ae0fd3b8ee1808fed5c6b2ad4925bd8

URL: https://github.com/llvm/llvm-project/commit/57416d872ae0fd3b8ee1808fed5c6b2ad4925bd8
DIFF: https://github.com/llvm/llvm-project/commit/57416d872ae0fd3b8ee1808fed5c6b2ad4925bd8.diff

LOG: [mlir][sparse] Fix a bug in rewriting dense2dense convert op.

Permutation wasn't handled correctly. Add a test for the rewriting.

Extend an integration test to run with enable_runtime_library=false to
also test the rewriting.

Reviewed By: Peiming

Differential Revision: https://reviews.llvm.org/D137845

Added: 
    

Modified: 
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
    mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir

Removed: 
    


################################################################################
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index 6d1fa8b095b57..6da7b8ab38e5c 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -620,7 +620,6 @@ struct ConvertRewriter : public OpRewritePattern<ConvertOp> {
     Value src = op.getSource();
     RankedTensorType srcTp = src.getType().cast<RankedTensorType>();
     RankedTensorType dstTp = op.getType().cast<RankedTensorType>();
-    SparseTensorEncodingAttr encSrc = getSparseTensorEncoding(srcTp);
     SparseTensorEncodingAttr encDst = getSparseTensorEncoding(dstTp);
 
     SmallVector<Value, 4> srcSizes;
@@ -640,17 +639,15 @@ struct ConvertRewriter : public OpRewritePattern<ConvertOp> {
           loc, src, tmpCoo,
           [&](OpBuilder &builder, Location loc, ValueRange args, Value v,
               ValueRange reduc) {
-            SmallVector<Value, 4> indices;
-            for (int64_t i = 0, e = srcTp.getRank(); i < e; i++) {
-              uint64_t dim = toStoredDim(encSrc, i);
-              indices.push_back(args[dim]);
-            }
-            auto t = builder.create<InsertOp>(loc, v, reduc.front(), indices);
+            // The resulting COO tensor has identity ordering.
+            auto t = builder.create<InsertOp>(loc, v, reduc.front(),
+                                              args.slice(0, srcTp.getRank()));
             builder.create<sparse_tensor::YieldOp>(loc, t);
           });
       src = rewriter.create<LoadOp>(loc, foreachOp.getResult(0), true);
     }
 
+    SparseTensorEncodingAttr encSrc = getSparseTensorEncoding(srcTp);
     // Sort the COO tensor so that its elements are ordered via increasing
     // indices for the storage ordering of the dst tensor.
     auto dynShape = {ShapedType::kDynamicSize};
@@ -682,14 +679,14 @@ struct ConvertRewriter : public OpRewritePattern<ConvertOp> {
     getDynamicSizes(dstTp, srcSizes, dynDstSizes);
     Value dst =
         rewriter.create<AllocTensorOp>(loc, dstTp, dynDstSizes).getResult();
+    SmallVector<Value, 4> indices(srcTp.getRank(), Value());
     auto foreachOp = rewriter.create<ForeachOp>(
         loc, src, dst,
         [&](OpBuilder &builder, Location loc, ValueRange args, Value v,
             ValueRange reduc) {
-          SmallVector<Value, 4> indices;
           for (int64_t i = 0, e = srcTp.getRank(); i < e; i++) {
             uint64_t dim = toStoredDim(encDst, i);
-            indices.push_back(args[dim]);
+            indices[dim] = args[i];
           }
           auto t = builder.create<InsertOp>(loc, v, reduc.front(), indices);
           builder.create<sparse_tensor::YieldOp>(loc, t);

diff  --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
index f806d894aa1c9..2651a09897844 100644
--- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
@@ -25,6 +25,16 @@
   dimLevelType = ["compressed"]
 }>
 
+#SortedCOO3D = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ]
+
+}>
+
+#TsssPermuted = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed", "compressed", "compressed" ],
+  dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
+}>
+
 // CHECK-LABEL: func @sparse_nop_convert(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
 //       CHECK: return %[[A]] : !llvm.ptr<i8>
@@ -146,3 +156,31 @@ func.func @sparse_convert_singleton(%arg0: tensor<?xf32, #SparseSingleton64>) ->
   %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseSingleton64> to tensor<?xf32, #SparseSingleton32>
   return %0 : tensor<?xf32, #SparseSingleton32>
 }
+
+// CHECK-WRT-LABEL: func.func @sparse_convert_permuted(
+//  CHECK-WRT-SAME: %[[COO:.*]]:
+//   CHECK-WRT-DAG: %[[C0:.*]] = arith.constant 0 : index
+//   CHECK-WRT-DAG: %[[C1:.*]] = arith.constant 1 : index
+//   CHECK-WRT-DAG: %[[C2:.*]] = arith.constant 2 : index
+//       CHECK-WRT: %[[D0:.*]] = tensor.dim %[[COO]], %[[C0]]
+//       CHECK-WRT: %[[D1:.*]] = tensor.dim %[[COO]], %[[C1]]
+//       CHECK-WRT: %[[D2:.*]] = tensor.dim %[[COO]], %[[C2]]
+//       CHECK-WRT: %[[I0:.*]] = sparse_tensor.indices %[[COO]] {dimension = 0 : index}
+//       CHECK-WRT: %[[I1:.*]] = sparse_tensor.indices %[[COO]] {dimension = 1 : index}
+//       CHECK-WRT: %[[I2:.*]] = sparse_tensor.indices %[[COO]] {dimension = 2 : index}
+//       CHECK-WRT: %[[NNZ:.*]] = sparse_tensor.number_of_entries %[[COO]]
+//       CHECK-WRT: %[[V:.*]] = sparse_tensor.values %[[COO]]
+//       CHECK-WRT: sparse_tensor.sort %[[NNZ]], %[[I2]], %[[I0]], %[[I1]] jointly %[[V]]
+//       CHECK-WRT: %[[T1:.*]] = bufferization.alloc_tensor(%[[D0]], %[[D1]], %[[D2]])
+//       CHECK-WRT: %[[T2:.*]] = sparse_tensor.foreach in %[[COO]] init(%[[T1]])
+//       CHECK-WRT: ^bb0(%[[LI0:.*]]: index, %[[LI1:.*]]: index, %[[LI2:.*]]: index, %[[LV:.*]]: f32, %[[LT1:.*]]: tensor<?x?x?xf32,
+//       CHECK-WRT:   %[[LT2:.*]] = sparse_tensor.insert %[[LV]] into %[[LT1]]{{\[}}%[[LI2]], %[[LI0]], %[[LI1]]]
+//       CHECK-WRT:   sparse_tensor.yield %[[LT2]]
+//       CHECK-WRT: }
+//       CHECK-WRT: %[[T3:.*]] = sparse_tensor.load %[[T2:.*]] hasInserts
+//       CHECK-WRT: %[[T4:.*]] = sparse_tensor.convert %[[T3]]
+//       CHECK-WRT: return %[[T4]]
+func.func @sparse_convert_permuted(%arg0: tensor<?x?x?xf32, #SortedCOO3D>) -> tensor<?x?x?xf32, #TsssPermuted> {
+  %0 = sparse_tensor.convert %arg0 : tensor<?x?x?xf32, #SortedCOO3D> to tensor<?x?x?xf32, #TsssPermuted>
+  return %0 : tensor<?x?x?xf32, #TsssPermuted>
+}

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
index 34fa31e85014c..18568f8e8a404 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
@@ -1,4 +1,12 @@
-// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-opt %s --sparse-compiler="enable-runtime-library=true" | \
+// RUN: TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \
+// RUN: TENSOR1="%mlir_src_dir/test/Integration/data/mttkrp_b.tns" \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+// RUN: mlir-opt %s --sparse-compiler="enable-runtime-library=false enable-buffer-initialization=true" | \
 // RUN: TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \
 // RUN: TENSOR1="%mlir_src_dir/test/Integration/data/mttkrp_b.tns" \
 // RUN: mlir-cpu-runner \
@@ -66,7 +74,7 @@ module {
 
   func.func @dumpf(%arg0: memref<?xf64>) {
     %c0 = arith.constant 0 : index
-    %nan = arith.constant 0x7FF0000001000000 : f64
+    %nan = arith.constant 0x0 : f64
     %v = vector.transfer_read %arg0[%c0], %nan: memref<?xf64>, vector<20xf64>
     vector.print %v : vector<20xf64>
     return
@@ -96,7 +104,7 @@ module {
     // CHECK:      ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
     // CHECK-NEXT: ( 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0 )
     // CHECK-NEXT: ( 0, 126, 127, 254, 1, 253, 2, 0, 1, 3, 98, 126, 127, 128, 249, 253, 255, 0, 0, 0 )
-    // CHECK-NEXT: ( -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17, nan, nan, nan )
+    // CHECK-NEXT: ( -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17, 0, 0, 0 )
     //
     %p0 = sparse_tensor.pointers %0 { dimension = 0 : index }
       : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
@@ -115,7 +123,7 @@ module {
     // CHECK-NEXT: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
     // CHECK-NEXT: ( 0, 0, 1, 1, 2, 3, 98, 126, 126, 127, 127, 128, 249, 253, 253, 254, 255, 0, 0, 0 )
     // CHECK-NEXT: ( 0, 3, 1, 3, 2, 3, 3, 0, 3, 0, 3, 3, 3, 1, 3, 0, 3, 0, 0, 0 )
-    // CHECK-NEXT: ( -1, 8, -5, -9, -7, 10, -11, 2, 12, -3, -13, 14, -15, 6, 16, 4, -17, nan, nan, nan )
+    // CHECK-NEXT: ( -1, 8, -5, -9, -7, 10, -11, 2, 12, -3, -13, 14, -15, 6, 16, 4, -17, 0, 0, 0 )
     //
     %p1 = sparse_tensor.pointers %1 { dimension = 0 : index }
       : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex>
@@ -134,8 +142,8 @@ module {
     // CHECK-NEXT: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
     // CHECK-NEXT: ( 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0 )
     // CHECK-NEXT: ( 0, 0, 1, 1, 2, 2, 2, 2, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 0 )
-    // CHECK-NEXT: ( 2, 3, 1, 2, 0, 1, 2, 3, 0, 2, 3, 0, 1, 2, 3, 1, 2, 0, 0, 0 )
-    // CHECK-NEXT: ( 3, 63, 11, 100, 66, 61, 13, 43, 77, 10, 46, 61, 53, 3, 75, 22, 18, nan, nan, nan )
+    // CHECK-NEXT: ( 2, 3, 1, 2, 0, 1, 2, 3, 0, 2, 3, 0, 1, 2, 3, 1, 2, 0, 0, 0 )
+    // CHECK-NEXT: ( 3, 63, 11, 100, 66, 61, 13, 43, 77, 10, 46, 61, 53, 3, 75, 22, 18, 0, 0, 0 )
     //
     %p2 = sparse_tensor.pointers %2 { dimension = 0 : index }
       : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
@@ -150,15 +158,15 @@ module {
     call @dumpi(%p2)  : (memref<?xindex>) -> ()
     call @dumpi(%i20) : (memref<?xindex>) -> ()
     call @dumpi(%i21) : (memref<?xindex>) -> ()
-    call @dumpi(%i22) : (memref<?xindex>) -> ()
+    call @dumpi(%i22) : (memref<?xindex>) -> ()
     call @dumpf(%v2)  : (memref<?xf64>) -> ()
 
     //
     // CHECK-NEXT: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
     // CHECK-NEXT: ( 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 0, 0, 0 )
     // CHECK-NEXT: ( 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0 )
-    // CHECK-NEXT: ( 2, 0, 1, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 1, 0, 0, 0 )
-    // CHECK-NEXT: ( 66, 77, 61, 11, 61, 53, 22, 3, 100, 13, 10, 3, 18, 63, 43, 46, 75, nan, nan, nan )
+    // CHECK-NEXT: ( 2, 0, 1, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 1, 0, 0, 0 )
+    // CHECK-NEXT: ( 66, 77, 61, 11, 61, 53, 22, 3, 100, 13, 10, 3, 18, 63, 43, 46, 75, 0, 0, 0 )
     //
     %p3 = sparse_tensor.pointers %3 { dimension = 0 : index }
       : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
@@ -173,14 +181,14 @@ module {
     call @dumpi(%p3)  : (memref<?xindex>) -> ()
     call @dumpi(%i30) : (memref<?xindex>) -> ()
     call @dumpi(%i31) : (memref<?xindex>) -> ()
-    call @dumpi(%i32) : (memref<?xindex>) -> ()
+    call @dumpi(%i32) : (memref<?xindex>) -> ()
     call @dumpf(%v3)  : (memref<?xf64>) -> ()
 
     //
     // CHECK-NEXT: ( 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
     // CHECK-NEXT: ( 0, 1, 2, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
     // CHECK-NEXT: ( 0, 3, 0, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 6, 5, 4, 3, 2, 11, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan )
+    // CHECK-NEXT: ( 6, 5, 4, 3, 2, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
     //
     %p4 = sparse_tensor.pointers %4 { dimension = 0 : index }
       : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
@@ -203,7 +211,7 @@ module {
     // CHECK-NEXT: ( 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
     // CHECK-NEXT: ( 0, 1, 2, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
     // CHECK-NEXT: ( 0, 3, 0, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 12, 10, 8, 6, 4, 22, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan )
+    // CHECK-NEXT: ( 12, 10, 8, 6, 4, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
     //
     %p5 = sparse_tensor.pointers %5 { dimension = 0 : index }
       : tensor<?x?xf64, #SortedCOO> to memref<?xindex>


        


More information about the Mlir-commits mailing list