[Mlir-commits] [mlir] [mlir][sparse] migrate tests to sparse_tensor.print (PR #84055)

llvmlistbot at llvm.org
Tue Mar 5 10:33:51 PST 2024


llvmbot wrote:



@llvm/pr-subscribers-mlir-sparse

Author: Aart Bik (aartbik)

Changes:

Continuing the efforts started in #83357.
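
For reference, the migration pattern applied throughout this patch replaces the per-test dump helpers (dense conversion followed by `printMemrefF64`, or a manual walk over `sparse_tensor.values`) with the dedicated `sparse_tensor.print` operation, and renames the test entry point from `@entry` to `@main`. Below is a minimal before/after sketch, not part of the patch itself, using a hypothetical value `%A` of type `tensor<9x4xf64, #MAT_C_C>` borrowed from the first affected test:

```mlir
// Before: convert to dense and dump through a runner-utils helper.
%d = sparse_tensor.convert %A : tensor<9x4xf64, #MAT_C_C> to tensor<9x4xf64>
%u = tensor.cast %d : tensor<9x4xf64> to tensor<*xf64>
call @printMemrefF64(%u) : (tensor<*xf64>) -> ()
bufferization.dealloc_tensor %d : tensor<9x4xf64>

// After: print the sparse storage (nse, dim, lvl, pos, crd, values) directly.
sparse_tensor.print %A : tensor<9x4xf64, #MAT_C_C>
```

The new form exercises the actual sparse storage layout in the FileCheck expectations instead of the densified values, which is what the updated CHECK lines in the diff reflect.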

---

Patch is 25.95 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/84055.diff


5 Files Affected:

- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir (+33-48) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block_matmul.mlir (+66-72) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir (+21-32) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir (+21-32) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir (+40-48) 


``````````diff
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
index 515f5b88b480d2..3fa1db1a14cea2 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -51,11 +51,6 @@
 
 module {
   func.func private @printMemrefF64(%ptr : tensor<*xf64>)
-  func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }
-
-  //
-  // Tests without permutation.
-  //
 
   // Concats all sparse matrices (with different encodings) to a sparse matrix.
   func.func @concat_sparse_sparse(%arg0: tensor<2x4xf64, #MAT_C_C>, %arg1: tensor<3x4xf64, #MAT_C_D>, %arg2: tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C> {
@@ -85,30 +80,15 @@ module {
     return %0 : tensor<9x4xf64>
   }
 
-  func.func @dump_mat_9x4(%A: tensor<9x4xf64, #MAT_C_C>) {
-    %c = sparse_tensor.convert %A : tensor<9x4xf64, #MAT_C_C> to tensor<9x4xf64>
-    %cu = tensor.cast %c : tensor<9x4xf64> to tensor<*xf64>
-    call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()
-
-    %n = sparse_tensor.number_of_entries %A : tensor<9x4xf64, #MAT_C_C>
-    vector.print %n : index
-
-    %1 = sparse_tensor.values %A : tensor<9x4xf64, #MAT_C_C> to memref<?xf64>
-    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
-
-    bufferization.dealloc_tensor %c : tensor<9x4xf64>
-    return
-  }
-
+  // Outputs dense matrix.
   func.func @dump_mat_dense_9x4(%A: tensor<9x4xf64>) {
     %u = tensor.cast %A : tensor<9x4xf64> to tensor<*xf64>
     call @printMemrefF64(%u) : (tensor<*xf64>) -> ()
-
     return
   }
 
   // Driver method to call and verify kernels.
-  func.func @entry() {
+  func.func @main() {
     %m24 = arith.constant dense<
       [ [ 1.0, 0.0, 3.0, 0.0],
         [ 0.0, 2.0, 0.0, 0.0] ]> : tensor<2x4xf64>
@@ -126,22 +106,24 @@ module {
     %sm34cd = sparse_tensor.convert %m34 : tensor<3x4xf64> to tensor<3x4xf64, #MAT_C_D>
     %sm44dc = sparse_tensor.convert %m44 : tensor<4x4xf64> to tensor<4x4xf64, #MAT_D_C>
 
-    // CHECK:      {{\[}}[1,   0,   3,   0],
-    // CHECK-NEXT:  [0,   2,   0,   0],
-    // CHECK-NEXT:  [1,   0,   1,   1],
-    // CHECK-NEXT:  [0,   0.5,   0,   0],
-    // CHECK-NEXT:  [1,   5,   2,   0],
-    // CHECK-NEXT:  [0,   0,   1.5,   1],
-    // CHECK-NEXT:  [0,   3.5,   0,   0],
-    // CHECK-NEXT:  [1,   5,   2,   0],
-    // CHECK-NEXT:  [1,   0.5,   0,   0]]
-    // CHECK-NEXT: 18
-    // CHECK:      [1,  3,  2,  1,  1,  1,  0.5,  1,  5,  2,  1.5,  1,  3.5,  1,  5,  2,  1,  0.5
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 9, 4 )
+    // CHECK-NEXT: lvl = ( 9, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 9,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8,
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1,
+    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5,
+    // CHECK-NEXT: ----
+    //
     %0 = call @concat_sparse_sparse(%sm24cc, %sm34cd, %sm44dc)
                : (tensor<2x4xf64, #MAT_C_C>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C>
-    call @dump_mat_9x4(%0) : (tensor<9x4xf64, #MAT_C_C>) -> ()
+    sparse_tensor.print %0 : tensor<9x4xf64, #MAT_C_C>
 
-    // CHECK:      {{\[}}[1,   0,   3,   0],
+    //
+    // CHECK: {{\[}}[1,   0,   3,   0],
     // CHECK-NEXT:  [0,   2,   0,   0],
     // CHECK-NEXT:  [1,   0,   1,   1],
     // CHECK-NEXT:  [0,   0.5,   0,   0],
@@ -150,6 +132,7 @@ module {
     // CHECK-NEXT:  [0,   3.5,   0,   0],
     // CHECK-NEXT:  [1,   5,   2,   0],
     // CHECK-NEXT:  [1,   0.5,   0,   0]]
+    //
     %1 = call @concat_sparse_dense(%sm24cc, %sm34cd, %sm44dc)
                : (tensor<2x4xf64, #MAT_C_C>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64>
     call @dump_mat_dense_9x4(%1) : (tensor<9x4xf64>) -> ()
@@ -167,22 +150,24 @@ module {
     // CHECK:      [1,  3,  2,  1,  1,  1,  0.5,  1,  5,  2,  1.5,  1,  3.5,  1,  5,  2,  1,  0.5
     %2 = call @concat_mix_sparse(%m24, %sm34cd, %sm44dc)
                : (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C>
-    call @dump_mat_9x4(%2) : (tensor<9x4xf64, #MAT_C_C>) -> ()
-
-    // CHECK:      {{\[}}[1,   0,   3,   0],
-    // CHECK-NEXT:  [0,   2,   0,   0],
-    // CHECK-NEXT:  [1,   0,   1,   1],
-    // CHECK-NEXT:  [0,   0.5,   0,   0],
-    // CHECK-NEXT:  [1,   5,   2,   0],
-    // CHECK-NEXT:  [0,   0,   1.5,   1],
-    // CHECK-NEXT:  [0,   3.5,   0,   0],
-    // CHECK-NEXT:  [1,   5,   2,   0],
-    // CHECK-NEXT:  [1,   0.5,   0,   0]]
+    sparse_tensor.print %2 : tensor<9x4xf64, #MAT_C_C>
+
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 9, 4 )
+    // CHECK-NEXT: lvl = ( 9, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 9,  )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8,  )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18,  )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1,  )
+    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5,  )
+    // CHECK-NEXT: ----
+    //
     %3 = call @concat_mix_dense(%m24, %sm34cd, %sm44dc)
                : (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64>
     call @dump_mat_dense_9x4(%3) : (tensor<9x4xf64>) -> ()
 
-
     // Release resources.
     bufferization.dealloc_tensor %sm24cc  : tensor<2x4xf64, #MAT_C_C>
     bufferization.dealloc_tensor %sm34cd  : tensor<3x4xf64, #MAT_C_D>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block_matmul.mlir
index e47ac46597b77a..464de9c8a2c3a6 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block_matmul.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -45,7 +45,6 @@
   map = ( i, j ) -> (i : dense, j : compressed)
 }>
 
-
 #BSR = #sparse_tensor.encoding<{
   map = ( i, j ) ->
   ( i floordiv 2 : dense,
@@ -65,67 +64,66 @@
 
 module {
 
-func.func @mul(%arg0: tensor<4x8xf64>,
-               %arg1: tensor<4x8xf64, #BSR>) -> tensor<4x4xf64> {
-  %out = arith.constant dense<0.0> : tensor<4x4xf64>
-  %0 = linalg.generic #trait_mul
-    ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64, #BSR>)
-    outs(%out: tensor<4x4xf64>) {
-      ^bb(%x: f64, %y : f64, %z : f64):
-        %1 = arith.mulf %x, %y : f64
-        %2 = arith.addf %1, %z : f64
-        linalg.yield %2 : f64
-  } -> tensor<4x4xf64>
-  return %0 : tensor<4x4xf64>
-}
-
-func.func @mul_24(%arg0: tensor<4x8xf64>,
-                  %arg1: tensor<4x8xf64, #NV_24>) -> tensor<4x4xf64> {
-  %out = arith.constant dense<0.0> : tensor<4x4xf64>
-  %0 = linalg.generic #trait_mul
-    ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64, #NV_24>)
-    outs(%out: tensor<4x4xf64>) {
-      ^bb(%x: f64, %y : f64, %z : f64):
-        %1 = arith.mulf %x, %y : f64
-        %2 = arith.addf %1, %z : f64
-        linalg.yield %2 : f64
-  } -> tensor<4x4xf64>
-  return %0 : tensor<4x4xf64>
-}
+  func.func @mul(%arg0: tensor<4x8xf64>,
+                 %arg1: tensor<4x8xf64, #BSR>) -> tensor<4x4xf64> {
+    %out = arith.constant dense<0.0> : tensor<4x4xf64>
+    %0 = linalg.generic #trait_mul
+      ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64, #BSR>)
+      outs(%out: tensor<4x4xf64>) {
+        ^bb(%x: f64, %y : f64, %z : f64):
+          %1 = arith.mulf %x, %y : f64
+          %2 = arith.addf %1, %z : f64
+          linalg.yield %2 : f64
+    } -> tensor<4x4xf64>
+    return %0 : tensor<4x4xf64>
+  }
 
-func.func @mul_csr_bsr(%arg0: tensor<4x8xf64, #CSR>,
-                       %arg1: tensor<4x8xf64, #BSR>) -> tensor<4x4xf64> {
-  %out = arith.constant dense<0.0> : tensor<4x4xf64>
-  %0 = linalg.generic #trait_mul
-    ins(%arg0, %arg1: tensor<4x8xf64, #CSR>, tensor<4x8xf64, #BSR>)
-    outs(%out: tensor<4x4xf64>) {
-      ^bb(%x: f64, %y : f64, %z : f64):
-        %1 = arith.mulf %x, %y : f64
-        %2 = arith.addf %1, %z : f64
-        linalg.yield %2 : f64
-  } -> tensor<4x4xf64>
-  return %0 : tensor<4x4xf64>
-}
+  func.func @mul_24(%arg0: tensor<4x8xf64>,
+                    %arg1: tensor<4x8xf64, #NV_24>) -> tensor<4x4xf64> {
+    %out = arith.constant dense<0.0> : tensor<4x4xf64>
+    %0 = linalg.generic #trait_mul
+      ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64, #NV_24>)
+      outs(%out: tensor<4x4xf64>) {
+        ^bb(%x: f64, %y : f64, %z : f64):
+          %1 = arith.mulf %x, %y : f64
+          %2 = arith.addf %1, %z : f64
+          linalg.yield %2 : f64
+    } -> tensor<4x4xf64>
+    return %0 : tensor<4x4xf64>
+  }
 
-func.func @mul_dense(%arg0: tensor<4x8xf64>,
-                     %arg1: tensor<4x8xf64>) -> tensor<4x4xf64> {
-  %out = arith.constant dense<0.0> : tensor<4x4xf64>
-  %0 = linalg.generic #trait_mul
-    ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64>)
-    outs(%out: tensor<4x4xf64>) {
-      ^bb(%x: f64, %y : f64, %z : f64):
-        %1 = arith.mulf %x, %y : f64
-        %2 = arith.addf %1, %z : f64
-        linalg.yield %2 : f64
-  } -> tensor<4x4xf64>
-  return %0 : tensor<4x4xf64>
-}
+  func.func @mul_csr_bsr(%arg0: tensor<4x8xf64, #CSR>,
+                         %arg1: tensor<4x8xf64, #BSR>) -> tensor<4x4xf64> {
+    %out = arith.constant dense<0.0> : tensor<4x4xf64>
+    %0 = linalg.generic #trait_mul
+      ins(%arg0, %arg1: tensor<4x8xf64, #CSR>, tensor<4x8xf64, #BSR>)
+      outs(%out: tensor<4x4xf64>) {
+        ^bb(%x: f64, %y : f64, %z : f64):
+          %1 = arith.mulf %x, %y : f64
+          %2 = arith.addf %1, %z : f64
+          linalg.yield %2 : f64
+    } -> tensor<4x4xf64>
+    return %0 : tensor<4x4xf64>
+  }
 
+  func.func @mul_dense(%arg0: tensor<4x8xf64>,
+                       %arg1: tensor<4x8xf64>) -> tensor<4x4xf64> {
+    %out = arith.constant dense<0.0> : tensor<4x4xf64>
+    %0 = linalg.generic #trait_mul
+      ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64>)
+      outs(%out: tensor<4x4xf64>) {
+        ^bb(%x: f64, %y : f64, %z : f64):
+          %1 = arith.mulf %x, %y : f64
+          %2 = arith.addf %1, %z : f64
+          linalg.yield %2 : f64
+    } -> tensor<4x4xf64>
+    return %0 : tensor<4x4xf64>
+  }
 
   //
-  // Output utilities.
+  // Output utility.
   //
-  func.func @dumpf64(%arg0: tensor<4x4xf64>) {
+  func.func @dump_dense_f64(%arg0: tensor<4x4xf64>) {
     %c0 = arith.constant 0 : index
     %d0 = arith.constant -1.0 : f64
     %0 = vector.transfer_read %arg0[%c0, %c0], %d0: tensor<4x4xf64>, vector<4x4xf64>
@@ -136,36 +134,32 @@ func.func @mul_dense(%arg0: tensor<4x8xf64>,
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-    %c2 = arith.constant 2 : index
-
 
     %td = arith.constant dense<[[ 1.0, 2.0,  0.0,  0.0,  0.0,  0.0,  4.0,  5.0],
                                 [ 6.0, 7.0,  0.0,  0.0,  0.0,  0.0, 10.0, 11.0],
                                 [ 0.0, 0.0, 12.0, 13.0, 16.0, 17.0,  0.0,  0.0],
                                 [ 0.0, 0.0, 18.0, 19.0, 22.0, 23.0,  0.0,  0.0]]> : tensor<4x8xf64>
 
-
-    %2 = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #BSR>
-    %3 = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #NV_24>
-    %4 = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #CSR>
+    %a = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #BSR>
+    %b = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #NV_24>
+    %c = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #CSR>
 
     %d = call @mul_dense(%td, %td)
          : (tensor<4x8xf64>, tensor<4x8xf64>) -> tensor<4x4xf64>
-    %s = call @mul(%td, %2)
+    %s = call @mul(%td, %a)
          : (tensor<4x8xf64>, tensor<4x8xf64, #BSR>) -> tensor<4x4xf64>
-    %s24 = call @mul_24(%td, %3)
+    %s24 = call @mul_24(%td, %b)
          : (tensor<4x8xf64>, tensor<4x8xf64, #NV_24>) -> tensor<4x4xf64>
-    %scsr = call @mul_csr_bsr(%4, %2)
+    %scsr = call @mul_csr_bsr(%c, %a)
          : (tensor<4x8xf64, #CSR>, tensor<4x8xf64, #BSR>) -> tensor<4x4xf64>
 
     // CHECK-COUNT-4: ( ( 46, 115, 0, 0 ), ( 115, 306, 0, 0 ), ( 0, 0, 858, 1206 ), ( 0, 0, 1206, 1698 ) )
-    call @dumpf64(%d) : (tensor<4x4xf64>) -> ()
-    call @dumpf64(%s) : (tensor<4x4xf64>) -> ()
-    call @dumpf64(%s24) : (tensor<4x4xf64>) -> ()
-    call @dumpf64(%scsr) : (tensor<4x4xf64>) -> ()
+    call @dump_dense_f64(%d)    : (tensor<4x4xf64>) -> ()
+    call @dump_dense_f64(%s)    : (tensor<4x4xf64>) -> ()
+    call @dump_dense_f64(%s24)  : (tensor<4x4xf64>) -> ()
+    call @dump_dense_f64(%scsr) : (tensor<4x4xf64>) -> ()
 
     return
   }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir
index d97b1a93359517..9747da27f9e972 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -77,22 +77,8 @@ module {
     return %0 : tensor<?xcomplex<f32>, #SparseVector>
   }
 
-  func.func @dump(%arg0: tensor<?xcomplex<f32>, #SparseVector>, %d: index) {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-    %mem = sparse_tensor.values %arg0 : tensor<?xcomplex<f32>, #SparseVector> to memref<?xcomplex<f32>>
-    scf.for %i = %c0 to %d step %c1 {
-       %v = memref.load %mem[%i] : memref<?xcomplex<f32>>
-       %real = complex.re %v : complex<f32>
-       %imag = complex.im %v : complex<f32>
-       vector.print %real : f32
-       vector.print %imag : f32
-    }
-    return
-  }
-
   // Driver method to call and verify complex kernels.
-  func.func @entry() {
+  func.func @main() {
     // Setup sparse vectors.
     %v1 = arith.constant sparse<
        [ [0], [28], [31] ],
@@ -114,23 +100,26 @@ module {
     //
     // Verify the results.
     //
-    // CHECK: 511.13
-    // CHECK-NEXT: 2
-    // CHECK-NEXT: 1
-    // CHECK-NEXT: 0
-    // CHECK-NEXT: 5
-    // CHECK-NEXT: 4
-    // CHECK-NEXT: 8
-    // CHECK-NEXT: 6
-    // CHECK-NEXT: 6
-    // CHECK-NEXT: 8
-    // CHECK-NEXT: 15
-    // CHECK-NEXT: 18
+    // CHECK:   ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 4
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 4,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 28, 31,
+    // CHECK-NEXT: values : ( ( 511.13, 2 ), ( 1, 0 ), ( 5, 4 ), ( 8, 6 ),
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 2
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 2,
+    // CHECK-NEXT: crd[0] : ( 28, 31,
+    // CHECK-NEXT: values : ( ( 6, 8 ), ( 15, 18 ),
+    // CHECK-NEXT: ----
     //
-    %d1 = arith.constant 4 : index
-    %d2 = arith.constant 2 : index
-    call @dump(%0, %d1) : (tensor<?xcomplex<f32>, #SparseVector>, index) -> ()
-    call @dump(%1, %d2) : (tensor<?xcomplex<f32>, #SparseVector>, index) -> ()
+    sparse_tensor.print %0 : tensor<?xcomplex<f32>, #SparseVector>
+    sparse_tensor.print %1 : tensor<?xcomplex<f32>, #SparseVector>
 
     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xcomplex<f32>, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir
index 29008473d481ed..840895ffee3252 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -74,22 +74,8 @@ module {
     return %0 : tensor<?xcomplex<f64>, #SparseVector>
   }
 
-  func.func @dump(%arg0: tensor<?xcomplex<f64>, #SparseVector>, %d: index) {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-    %mem = sparse_tensor.values %arg0 : tensor<?xcomplex<f64>, #SparseVector> to memref<?xcomplex<f64>>
-    scf.for %i = %c0 to %d step %c1 {
-       %v = memref.load %mem[%i] : memref<?xcomplex<f64>>
-       %real = complex.re %v : complex<f64>
-       %imag = complex.im %v : complex<f64>
-       vector.print %real : f64
-       vector.print %imag : f64
-    }
-    return
-  }
-
   // Driver method to call and verify complex kernels.
-  func.func @entry() {
+  func.func @main() {
     // Setup sparse vectors.
     %v1 = arith.constant sparse<
        [ [0], [28], [31] ],
@@ -111,23 +97,26 @@ module {
     //
     // Verify the results.
     //
-    // CHECK: 511.13
-    // CHECK-NEXT: 2
-    // CHECK-NEXT: 1
-    // CHECK-NEXT: 0
-    // CHECK-NEXT: 5
-    // CHECK-NEXT: 4
-    // CHECK-NEXT: 8
-    // CHECK-NEXT: 6
-    // CHECK-NEXT: 6
-    // CHECK-NEXT: 8
-    // CHECK-NEXT: 15
-    // CHECK-NEXT: 18
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 4
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 4,  )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 28, 31,  )
+    // CHECK-NEXT: values : ( ( 511.13, 2 ), ( 1, 0 ), ( 5, 4 ), ( 8, 6 ),  )
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 2
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 2,  )
+    // CHECK-NEXT: crd[0] : ( 28, 31,  )
+    // CHECK-NEXT: values : ( ( 6, 8 ), ( 15, 18 ),  )
+    // CHECK-NEXT: ----
     //
-    %d1 = arith.constant 4 : index
-    %d2 = arith.constant 2 : index
-    call @dump(%0, %d1) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
-    call @dump(%1, %d2) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
+    sparse_tensor.print %0 : tensor<?xcomplex<f64>, #SparseVector>
+    sparse_tensor.print %1 : tensor<?xcomplex<f64>, #SparseVector>
 
     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xcomplex<f64>, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseT...
[truncated]

``````````



https://github.com/llvm/llvm-project/pull/84055

