[Mlir-commits] [mlir] [mlir][sparse] Migrate to sparse_tensor.print (PR #83946)

Yinying Li llvmlistbot at llvm.org
Wed Mar 6 14:34:56 PST 2024


https://github.com/yinying-lisa-li updated https://github.com/llvm/llvm-project/pull/83946

From 9a9c8d858af921ad8180e2c3425f3b27d27e6ab5 Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Tue, 5 Mar 2024 02:16:31 +0000
Subject: [PATCH 1/2] migrate to sparse_tensor.print

---
 .../SparseTensor/CPU/sparse_matvec.mlir       |  4 +-
 .../SparseTensor/CPU/sparse_mttkrp.mlir       |  4 +-
 .../SparseTensor/CPU/sparse_out_mult_elt.mlir | 19 ++++---
 .../CPU/sparse_out_reduction.mlir             | 26 ++++-----
 .../SparseTensor/CPU/sparse_out_simple.mlir   | 19 ++++---
 .../Dialect/SparseTensor/CPU/sparse_pack.mlir |  4 +-
 .../SparseTensor/CPU/sparse_pack_d.mlir       | 53 ++++++++++++-------
 7 files changed, 78 insertions(+), 51 deletions(-)
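
For reviewers new to the op: each test drops the old idiom of extracting the
values buffer and printing a fixed-width vector, and instead calls
sparse_tensor.print on the sparse tensor directly (the entry point is also
renamed from @entry to @main, with the RUN lines updated to match). A minimal
sketch of the pattern; %t, %c0, %f0, and the #CSR encoding here are
illustrative placeholders, not taken from any one test:

  // Before: dump the values array into a padded vector of hard-coded width.
  %val = sparse_tensor.values %t : tensor<4x4xf64, #CSR> to memref<?xf64>
  %v   = vector.transfer_read %val[%c0], %f0 : memref<?xf64>, vector<8xf64>
  vector.print %v : vector<8xf64>

  // After: a single op prints sizes, positions, coordinates, and values.
  sparse_tensor.print %t : tensor<4x4xf64, #CSR>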

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
index bdad713709afa2..b9d1148301dd16 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -88,7 +88,7 @@ module {
   //
   // Main driver that reads matrix from file and calls the sparse kernel.
   //
-  func.func @entry() {
+  func.func @main() {
     %i0 = arith.constant 0 : i32
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
index 30e620b9d610ba..d615cb66c3d08d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -84,7 +84,7 @@ module {
   //
   // Main driver that reads matrix from file and calls the sparse kernel.
   //
-  func.func @entry() {
+  func.func @main() {
     %f0 = arith.constant 0.0 : f64
     %cst0 = arith.constant 0 : index
     %cst1 = arith.constant 1 : index
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
index f7bcd1122d46c1..c30c6b9b5cc2f5 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -60,7 +60,7 @@ module {
   }
 
   // Driver method to call and verify kernel.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %f0 = arith.constant 0.0 : f32
 
@@ -84,11 +84,18 @@ module {
     //
     // Verify results. Only two entries stored in result!
     //
-    // CHECK: ( 14, 20, 0, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 2
+    // CHECK-NEXT: dim = ( 32, 16 )
+    // CHECK-NEXT: lvl = ( 32, 16 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 2, 31
+    // CHECK-NEXT: pos[1] : ( 0, 1, 2
+    // CHECK-NEXT: crd[1] : ( 2, 0
+    // CHECK-NEXT: values : ( 14, 20
+    // CHECK-NEXT: ----
     //
-    %val = sparse_tensor.values %0 : tensor<32x16xf32, #DCSR> to memref<?xf32>
-    %vv = vector.transfer_read %val[%c0], %f0: memref<?xf32>, vector<4xf32>
-    vector.print %vv : vector<4xf32>
+    sparse_tensor.print %0 : tensor<32x16xf32, #DCSR>
 
     // Release the resources.
     bufferization.dealloc_tensor %sta : tensor<32x16xf32, #DCSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
index 17def3f52c003e..d572f4e14bfe2c 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -70,7 +70,7 @@ module {
   }
 
   // Driver method to call and verify tensor kernel.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %i0 = arith.constant 0 : i32
 
@@ -94,23 +94,23 @@ module {
     //
     // Verify results. Only two entries stored in result. Correct structure.
     //
-    // CHECK: ( 7, 69, 0, 0 )
-    // CHECK-NEXT: ( ( 0, 0, 0 ), ( 0, 7, 0 ), ( 0, 0, 69 ) )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 2
+    // CHECK-NEXT: dim = ( 3, 3 )
+    // CHECK-NEXT: lvl = ( 3, 3 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 1, 2
+    // CHECK-NEXT: crd[1] : ( 1, 2
+    // CHECK-NEXT: values : ( 7, 69
+    // CHECK-NEXT: ----
     //
-    %val = sparse_tensor.values %0
-      : tensor<?x?xi32, #SparseMatrix> to memref<?xi32>
-    %vv = vector.transfer_read %val[%c0], %i0: memref<?xi32>, vector<4xi32>
-    vector.print %vv : vector<4xi32>
-    %dm = sparse_tensor.convert %0
-      : tensor<?x?xi32, #SparseMatrix> to tensor<?x?xi32>
-    %vm = vector.transfer_read %dm[%c0, %c0], %i0: tensor<?x?xi32>, vector<3x3xi32>
-    vector.print %vm : vector<3x3xi32>
+    sparse_tensor.print %0 : tensor<?x?xi32, #SparseMatrix>
 
     // Release the resources.
     bufferization.dealloc_tensor %st1 : tensor<?x?x?xi32, #SparseTensor>
     bufferization.dealloc_tensor %st2 : tensor<?x?x?xi32, #SparseTensor>
     bufferization.dealloc_tensor %0 : tensor<?x?xi32, #SparseMatrix>
-    bufferization.dealloc_tensor %dm : tensor<?x?xi32>
 
     return
   }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
index e2d8c4fd4628d9..88513c80219a85 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -70,7 +70,7 @@ module {
   //
   // Main driver that reads matrix from file and calls the sparse kernel.
   //
-  func.func @entry() {
+  func.func @main() {
     %d0 = arith.constant 0.0 : f64
     %c0 = arith.constant 0 : index
 
@@ -83,11 +83,18 @@ module {
 
     // Print the result for verification.
     //
-    // CHECK: ( 1, 1.96, 4, 6.25, 9, 16.81, 16, 27.04, 25 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 5, 5 )
+    // CHECK-NEXT: lvl = ( 5, 5 )
+    // CHECK-NEXT: pos[0] : ( 0, 5
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 5, 7, 9
+    // CHECK-NEXT: crd[1] : ( 0, 3, 1, 4, 2, 0, 3, 1, 4
+    // CHECK-NEXT: values : ( 1, 1.96, 4, 6.25, 9, 16.81, 16, 27.04, 25
+    // CHECK-NEXT: ----
     //
-    %m = sparse_tensor.values %0 : tensor<?x?xf64, #DCSR> to memref<?xf64>
-    %v = vector.transfer_read %m[%c0], %d0: memref<?xf64>, vector<9xf64>
-    vector.print %v : vector<9xf64>
+    sparse_tensor.print %0 : tensor<?x?xf64, #DCSR>
 
     // Release the resources.
     bufferization.dealloc_tensor %x : tensor<?x?xf64, #DCSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
index 2b9b73a1990e65..a8796d32a1e990 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -54,7 +54,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %f0 = arith.constant 0.0 : f64
     %i0 = arith.constant 0 : i32
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir
index da816c7fbb1172..06e5ee070778c2 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -48,7 +48,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %f0 = arith.constant 0.0 : f32
 
@@ -108,26 +108,39 @@ module {
     //
     // Verify.
     //
-    // CHECK: ( ( ( 1, 2 ), ( 3, 4 ), ( 0, 0 ) ), ( ( 0, 0 ), ( 0, 0 ), ( 0, 0 ) ), ( ( 0, 0 ), ( 5, 0 ), ( 6, 7 ) ), ( ( 0, 0 ), ( 8, 0 ), ( 0, 0 ) ) )
-    // CHECK: ( ( ( 1, 2 ), ( 0, 3 ), ( 4, 0 ) ), ( ( 5, 6 ), ( 0, 0 ), ( 0, 7 ) ), ( ( 8, 9 ), ( 10, 11 ), ( 12, 13 ) ), ( ( 14, 0 ), ( 0, 15 ), ( 0, 16 ) ) )
-    // CHECK: ( ( ( 1, 2 ), ( 0, 3 ), ( 4, 0 ) ), ( ( 5, 6 ), ( 0, 0 ), ( 0, 7 ) ), ( ( 8, 9 ), ( 10, 11 ), ( 12, 13 ) ), ( ( 14, 0 ), ( 0, 15 ), ( 0, 16 ) ) )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 4, 3, 2 )
+    // CHECK-NEXT: lvl = ( 4, 3, 2 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 5
+    // CHECK-NEXT: crd[1] : ( 0, 1, 1, 2, 1
+    // CHECK-NEXT: pos[2] : ( 0, 2, 4, 5, 7, 8
+    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 0, 1, 0
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 16
+    // CHECK-NEXT: dim = ( 4, 3, 2 )
+    // CHECK-NEXT: lvl = ( 4, 3, 2 )
+    // CHECK-NEXT: pos[2] : ( 0, 2, 3, 4, 6, 6, 7, 9, 11, 13, 14, 15, 16
+    // CHECK-NEXT: crd[2] : ( 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 22
+    // CHECK-NEXT: dim = ( 4, 3, 2 )
+    // CHECK-NEXT: lvl = ( 4, 3, 2 )
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 8, 11
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2
+    // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 5, 6, 0, 7, 8, 9, 10, 11, 12, 13, 14, 0, 0, 15, 0, 16
+    // CHECK-NEXT: ----
     //
+    sparse_tensor.print %s0 : tensor<4x3x2xf32, #CCC>
+    sparse_tensor.print %s1 : tensor<4x3x2xf32, #BatchedCSR>
+    sparse_tensor.print %s2 : tensor<4x3x2xf32, #CSRDense>
 
-    %d0 = sparse_tensor.convert %s0 : tensor<4x3x2xf32, #CCC> to tensor<4x3x2xf32>
-    %v0 = vector.transfer_read %d0[%c0, %c0, %c0], %f0 : tensor<4x3x2xf32>, vector<4x3x2xf32>
-    vector.print %v0 : vector<4x3x2xf32>
-
-    %d1 = sparse_tensor.convert %s1 : tensor<4x3x2xf32, #BatchedCSR> to tensor<4x3x2xf32>
-    %v1 = vector.transfer_read %d1[%c0, %c0, %c0], %f0 : tensor<4x3x2xf32>, vector<4x3x2xf32>
-    vector.print %v1 : vector<4x3x2xf32>
-
-    %d2 = sparse_tensor.convert %s2 : tensor<4x3x2xf32, #CSRDense> to tensor<4x3x2xf32>
-    %v2 = vector.transfer_read %d1[%c0, %c0, %c0], %f0 : tensor<4x3x2xf32>, vector<4x3x2xf32>
-    vector.print %v2 : vector<4x3x2xf32>
-
-    bufferization.dealloc_tensor %d0 : tensor<4x3x2xf32>
-    bufferization.dealloc_tensor %d1 : tensor<4x3x2xf32>
-    bufferization.dealloc_tensor %d2 : tensor<4x3x2xf32>
     // FIXME: doing this explicitly crashes runtime
     // bufferization.dealloc_tensor %s0 : tensor<4x3x2xf32, #CCC>
     // bufferization.dealloc_tensor %s1 : tensor<4x3x2xf32, #BatchedCSR>

From d3e9136b547bf13247f5a49cf6c4dd5735f8e6f0 Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Wed, 6 Mar 2024 02:08:26 +0000
Subject: [PATCH 2/2] add more tests

---
 .../SparseTensor/CPU/sparse_pooling_nhwc.mlir | 27 ++++--
 .../CPU/sparse_quantized_matmul.mlir          |  4 +-
 .../CPU/sparse_reduce_custom.mlir             | 83 +++++++++----------
 .../CPU/sparse_reduce_custom_prod.mlir        | 78 +++++++++--------
 .../CPU/sparse_reduce_custom_sum.mlir         |  4 +-
 .../SparseTensor/CPU/sparse_reductions.mlir   |  4 +-
 .../CPU/sparse_reductions_min.mlir            |  4 +-
 .../CPU/sparse_reductions_prod.mlir           |  4 +-
 .../SparseTensor/CPU/sparse_reshape.mlir      | 53 ++++++++----
 .../CPU/sparse_rewrite_push_back.mlir         |  4 +-
 .../CPU/sparse_rewrite_sort_coo.mlir          |  4 +-
 .../CPU/sparse_sampled_matmul.mlir            |  4 +-
 .../CPU/sparse_sampled_mm_fusion.mlir         | 34 +++++---
 13 files changed, 171 insertions(+), 136 deletions(-)
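
The new CHECK lines in this patch all follow the textual layout emitted by
sparse_tensor.print: a "---- Sparse Tensor ----" header, the number of stored
entries (nse), the dimension and level sizes, then one pos/crd pair per
compressed level, the values array, and a closing "----". As an illustration
only (the numbers below are made up, and the trailing parentheses are elided
because the CHECK prefixes in the tests stop before them), a 4x4 CSR matrix
with three stored entries would print along the lines of:

  ---- Sparse Tensor ----
  nse = 3
  dim = ( 4, 4 )
  lvl = ( 4, 4 )
  pos[1] : ( 0, 1, 2, 3, 3
  crd[1] : ( 0, 2, 1
  values : ( 1, 2, 3
  ----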

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir
index 3ce089d7a7cf6b..39699fbdb14e59 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -47,7 +47,7 @@ func.func @pooling_nhwc_sum(%input: tensor<1x4x4x1xf32>, %filter: tensor<2x2xf32
 }
 
 
-func.func @entry() {
+func.func @main() {
   %c0 = arith.constant 0 : index
   %zero = arith.constant 0.00000e+00 : f32
 
@@ -76,17 +76,26 @@ func.func @entry() {
   //
   // Sparse pooling should have the same output.
   //
-
-  // CHECK-NEXT: ( ( ( ( 6 ), ( 6 ), ( 6 ) ), ( ( 6 ), ( 6 ), ( 6 ) ), ( ( 6 ), ( 6 ), ( 6 ) ) ) )
-  %s1 = sparse_tensor.convert %CCCC_ret : tensor<1x3x3x1xf32, #CCCC> to tensor<1x3x3x1xf32>
-  %v1 = vector.transfer_read %s1[%c0, %c0, %c0, %c0], %zero
-      : tensor<1x3x3x1xf32>, vector<1x3x3x1xf32>
-  vector.print %v1 : vector<1x3x3x1xf32>
+  // CHECK:      ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 9
+  // CHECK-NEXT: dim = ( 1, 3, 3, 1 )
+  // CHECK-NEXT: lvl = ( 1, 3, 3, 1 )
+  // CHECK-NEXT: pos[0] : ( 0, 1
+  // CHECK-NEXT: crd[0] : ( 0
+  // CHECK-NEXT: pos[1] : ( 0, 3
+  // CHECK-NEXT: crd[1] : ( 0, 1, 2
+  // CHECK-NEXT: pos[2] : ( 0, 3, 6, 9
+  // CHECK-NEXT: crd[2] : ( 0, 1, 2, 0, 1, 2, 0, 1, 2
+  // CHECK-NEXT: pos[3] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
+  // CHECK-NEXT: crd[3] : ( 0, 0, 0, 0, 0, 0, 0, 0, 0
+  // CHECK-NEXT: values : ( 6, 6, 6, 6, 6, 6, 6, 6, 6
+  // CHECK-NEXT: ----
+  //
+  sparse_tensor.print %CCCC_ret : tensor<1x3x3x1xf32, #CCCC>
 
   // Releases resources.
   bufferization.dealloc_tensor %in_CCCC : tensor<1x4x4x1xf32, #CCCC>
   bufferization.dealloc_tensor %CCCC_ret : tensor<1x3x3x1xf32, #CCCC>
   bufferization.dealloc_tensor %dense_ret : tensor<1x3x3x1xf32>
-  bufferization.dealloc_tensor %s1 : tensor<1x3x3x1xf32>
   return
 }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
index b322d965f2dc7e..873322929232a7 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -49,7 +49,7 @@ module {
     return %0: tensor<5x6xi32>
   }
 
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %i0 = arith.constant 0 : i32
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
index 17219cde035d34..a927a5dfb94bc2 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -114,39 +114,8 @@ module {
     return %0 : tensor<?x?xf64, #CSR>
   }
 
-  // Dumps a sparse vector of type f64.
-  func.func @dump_vec(%arg0: tensor<?xf64, #SparseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0.0 : f64
-    %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf64>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<8xf64>
-    vector.print %1 : vector<8xf64>
-    // Dump the dense vector to verify structure is correct.
-    %dv = sparse_tensor.convert %arg0 : tensor<?xf64, #SparseVector> to tensor<?xf64>
-    %2 = vector.transfer_read %dv[%c0], %d0: tensor<?xf64>, vector<16xf64>
-    vector.print %2 : vector<16xf64>
-    bufferization.dealloc_tensor %dv : tensor<?xf64>
-    return
-  }
-
-  // Dump a sparse matrix.
-  func.func @dump_mat(%arg0: tensor<?x?xf64, #CSR>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0.0 : f64
-    %0 = sparse_tensor.values %arg0 : tensor<?x?xf64, #CSR> to memref<?xf64>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<16xf64>
-    vector.print %1 : vector<16xf64>
-    %dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #CSR> to tensor<?x?xf64>
-    %2 = vector.transfer_read %dm[%c0, %c0], %d0: tensor<?x?xf64>, vector<5x5xf64>
-    vector.print %2 : vector<5x5xf64>
-    bufferization.dealloc_tensor %dm : tensor<?x?xf64>
-    return
-  }
-
   // Driver method to call and verify vector kernels.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
 
     // Setup sparse matrices.
@@ -171,19 +140,43 @@ module {
     //
     // Verify the results.
     //
-    // CHECK:      ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 1, 2, 0, 0, 0 ), ( 3, 0, 0, 0, 0 ), ( 0, 0, 4, 5, 6 ), ( 7, 0, 8, 9, 0 ), ( 0, 0, 0, 0, 0 ) )
-    // CHECK-NEXT: ( 6, 5, 4, 3, 2, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 6, 0, 0, 0, 0 ), ( 0, 0, 0, 5, 0 ), ( 4, 0, 0, 3, 0 ), ( 0, 2, 0, 0, 0 ), ( 0, 11, 0, 0, 0 ) )
-    // CHECK-NEXT: ( 7, 7, 9, 8, 7, 7, 12, 11, 11, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 7, 0, 0, 7, 0 ), ( 9, 0, 0, 0, 0 ), ( 8, 7, 0, 7, 0 ), ( 12, 11, 0, 11, 0 ), ( 0, 0, 0, 0, 0 ) )
-    // CHECK-NEXT: ( 7, 7, 9, 8, 7, 7, 12, 11, 11, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 7, 0, 0, 7, 0 ), ( 9, 0, 0, 0, 0 ), ( 8, 7, 0, 7, 0 ), ( 12, 11, 0, 11, 0 ), ( 0, 0, 0, 0, 0 ) )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 4, 5 )
+    // CHECK-NEXT: lvl = ( 4, 5 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 9
+    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 2, 3, 4, 0, 2, 3
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 5, 4 )
+    // CHECK-NEXT: lvl = ( 5, 4 )
+    // CHECK-NEXT: pos[1] : ( 0, 1, 2, 4, 5, 6
+    // CHECK-NEXT: crd[1] : ( 0, 3, 0, 3, 1, 1
+    // CHECK-NEXT: values : ( 6, 5, 4, 3, 2, 11
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 9
+    // CHECK-NEXT: crd[1] : ( 0, 3, 0, 0, 1, 3, 0, 1, 3
+    // CHECK-NEXT: values : ( 7, 7, 9, 8, 7, 7, 12, 11, 11
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 9
+    // CHECK-NEXT: crd[1] : ( 0, 3, 0, 0, 1, 3, 0, 1, 3
+    // CHECK-NEXT: values : ( 7, 7, 9, 8, 7, 7, 12, 11, 11
+    // CHECK-NEXT: ----
     //
-    call @dump_mat(%sm1) : (tensor<?x?xf64, #CSR>) -> ()
-    call @dump_mat(%sm2r) : (tensor<?x?xf64, #CSR>) -> ()
-    call @dump_mat(%5) : (tensor<?x?xf64, #CSR>) -> ()
-    call @dump_mat(%6) : (tensor<?x?xf64, #CSR>) -> ()
+    sparse_tensor.print %sm1 : tensor<?x?xf64, #CSR>
+    sparse_tensor.print %sm2r : tensor<?x?xf64, #CSR>
+    sparse_tensor.print %5 : tensor<?x?xf64, #CSR>
+    sparse_tensor.print %6 : tensor<?x?xf64, #CSR>
 
     // Release the resources.
     bufferization.dealloc_tensor %sm1 : tensor<?x?xf64, #CSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir
index 6e2c572cf21ba0..18bf6a71c53058 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -89,39 +89,9 @@ module {
     return %0 : tensor<?xf64, #SparseVector>
   }
 
-  // Dumps a sparse vector of type f64.
-  func.func @dump_vec(%arg0: tensor<?xf64, #SparseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0.0 : f64
-    %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf64>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<8xf64>
-    vector.print %1 : vector<8xf64>
-    // Dump the dense vector to verify structure is correct.
-    %dv = sparse_tensor.convert %arg0 : tensor<?xf64, #SparseVector> to tensor<?xf64>
-    %2 = vector.transfer_read %dv[%c0], %d0: tensor<?xf64>, vector<16xf64>
-    vector.print %2 : vector<16xf64>
-    bufferization.dealloc_tensor %dv : tensor<?xf64>
-    return
-  }
-
-  // Dump a sparse matrix.
-  func.func @dump_mat(%arg0: tensor<?x?xf64, #CSR>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0.0 : f64
-    %0 = sparse_tensor.values %arg0 : tensor<?x?xf64, #CSR> to memref<?xf64>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<16xf64>
-    vector.print %1 : vector<16xf64>
-    %dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #CSR> to tensor<?x?xf64>
-    %2 = vector.transfer_read %dm[%c0, %c0], %d0: tensor<?x?xf64>, vector<5x5xf64>
-    vector.print %2 : vector<5x5xf64>
-    bufferization.dealloc_tensor %dm : tensor<?x?xf64>
-    return
-  }
 
   // Driver method to call and verify vector kernels.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
 
     // Setup sparse matrices.
@@ -144,15 +114,43 @@ module {
     //
     // Verify the results.
     //
-    // CHECK: ( 2, 3, 120, 504, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 2, 3, 120, 504, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 6, 5, 12, 2, 11, 0, 0, 0 )
-    // CHECK-NEXT: ( 6, 5, 12, 2, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 4, 5 )
+    // CHECK-NEXT: lvl = ( 4, 5 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 9
+    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 2, 3, 4, 0, 2, 3
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 5, 4 )
+    // CHECK-NEXT: lvl = ( 5, 4 )
+    // CHECK-NEXT: pos[1] : ( 0, 1, 2, 4, 5, 6
+    // CHECK-NEXT: crd[1] : ( 0, 3, 0, 3, 1, 1
+    // CHECK-NEXT: values : ( 6, 5, 4, 3, 2, 11
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 4
+    // CHECK-NEXT: dim = ( 4 )
+    // CHECK-NEXT: lvl = ( 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: values : ( 2, 3, 120, 504
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 5
+    // CHECK-NEXT: dim = ( 5 )
+    // CHECK-NEXT: lvl = ( 5 )
+    // CHECK-NEXT: pos[0] : ( 0, 5
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4
+    // CHECK-NEXT: values : ( 6, 5, 12, 2, 11
+    // CHECK-NEXT: ----
     //
-    call @dump_mat(%sm1) : (tensor<?x?xf64, #CSR>) -> ()
-    call @dump_mat(%sm2r) : (tensor<?x?xf64, #CSR>) -> ()
-    call @dump_vec(%1) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_vec(%2) : (tensor<?xf64, #SparseVector>) -> ()
+    sparse_tensor.print %sm1 : tensor<?x?xf64, #CSR>
+    sparse_tensor.print %sm2r : tensor<?x?xf64, #CSR>
+    sparse_tensor.print %1 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %2 : tensor<?xf64, #SparseVector>
 
     // Release the resources.
     bufferization.dealloc_tensor %sm1 : tensor<?x?xf64, #CSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_sum.mlir
index 80c35676e804b1..8588ebd98cc918 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_sum.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_sum.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -145,7 +145,7 @@ module {
     return
   }
 
-  func.func @entry() {
+  func.func @main() {
     %ri = arith.constant dense<0> : tensor<i32>
 
     //  Sparse vector of length 8 with 2 stored elements (and thus 6 implicit zeros).
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
index 4ad23d1c031238..96ec8bad8b5515 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -106,7 +106,7 @@ module {
     return
   }
 
-  func.func @entry() {
+  func.func @main() {
     %ri = arith.constant dense< 7   > : tensor<i32>
     %rf = arith.constant dense< 2.0 > : tensor<f32>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_min.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_min.mlir
index 198920024221b0..16c1d7df7e89dd 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_min.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_min.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -82,7 +82,7 @@ module {
     return
   }
 
-  func.func @entry() {
+  func.func @main() {
     %ri = arith.constant dense<999> : tensor<i32>
 
     // Vectors with a few zeros.
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_prod.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_prod.mlir
index 7ec30787fea464..4797fbb8f5319c 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_prod.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_prod.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -162,7 +162,7 @@ module {
     return
   }
 
-  func.func @entry() {
+  func.func @main() {
     // Note: Constants bufferize to read-only buffers.
     %ri = arith.constant dense< 7   > : tensor<i32>
     %rf = arith.constant dense< 2.0 > : tensor<f32>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir
index b551f9545dc436..4c26ebe6e401ba 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -63,7 +63,7 @@ module {
   }
 
 
-  func.func @entry() {
+  func.func @main() {
     %m = arith.constant dense <[ [ 1.1,  0.0,  1.3,  0.0 ],
                                  [ 2.1,  0.0,  2.3,  0.0 ],
                                  [ 3.1,  0.0,  3.3,  0.0 ]]> : tensor<3x4xf64>
@@ -76,20 +76,41 @@ module {
     %c0 = arith.constant 0 : index
     %df = arith.constant -1.0 : f64
 
-    // CHECK: ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3
-    %b0 = sparse_tensor.values %reshaped0: tensor<2x6xf64, #SparseMatrix> to memref<?xf64>
-    %v0 = vector.transfer_read %b0[%c0], %df: memref<?xf64>, vector<12xf64>
-    vector.print %v0 : vector<12xf64>
-
-    // CHECK: ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3
-    %b1 = sparse_tensor.values %reshaped1: tensor<12xf64, #SparseVector> to memref<?xf64>
-    %v1 = vector.transfer_read %b1[%c0], %df: memref<?xf64>, vector<12xf64>
-    vector.print %v1 : vector<12xf64>
-
-    // CHECK: ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3
-    %b2 = sparse_tensor.values %reshaped2: tensor<2x3x2xf64, #Sparse3dTensor> to memref<?xf64>
-    %v2 = vector.transfer_read %b2[%c0], %df: memref<?xf64>, vector<12xf64>
-    vector.print %v2: vector<12xf64>
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 2, 6 )
+    // CHECK-NEXT: lvl = ( 2, 6 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 1
+    // CHECK-NEXT: pos[1] : ( 0, 3, 6
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 0, 2, 4
+    // CHECK-NEXT: values : ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 12 )
+    // CHECK-NEXT: lvl = ( 12 )
+    // CHECK-NEXT: pos[0] : ( 0, 6
+    // CHECK-NEXT: crd[0] : ( 0, 2, 4, 6, 8, 10
+    // CHECK-NEXT: values : ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 2, 3, 2 )
+    // CHECK-NEXT: lvl = ( 2, 3, 2 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 1
+    // CHECK-NEXT: pos[1] : ( 0, 3, 6
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2
+    // CHECK-NEXT: pos[2] : ( 0, 1, 2, 3, 4, 5, 6
+    // CHECK-NEXT: crd[2] : ( 0, 0, 0, 0, 0, 0
+    // CHECK-NEXT: values : ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %reshaped0: tensor<2x6xf64, #SparseMatrix>
+    sparse_tensor.print %reshaped1: tensor<12xf64, #SparseVector>
+    sparse_tensor.print %reshaped2: tensor<2x3x2xf64, #Sparse3dTensor>
 
     bufferization.dealloc_tensor %sm : tensor<3x4xf64, #SparseMatrix>
     bufferization.dealloc_tensor %reshaped0 : tensor<2x6xf64, #SparseMatrix>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_push_back.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_push_back.mlir
index c2e83fc61c4b53..1536249e60f286 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_push_back.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_push_back.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -28,7 +28,7 @@
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
 module {
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
     %c10 = arith.constant 10 : index
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort_coo.mlir
index e7dd0ad32a2430..0682bc6f314fd1 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort_coo.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -64,7 +64,7 @@ module {
   }
 
   // The main driver.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : i32
     %c1 = arith.constant 1 : i32
     %c2 = arith.constant 2 : i32
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
index 3330d2249707fc..085b36a368704d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -81,7 +81,7 @@ module {
   //
   // Main driver that reads matrix from file and calls the sparse kernel.
   //
-  func.func @entry() {
+  func.func @main() {
     %d0 = arith.constant 0.0 : f32
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
index afaf36c4072c09..20a8c5f812de9b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -169,7 +169,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %d0 = arith.constant 0.0 : f64
     %c0 = arith.constant 0 : index
 
@@ -207,22 +207,36 @@ module {
     // CHECK-SAME: ( 0, 0, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0 ),
     // CHECK-SAME: ( 0, 0, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 192 ) )
     //
-    // CHECK-NEXT: ( 96, 192, 0, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 2
+    // CHECK-NEXT: dim = ( 8, 8 )
+    // CHECK-NEXT: lvl = ( 8, 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 7
+    // CHECK-NEXT: pos[1] : ( 0, 1, 2
+    // CHECK-NEXT: crd[1] : ( 0, 7
+    // CHECK-NEXT: values : ( 96, 192
+    // CHECK-NEXT: ----
     //
-    // CHECK-NEXT: ( 96, 192, 0, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 2
+    // CHECK-NEXT: dim = ( 8, 8 )
+    // CHECK-NEXT: lvl = ( 8, 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 7
+    // CHECK-NEXT: pos[1] : ( 0, 1, 2
+    // CHECK-NEXT: crd[1] : ( 0, 7
+    // CHECK-NEXT: values : ( 96, 192
+    // CHECK-NEXT: ----
     //
-    %m2 = sparse_tensor.values %2 : tensor<8x8xf64, #SM> to memref<?xf64>
-    %m3 = sparse_tensor.values %3 : tensor<8x8xf64, #SM> to memref<?xf64>
     %v0 = vector.transfer_read %0[%c0, %c0], %d0
         : tensor<8x8xf64>, vector<8x8xf64>
     %v1 = vector.transfer_read %1[%c0, %c0], %d0
         : tensor<8x8xf64>, vector<8x8xf64>
-    %v2 = vector.transfer_read %m2[%c0], %d0 : memref<?xf64>, vector<4xf64>
-    %v3 = vector.transfer_read %m3[%c0], %d0 : memref<?xf64>, vector<4xf64>
     vector.print %v0 : vector<8x8xf64>
     vector.print %v1 : vector<8x8xf64>
-    vector.print %v2 : vector<4xf64>
-    vector.print %v3 : vector<4xf64>
+    sparse_tensor.print %2 : tensor<8x8xf64, #SM>
+    sparse_tensor.print %3 : tensor<8x8xf64, #SM>
 
     // Release the resources.
     bufferization.dealloc_tensor %s : tensor<8x8xf64, #SM>


