[Mlir-commits] [mlir] 4cb5a96 - [mlir][sparse] Migrate more tests to sparse_tensor.print (#84249)

llvmlistbot at llvm.org
Thu Mar 7 11:02:25 PST 2024


Author: Yinying Li
Date: 2024-03-07T14:02:20-05:00
New Revision: 4cb5a96af646e18f9fc8c1b337299d5465f0a4d6

URL: https://github.com/llvm/llvm-project/commit/4cb5a96af646e18f9fc8c1b337299d5465f0a4d6
DIFF: https://github.com/llvm/llvm-project/commit/4cb5a96af646e18f9fc8c1b337299d5465f0a4d6.diff

LOG: [mlir][sparse] Migrate more tests to sparse_tensor.print (#84249)

Continued efforts following #83946.
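
In short, each test's hand-rolled dump code (reading the positions,
coordinates, and values buffers and printing them through the vector
dialect) is replaced by a single sparse_tensor.print operation, with the
FileCheck expectations updated to the printer's output format. The tests
also rename their entry point from @entry to @main (and -e entry to
-e main in the run lines). A minimal before/after sketch, lifted from the
sparse_scale.mlir hunk below:

    // Before: extract the values buffer and print it via the vector dialect.
    %m = sparse_tensor.values %2 : tensor<8x8xf32, #CSR> to memref<?xf32>
    %v = vector.transfer_read %m[%c0], %f0 : memref<?xf32>, vector<16xf32>
    vector.print %v : vector<16xf32>

    // After: print the whole tensor (nse, dim, lvl, pos, crd, values).
    sparse_tensor.print %2 : tensor<8x8xf32, #CSR>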

Added: 
    

Modified: 
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
index 6ec13fd623b5cd..4e9090ae201d02 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -65,7 +65,7 @@ module {
   // and then calls the sparse scaling kernel with the sparse tensor
   // as input argument.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %f0 = arith.constant 0.0 : f32
 
@@ -88,11 +88,16 @@ module {
 
     // Print the resulting compacted values for verification.
     //
-    // CHECK: ( 2, 2, 2, 4, 6, 8, 2, 10, 2, 2, 12, 2, 14, 2, 2, 16 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 16
+    // CHECK-NEXT: dim = ( 8, 8 )
+    // CHECK-NEXT: lvl = ( 8, 8 )
+    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 5, 6, 8, 11, 14, 16
+    // CHECK-NEXT: crd[1] : ( 0, 2, 7, 1, 2, 3, 1, 4, 1, 2, 5, 2, 6, 7, 2, 7
+    // CHECK-NEXT: values : ( 2, 2, 2, 4, 6, 8, 2, 10, 2, 2, 12, 2, 14, 2, 2, 16
+    // CHECK-NEXT: ----
     //
-    %m = sparse_tensor.values %2 : tensor<8x8xf32, #CSR> to memref<?xf32>
-    %v = vector.transfer_read %m[%c0], %f0: memref<?xf32>, vector<16xf32>
-    vector.print %v : vector<16xf32>
+    sparse_tensor.print %2 : tensor<8x8xf32, #CSR>
 
     // Release the resources.
     bufferization.dealloc_tensor %1 : tensor<8x8xf32, #CSR>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
index 439144fedeeb89..dd8396dc23b036 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -68,17 +68,7 @@ module @func_sparse.2 {
     return %1 : tensor<2x3x4xf64, #SparseMatrix>
   }
 
-  func.func @dump(%arg0: tensor<2x3x4xf64, #SparseMatrix>) {
-    %d0 = arith.constant 0.0 : f64
-    %c0 = arith.constant 0 : index
-    %dm = sparse_tensor.convert %arg0 : tensor<2x3x4xf64, #SparseMatrix> to tensor<2x3x4xf64>
-    %0 = vector.transfer_read %dm[%c0, %c0, %c0], %d0: tensor<2x3x4xf64>, vector<2x3x4xf64>
-    vector.print %0 : vector<2x3x4xf64>
-    bufferization.dealloc_tensor %dm : tensor<2x3x4xf64>
-    return
-  }
-
-  func.func public @entry() {
+  func.func public @main() {
     %src = arith.constant dense<[
      [  [  1.0,  2.0,  3.0,  4.0 ],
         [  5.0,  6.0,  7.0,  8.0 ],
@@ -96,10 +86,34 @@ module @func_sparse.2 {
     %sm_t = call @condition(%t, %sm) : (i1, tensor<2x3x4xf64, #SparseMatrix>) -> tensor<2x3x4xf64, #SparseMatrix>
     %sm_f = call @condition(%f, %sm) : (i1, tensor<2x3x4xf64, #SparseMatrix>) -> tensor<2x3x4xf64, #SparseMatrix>
 
-    // CHECK:      ( ( ( 0, 1, 2, 3 ), ( 4, 5, 6, 7 ), ( 8, 9, 10, 11 ) ), ( ( 12, 13, 14, 15 ), ( 16, 17, 18, 19 ), ( 20, 21, 22, 23 ) ) )
-    // CHECK-NEXT: ( ( ( 2, 3, 4, 5 ), ( 6, 7, 8, 9 ), ( 10, 11, 12, 13 ) ), ( ( 14, 15, 16, 17 ), ( 18, 19, 20, 21 ), ( 22, 23, 24, 25 ) ) )
-    call @dump(%sm_t) : (tensor<2x3x4xf64, #SparseMatrix>) -> ()
-    call @dump(%sm_f) : (tensor<2x3x4xf64, #SparseMatrix>) -> ()
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 2, 3, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 1
+    // CHECK-NEXT: pos[1] : ( 0, 3, 6
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2
+    // CHECK-NEXT: pos[2] : ( 0, 4, 8, 12, 16, 20, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: values : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 2, 3, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 1
+    // CHECK-NEXT: pos[1] : ( 0, 3, 6
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2
+    // CHECK-NEXT: pos[2] : ( 0, 4, 8, 12, 16, 20, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: values : ( 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %sm_t : tensor<2x3x4xf64, #SparseMatrix>
+    sparse_tensor.print %sm_f : tensor<2x3x4xf64, #SparseMatrix>
 
     bufferization.dealloc_tensor %sm : tensor<2x3x4xf64, #SparseMatrix>
     bufferization.dealloc_tensor %sm_t : tensor<2x3x4xf64, #SparseMatrix>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
index 533afb6644aeda..68bc17175e3b4b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -97,39 +97,8 @@ module {
     return %0 : tensor<?x?xf64, #CSR>
   }
 
-  // Dumps a sparse vector of type f64.
-  func.func @dump_vec(%arg0: tensor<?xf64, #SparseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0.0 : f64
-    %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf64>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<8xf64>
-    vector.print %1 : vector<8xf64>
-    // Dump the dense vector to verify structure is correct.
-    %dv = sparse_tensor.convert %arg0 : tensor<?xf64, #SparseVector> to tensor<?xf64>
-    %2 = vector.transfer_read %dv[%c0], %d0: tensor<?xf64>, vector<16xf64>
-    vector.print %2 : vector<16xf64>
-    bufferization.dealloc_tensor %dv : tensor<?xf64>
-    return
-  }
-
-  // Dump a sparse matrix.
-  func.func @dump_mat(%arg0: tensor<?x?xf64, #CSR>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0.0 : f64
-    %0 = sparse_tensor.values %arg0 : tensor<?x?xf64, #CSR> to memref<?xf64>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<16xf64>
-    vector.print %1 : vector<16xf64>
-    %dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #CSR> to tensor<?x?xf64>
-    %2 = vector.transfer_read %dm[%c0, %c0], %d0: tensor<?x?xf64>, vector<5x5xf64>
-    vector.print %2 : vector<5x5xf64>
-    bufferization.dealloc_tensor %dm : tensor<?x?xf64>
-    return
-  }
-
   // Driver method to call and verify vector kernels.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
 
     // Setup sparse matrices.
@@ -151,19 +120,43 @@ module {
     //
     // Verify the results.
     //
-    // CHECK:      ( 1, 2, -4, 0, 5, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 0, 2, 0, -4, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 0, 0, 0, 1, 0 ), ( 0, 0, 0, 0, 2 ), ( 0, 3, 0, 4, 0 ), ( 0, 0, 0, 5, 6 ), ( 0, 0, 7, 0, 0 ) )
-    // CHECK-NEXT: ( 1, 2, 5, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 0, 2, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 1, 2, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 0, 0, 0, 1, 0 ), ( 0, 0, 0, 0, 2 ), ( 0, 0, 0, 4, 0 ), ( 0, 0, 0, 0, 6 ), ( 0, 0, 0, 0, 0 ) )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 5
+    // CHECK-NEXT: dim = ( 10 )
+    // CHECK-NEXT: lvl = ( 10 )
+    // CHECK-NEXT: pos[0] : ( 0, 5
+    // CHECK-NEXT: crd[0] : ( 1, 3, 5, 7, 9
+    // CHECK-NEXT: values : ( 1, 2, -4, 0, 5
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 5, 5 )
+    // CHECK-NEXT: lvl = ( 5, 5 )
+    // CHECK-NEXT: pos[1] : ( 0, 1, 2, 4, 6, 7
+    // CHECK-NEXT: crd[1] : ( 3, 4, 1, 3, 3, 4, 2
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 3
+    // CHECK-NEXT: dim = ( 10 )
+    // CHECK-NEXT: lvl = ( 10 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 1, 3, 9
+    // CHECK-NEXT: values : ( 1, 2, 5
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 4
+    // CHECK-NEXT: dim = ( 5, 5 )
+    // CHECK-NEXT: lvl = ( 5, 5 )
+    // CHECK-NEXT: pos[1] : ( 0, 1, 2, 3, 4, 4
+    // CHECK-NEXT: crd[1] : ( 3, 4, 3, 4
+    // CHECK-NEXT: values : ( 1, 2, 4, 6
+    // CHECK-NEXT: ----
     //
-    call @dump_vec(%sv1) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_mat(%sm1) : (tensor<?x?xf64, #CSR>) -> ()
-    call @dump_vec(%1) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_mat(%2) : (tensor<?x?xf64, #CSR>) -> ()
+    sparse_tensor.print %sv1 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %sm1 : tensor<?x?xf64, #CSR>
+    sparse_tensor.print %1 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %2 : tensor<?x?xf64, #CSR>
 
     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir
index 6244be0ba7ab64..f4435c81117b2d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -60,7 +60,7 @@ module {
   }
 
   // Driver method to call and verify vector kernels.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0   : index
     %f0 = arith.constant 0.0 : f64
 
@@ -86,20 +86,24 @@ module {
                                                  tensor<5x5xf64, #DCSR>) -> tensor<5x5xf64, #DCSR>
 
 
-    // CHECK:     ( ( 0.1, 1.1, 0, 0, 0 ),
-    // CHECK-SAME:  ( 0, 1.1, 2.2, 0, 0 ),
-    // CHECK-SAME:  ( 0, 0, 2.1, 3.3, 0 ),
-    // CHECK-SAME:  ( 0, 0, 0, 3.1, 4.4 ),
-    // CHECK-SAME:  ( 0, 0, 0, 0, 4.1 ) )
-    %r = sparse_tensor.convert %1 : tensor<5x5xf64, #DCSR> to tensor<5x5xf64>
-    %v2 = vector.transfer_read %r[%c0, %c0], %f0 : tensor<5x5xf64>, vector<5x5xf64>
-    vector.print %v2 : vector<5x5xf64>
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 5, 5 )
+    // CHECK-NEXT: lvl = ( 5, 5 )
+    // CHECK-NEXT: pos[0] : ( 0, 5
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6, 8, 9
+    // CHECK-NEXT: crd[1] : ( 0, 1, 1, 2, 2, 3, 3, 4, 4
+    // CHECK-NEXT: values : ( 0.1, 1.1, 1.1, 2.2, 2.1, 3.3, 3.1, 4.4, 4.1
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %1 : tensor<5x5xf64, #DCSR>
 
     // Release the resources.
     bufferization.dealloc_tensor %sl: tensor<5x5xf64, #DCSR>
     bufferization.dealloc_tensor %sr: tensor<5x5xf64, #DCSR>
     bufferization.dealloc_tensor %1:  tensor<5x5xf64, #DCSR>
-    bufferization.dealloc_tensor %r : tensor<5x5xf64>
 
     return
   }

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
index 08e75dfa2c02ca..c09374918b7d6a 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -79,7 +79,7 @@ module {
   }
 
   // Driver method to call and verify sign kernel.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %du = arith.constant 0.0 : f64
 
@@ -110,11 +110,16 @@ module {
     //
     // Verify the results.
     //
-    // CHECK: ( -1, 1, -1, 1, 1, -1, nan, -nan, 1, -1, -0, 0, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 12
+    // CHECK-NEXT: crd[0] : ( 0, 3, 5, 11, 13, 17, 18, 20, 21, 28, 29, 31
+    // CHECK-NEXT: values : ( -1, 1, -1, 1, 1, -1, nan, -nan, 1, -1, -0, 0
+    // CHECK-NEXT: ----
     //
-    %1 = sparse_tensor.values %0 : tensor<?xf64, #SparseVector> to memref<?xf64>
-    %2 = vector.transfer_read %1[%c0], %du: memref<?xf64>, vector<13xf64>
-    vector.print %2 : vector<13xf64>
+    sparse_tensor.print %0 : tensor<?xf64, #SparseVector>
 
     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
index e0111f692601f0..7b3f9a2ce0e012 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -35,19 +35,19 @@
 !Filename = !llvm.ptr
 
 #SortedCOO = #sparse_tensor.encoding<{
-  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
+  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton(soa))
 }>
 
 #SortedCOOPermuted = #sparse_tensor.encoding<{
-  map = (d0, d1) -> (d1 : compressed(nonunique), d0 : singleton),
+  map = (d0, d1) -> (d1 : compressed(nonunique), d0 : singleton(soa)),
 }>
 
 #SortedCOO3D = #sparse_tensor.encoding<{
-  map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton)
+  map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique, soa), d2 : singleton(soa))
 }>
 
 #SortedCOO3DPermuted = #sparse_tensor.encoding<{
-  map = (d0, d1, d2) -> (d2 : compressed(nonunique), d0 : singleton(nonunique), d1 : singleton)
+  map = (d0, d1, d2) -> (d2 : compressed(nonunique), d0 : singleton(nonunique, soa), d1 : singleton(soa))
 
 }>
 
@@ -82,29 +82,7 @@ module {
     return %0 : tensor<?x?xf64, #SortedCOO>
   }
 
-  func.func @dumpi(%arg0: memref<?xindex>) {
-    %c0 = arith.constant 0 : index
-    %v = vector.transfer_read %arg0[%c0], %c0: memref<?xindex>, vector<20xindex>
-    vector.print %v : vector<20xindex>
-    return
-  }
-
-  func.func @dumpsi(%arg0: memref<?xindex, strided<[?], offset: ?>>) {
-    %c0 = arith.constant 0 : index
-    %v = vector.transfer_read %arg0[%c0], %c0: memref<?xindex, strided<[?], offset: ?>>, vector<20xindex>
-    vector.print %v : vector<20xindex>
-    return
-  }
-
-  func.func @dumpf(%arg0: memref<?xf64>) {
-    %c0 = arith.constant 0 : index
-    %nan = arith.constant 0x0 : f64
-    %v = vector.transfer_read %arg0[%c0], %nan: memref<?xf64>, vector<20xf64>
-    vector.print %v : vector<20xf64>
-    return
-  }
-
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
 
@@ -125,130 +103,88 @@ module {
     %4 = sparse_tensor.convert %m : tensor<5x4xf64> to tensor<?x?xf64, #SortedCOO>
 
     //
-    // CHECK:      ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 126, 127, 254, 1, 253, 2, 0, 1, 3, 98, 126, 127, 128, 249, 253, 255, 0, 0, 0 )
-    // CHECK-NEXT: ( -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17, 0, 0, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 17
+    // CHECK-NEXT: dim = ( 4, 256 )
+    // CHECK-NEXT: lvl = ( 4, 256 )
+    // CHECK-NEXT: pos[0] : ( 0, 17
+    // CHECK-NEXT: crd[0] : ( 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
+    // CHECK-NEXT: crd[1] : ( 0, 126, 127, 254, 1, 253, 2, 0, 1, 3, 98, 126, 127, 128, 249, 253, 255
+    // CHECK-NEXT: values : ( -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17
+    // CHECK-NEXT: ----
     //
-    %p0 = sparse_tensor.positions %0 { level = 0 : index }
-      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
-    %i00 = sparse_tensor.coordinates %0 { level = 0 : index }
-      : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
-    %i01 = sparse_tensor.coordinates %0 { level = 1 : index }
-      : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
-    %v0 = sparse_tensor.values %0
-      : tensor<?x?xf64, #SortedCOO> to memref<?xf64>
-    call @dumpi(%p0)  : (memref<?xindex>) -> ()
-    call @dumpsi(%i00) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpsi(%i01) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpf(%v0)  : (memref<?xf64>) -> ()
+    sparse_tensor.print %0 : tensor<?x?xf64, #SortedCOO>
 
     //
-    // CHECK-NEXT: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 1, 1, 2, 3, 98, 126, 126, 127, 127, 128, 249, 253, 253, 254, 255, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 3, 1, 3, 2, 3, 3, 0, 3, 0, 3, 3, 3, 1, 3, 0, 3, 0, 0, 0 )
-    // CHECK-NEXT: ( -1, 8, -5, -9, -7, 10, -11, 2, 12, -3, -13, 14, -15, 6, 16, 4, -17, 0, 0, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 17
+    // CHECK-NEXT: dim = ( 4, 256 )
+    // CHECK-NEXT: lvl = ( 256, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 17
+    // CHECK-NEXT: crd[0] : ( 0, 0, 1, 1, 2, 3, 98, 126, 126, 127, 127, 128, 249, 253, 253, 254, 255
+    // CHECK-NEXT: crd[1] : ( 0, 3, 1, 3, 2, 3, 3, 0, 3, 0, 3, 3, 3, 1, 3, 0, 3
+    // CHECK-NEXT: values : ( -1, 8, -5, -9, -7, 10, -11, 2, 12, -3, -13, 14, -15, 6, 16, 4, -17
+    // CHECK-NEXT: ----
     //
-    %p1 = sparse_tensor.positions %1 { level = 0 : index }
-      : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex>
-    %i10 = sparse_tensor.coordinates %1 { level = 0 : index }
-      : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex, strided<[?], offset: ?>>
-    %i11 = sparse_tensor.coordinates %1 { level = 1 : index }
-      : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex, strided<[?], offset: ?>>
-    %v1 = sparse_tensor.values %1
-      : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xf64>
-    call @dumpi(%p1)  : (memref<?xindex>) -> ()
-    call @dumpsi(%i10) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpsi(%i11) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpf(%v1)  : (memref<?xf64>) -> ()
+    sparse_tensor.print %1 : tensor<?x?xf64, #SortedCOOPermuted>
 
     //
-    // CHECK-NEXT: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 1, 1, 2, 2, 2, 2, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 1, 1, 2, 2, 2, 2, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 0 )
-    // CHECK-NEXT: ( 3, 63, 11, 100, 66, 61, 13, 43, 77, 10, 46, 61, 53, 3, 75, 22, 18, 0, 0, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 17
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 2, 3, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 17
+    // CHECK-NEXT: crd[0] : ( 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1
+    // CHECK-NEXT: crd[1] : ( 0, 0, 1, 1, 2, 2, 2, 2, 0, 0, 0, 1, 1, 1, 1, 2, 2
+    // CHECK-NEXT: crd[2] : ( 2, 3, 1, 2, 0, 1, 2, 3, 0, 2, 3, 0, 1, 2, 3, 1, 2
+    // CHECK-NEXT: values : ( 3, 63, 11, 100, 66, 61, 13, 43, 77, 10, 46, 61, 53, 3, 75, 22, 18
+    // CHECK-NEXT: ----
     //
-    %p2 = sparse_tensor.positions %2 { level = 0 : index }
-      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
-    %i20 = sparse_tensor.coordinates %2 { level = 0 : index }
-      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex, strided<[?], offset: ?>>
-    %i21 = sparse_tensor.coordinates %2 { level = 1 : index }
-      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex, strided<[?], offset: ?>>
-    %i22 = sparse_tensor.coordinates %2 { level = 2 : index }
-      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex, strided<[?], offset: ?>>
-    %v2 = sparse_tensor.values %2
-      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xf64>
-    call @dumpi(%p2)  : (memref<?xindex>) -> ()
-    call @dumpsi(%i20) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpsi(%i21) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpsi(%i21) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpf(%v2)  : (memref<?xf64>) -> ()
+    sparse_tensor.print %2 : tensor<?x?x?xf64, #SortedCOO3D>
 
     //
-    // CHECK-NEXT: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0 )
-    // CHECK-NEXT: ( 66, 77, 61, 11, 61, 53, 22, 3, 100, 13, 10, 3, 18, 63, 43, 46, 75, 0, 0, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 17
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 4, 2, 3 )
+    // CHECK-NEXT: pos[0] : ( 0, 17
+    // CHECK-NEXT: crd[0] : ( 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3
+    // CHECK-NEXT: crd[1] : ( 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1
+    // CHECK-NEXT: crd[2] : ( 2, 0, 1, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 1
+    // CHECK-NEXT: values : ( 66, 77, 61, 11, 61, 53, 22, 3, 100, 13, 10, 3, 18, 63, 43, 46, 75
+    // CHECK-NEXT: ----
     //
-    %p3 = sparse_tensor.positions %3 { level = 0 : index }
-      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
-    %i30 = sparse_tensor.coordinates %3 { level = 0 : index }
-      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex, strided<[?], offset: ?>>
-    %i31 = sparse_tensor.coordinates %3 { level = 1 : index }
-      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex, strided<[?], offset: ?>>
-    %i32 = sparse_tensor.coordinates %3 { level = 2 : index }
-      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex, strided<[?], offset: ?>>
-    %v3 = sparse_tensor.values %3
-      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xf64>
-    call @dumpi(%p3)  : (memref<?xindex>) -> ()
-    call @dumpsi(%i30) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpsi(%i31) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpsi(%i31) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpf(%v3)  : (memref<?xf64>) -> ()
+    sparse_tensor.print %3 : tensor<?x?x?xf64, #SortedCOO3DPermuted>
 
     //
-    // CHECK-NEXT: ( 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 3, 0, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-   // CHECK-NEXT: ( 6, 5, 4, 3, 2, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 5, 4 )
+    // CHECK-NEXT: lvl = ( 5, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 6
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 2, 3, 4
+    // CHECK-NEXT: crd[1] : ( 0, 3, 0, 3, 1, 1
+    // CHECK-NEXT: values : ( 6, 5, 4, 3, 2, 11
+    // CHECK-NEXT: ----
     //
-    %p4 = sparse_tensor.positions %4 { level = 0 : index }
-      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
-    %i40 = sparse_tensor.coordinates %4 { level = 0 : index }
-      : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
-    %i41 = sparse_tensor.coordinates %4 { level = 1 : index }
-      : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
-    %v4 = sparse_tensor.values %4
-      : tensor<?x?xf64, #SortedCOO> to memref<?xf64>
-    call @dumpi(%p4)  : (memref<?xindex>) -> ()
-    call @dumpsi(%i40) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpsi(%i41) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpf(%v4)  : (memref<?xf64>) -> ()
+    sparse_tensor.print %4 : tensor<?x?xf64, #SortedCOO>
 
     // And last but not least, an actual operation applied to COO.
     // Note that this performs the operation "in place".
     %5 = call @sparse_scale(%4) : (tensor<?x?xf64, #SortedCOO>) -> tensor<?x?xf64, #SortedCOO>
 
     //
-    // CHECK-NEXT: ( 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 3, 0, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 12, 10, 8, 6, 4, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 5, 4 )
+    // CHECK-NEXT: lvl = ( 5, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 6
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 2, 3, 4
+    // CHECK-NEXT: crd[1] : ( 0, 3, 0, 3, 1, 1
+    // CHECK-NEXT: values : ( 12, 10, 8, 6, 4, 22
+    // CHECK-NEXT: ----
     //
-    %p5 = sparse_tensor.positions %5 { level = 0 : index }
-      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
-    %i50 = sparse_tensor.coordinates %5 { level = 0 : index }
-      : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
-    %i51 = sparse_tensor.coordinates %5 { level = 1 : index }
-      : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
-    %v5 = sparse_tensor.values %5
-      : tensor<?x?xf64, #SortedCOO> to memref<?xf64>
-    call @dumpi(%p5)  : (memref<?xindex>) -> ()
-    call @dumpsi(%i50) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpsi(%i51) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpf(%v5)  : (memref<?xf64>) -> ()
+    sparse_tensor.print %5 : tensor<?x?xf64, #SortedCOO>
 
     // Release the resources.
     bufferization.dealloc_tensor %0 : tensor<?x?xf64, #SortedCOO>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
index 573b1a2aac2598..ca8bcd7744c8f4 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -76,7 +76,7 @@ module {
   //
   // Main driver that reads matrix from file and calls the sparse kernel.
   //
-  func.func @entry() {
+  func.func @main() {
     %i0 = arith.constant 0.0 : f64
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
index 8ca95f2139e49a..2ee189de7906ca 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -72,7 +72,7 @@ module {
   // are typically not concerned with such details, but the test ensures
   // everything is working "under the hood".
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
     %d0 = arith.constant 0.0 : f64
@@ -107,166 +107,103 @@ module {
     //
     // Inspect storage scheme of Dense.
     //
-    // CHECK:    ( 1, 0, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
-    // CHECK-SAME: 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0,
-    // CHECK-SAME: 0, 0, 0, 0, 6, 0, 0, 0, 0, 7, 8, 0, 0, 0, 0, 9,
-    // CHECK-SAME: 0, 0, 10, 0, 0, 0, 11, 12, 0, 13, 14, 0, 0, 0, 15, 16,
-    // CHECK-SAME: 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 80
+    // CHECK-NEXT: dim = ( 10, 8 )
+    // CHECK-NEXT: lvl = ( 10, 8 )
+    // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 7, 8, 0, 0, 0, 0, 9, 0, 0, 10, 0, 0, 0, 11, 12, 0, 13, 14, 0, 0, 0, 15, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0
+    // CHECK-NEXT: ----
     //
-    %5 = sparse_tensor.values %0 : tensor<10x8xf64, #Dense> to memref<?xf64>
-    %6 = vector.transfer_read %5[%c0], %d0: memref<?xf64>, vector<80xf64>
-    vector.print %6 : vector<80xf64>
+    sparse_tensor.print %0 : tensor<10x8xf64, #Dense>
 
     //
     // Inspect storage scheme of CSR.
     //
-    // positions(1)
-    // indices(1)
-    // values
     //
-    // CHECK: ( 0, 3, 3, 4, 5, 6, 9, 12, 16, 16, 17 )
-    // CHECK: ( 0, 2, 7, 2, 3, 4, 1, 2, 7, 2, 6, 7, 1, 2, 6, 7, 6 )
-    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 17
+    // CHECK-NEXT: dim = ( 10, 8 )
+    // CHECK-NEXT: lvl = ( 10, 8 )
+    // CHECK-NEXT: pos[1] : ( 0, 3, 3, 4, 5, 6, 9, 12, 16, 16, 17
+    // CHECK-NEXT: crd[1] : ( 0, 2, 7, 2, 3, 4, 1, 2, 7, 2, 6, 7, 1, 2, 6, 7, 6
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17
+    // CHECK-NEXT: ----
     //
-    %7 = sparse_tensor.positions %1 { level = 1 : index } : tensor<10x8xf64, #CSR> to memref<?xindex>
-    %8 = vector.transfer_read %7[%c0], %c0: memref<?xindex>, vector<11xindex>
-    vector.print %8 : vector<11xindex>
-    %9 = sparse_tensor.coordinates %1 { level = 1 : index } : tensor<10x8xf64, #CSR> to memref<?xindex>
-    %10 = vector.transfer_read %9[%c0], %c0: memref<?xindex>, vector<17xindex>
-    vector.print %10 : vector<17xindex>
-    %11 = sparse_tensor.values %1 : tensor<10x8xf64, #CSR> to memref<?xf64>
-    %12 = vector.transfer_read %11[%c0], %d0: memref<?xf64>, vector<17xf64>
-    vector.print %12 : vector<17xf64>
+    sparse_tensor.print %1 : tensor<10x8xf64, #CSR>
 
     //
     // Inspect storage scheme of DCSR.
     //
-    // positions(0)
-    // indices(0)
-    // positions(1)
-    // indices(1)
-    // values
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 17
+    // CHECK-NEXT: dim = ( 10, 8 )
+    // CHECK-NEXT: lvl = ( 10, 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 8
+    // CHECK-NEXT: crd[0] : ( 0, 2, 3, 4, 5, 6, 7, 9
+    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 5, 6, 9, 12, 16, 17
+    // CHECK-NEXT: crd[1] : ( 0, 2, 7, 2, 3, 4, 1, 2, 7, 2, 6, 7, 1, 2, 6, 7, 6
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17
+    // CHECK-NEXT: ----
     //
-    // CHECK: ( 0, 8 )
-    // CHECK: ( 0, 2, 3, 4, 5, 6, 7, 9 )
-    // CHECK: ( 0, 3, 4, 5, 6, 9, 12, 16, 17 )
-    // CHECK: ( 0, 2, 7, 2, 3, 4, 1, 2, 7, 2, 6, 7, 1, 2, 6, 7, 6 )
-    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 )
-    //
-    %13 = sparse_tensor.positions %2 { level = 0 : index } : tensor<10x8xf64, #DCSR> to memref<?xindex>
-    %14 = vector.transfer_read %13[%c0], %c0: memref<?xindex>, vector<2xindex>
-    vector.print %14 : vector<2xindex>
-    %15 = sparse_tensor.coordinates %2 { level = 0 : index } : tensor<10x8xf64, #DCSR> to memref<?xindex>
-    %16 = vector.transfer_read %15[%c0], %c0: memref<?xindex>, vector<8xindex>
-    vector.print %16 : vector<8xindex>
-    %17 = sparse_tensor.positions %2 { level = 1 : index } : tensor<10x8xf64, #DCSR> to memref<?xindex>
-    %18 = vector.transfer_read %17[%c0], %c0: memref<?xindex>, vector<9xindex>
-    vector.print %18 : vector<9xindex>
-    %19 = sparse_tensor.coordinates %2 { level = 1 : index } : tensor<10x8xf64, #DCSR> to memref<?xindex>
-    %20 = vector.transfer_read %19[%c0], %c0: memref<?xindex>, vector<17xindex>
-    vector.print %20 : vector<17xindex>
-    %21 = sparse_tensor.values %2 : tensor<10x8xf64, #DCSR> to memref<?xf64>
-    %22 = vector.transfer_read %21[%c0], %d0: memref<?xf64>, vector<17xf64>
-    vector.print %22 : vector<17xf64>
+    sparse_tensor.print %2 : tensor<10x8xf64, #DCSR>
 
     //
     // Inspect storage scheme of CSC.
     //
-    // positions(1)
-    // indices(1)
-    // values
-    //
-    // CHECK: ( 0, 1, 3, 8, 9, 10, 10, 13, 17 )
-    // CHECK: ( 0, 5, 7, 0, 2, 5, 6, 7, 3, 4, 6, 7, 9, 0, 5, 6, 7 )
-    // CHECK: ( 1, 7, 13, 2, 4, 8, 10, 14, 5, 6, 11, 15, 17, 3, 9, 12, 16 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 17
+    // CHECK-NEXT: dim = ( 10, 8 )
+    // CHECK-NEXT: lvl = ( 8, 10 )
+    // CHECK-NEXT: pos[1] : ( 0, 1, 3, 8, 9, 10, 10, 13, 17
+    // CHECK-NEXT: crd[1] : ( 0, 5, 7, 0, 2, 5, 6, 7, 3, 4, 6, 7, 9, 0, 5, 6, 7
+    // CHECK-NEXT: values : ( 1, 7, 13, 2, 4, 8, 10, 14, 5, 6, 11, 15, 17, 3, 9, 12, 16
+    // CHECK-NEXT: ----
     //
-    %23 = sparse_tensor.positions %3 { level = 1 : index } : tensor<10x8xf64, #CSC> to memref<?xindex>
-    %24 = vector.transfer_read %23[%c0], %c0: memref<?xindex>, vector<9xindex>
-    vector.print %24 : vector<9xindex>
-    %25 = sparse_tensor.coordinates %3 { level = 1 : index } : tensor<10x8xf64, #CSC> to memref<?xindex>
-    %26 = vector.transfer_read %25[%c0], %c0: memref<?xindex>, vector<17xindex>
-    vector.print %26 : vector<17xindex>
-    %27 = sparse_tensor.values %3 : tensor<10x8xf64, #CSC> to memref<?xf64>
-    %28 = vector.transfer_read %27[%c0], %d0: memref<?xf64>, vector<17xf64>
-    vector.print %28 : vector<17xf64>
+    sparse_tensor.print %3 : tensor<10x8xf64, #CSC>
 
     //
     // Inspect storage scheme of DCSC.
     //
-    // positions(0)
-    // indices(0)
-    // positions(1)
-    // indices(1)
-    // values
-    //
-    // CHECK: ( 0, 7 )
-    // CHECK: ( 0, 1, 2, 3, 4, 6, 7 )
-    // CHECK: ( 0, 1, 3, 8, 9, 10, 13, 17 )
-    // CHECK: ( 0, 5, 7, 0, 2, 5, 6, 7, 3, 4, 6, 7, 9, 0, 5, 6, 7 )
-    // CHECK: ( 1, 7, 13, 2, 4, 8, 10, 14, 5, 6, 11, 15, 17, 3, 9, 12, 16 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 17
+    // CHECK-NEXT: dim = ( 10, 8 )
+    // CHECK-NEXT: lvl = ( 8, 10 )
+    // CHECK-NEXT: pos[0] : ( 0, 7
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 6, 7
+    // CHECK-NEXT: pos[1] : ( 0, 1, 3, 8, 9, 10, 13, 17
+    // CHECK-NEXT: crd[1] : ( 0, 5, 7, 0, 2, 5, 6, 7, 3, 4, 6, 7, 9, 0, 5, 6, 7
+    // CHECK-NEXT: values : ( 1, 7, 13, 2, 4, 8, 10, 14, 5, 6, 11, 15, 17, 3, 9, 12, 16
+    // CHECK-NEXT: ----
     //
-    %29 = sparse_tensor.positions %4 { level = 0 : index } : tensor<10x8xf64, #DCSC> to memref<?xindex>
-    %30 = vector.transfer_read %29[%c0], %c0: memref<?xindex>, vector<2xindex>
-    vector.print %30 : vector<2xindex>
-    %31 = sparse_tensor.coordinates %4 { level = 0 : index } : tensor<10x8xf64, #DCSC> to memref<?xindex>
-    %32 = vector.transfer_read %31[%c0], %c0: memref<?xindex>, vector<7xindex>
-    vector.print %32 : vector<7xindex>
-    %33 = sparse_tensor.positions %4 { level = 1 : index } : tensor<10x8xf64, #DCSC> to memref<?xindex>
-    %34 = vector.transfer_read %33[%c0], %c0: memref<?xindex>, vector<8xindex>
-    vector.print %34 : vector<8xindex>
-    %35 = sparse_tensor.coordinates %4 { level = 1 : index } : tensor<10x8xf64, #DCSC> to memref<?xindex>
-    %36 = vector.transfer_read %35[%c0], %c0: memref<?xindex>, vector<17xindex>
-    vector.print %36 : vector<17xindex>
-    %37 = sparse_tensor.values %4 : tensor<10x8xf64, #DCSC> to memref<?xf64>
-    %38 = vector.transfer_read %37[%c0], %d0: memref<?xf64>, vector<17xf64>
-    vector.print %38 : vector<17xf64>
+    sparse_tensor.print %4 : tensor<10x8xf64, #DCSC>
 
     //
     // Inspect storage scheme of BlockRow.
     //
-    // positions(0)
-    // indices(0)
-    // values
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 64
+    // CHECK-NEXT: dim = ( 10, 8 )
+    // CHECK-NEXT: lvl = ( 10, 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 8
+    // CHECK-NEXT: crd[0] : ( 0, 2, 3, 4, 5, 6, 7, 9
+    // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 3, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 7, 8, 0, 0, 0, 0, 9, 0, 0, 10, 0, 0, 0, 11, 12, 0, 13, 14, 0, 0, 0, 15, 16, 0, 0, 0, 0, 0, 0, 17, 0
+    // CHECK-NEXT: ----
     //
-    // CHECK: ( 0, 8 )
-    // CHECK: ( 0, 2, 3, 4, 5, 6, 7, 9 )
-    // CHECK: ( 1, 0, 2, 0, 0, 0, 0, 3, 0, 0, 4, 0, 0, 0, 0, 0,
-    // CHECK-SAME: 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0,
-    // CHECK-SAME: 0, 7, 8, 0, 0, 0, 0, 9, 0, 0, 10, 0, 0, 0, 11, 12,
-    // CHECK-SAME: 0, 13, 14, 0, 0, 0, 15, 16, 0, 0, 0, 0, 0, 0, 17, 0 )
-    //
-    %39 = sparse_tensor.positions %x { level = 0 : index } : tensor<10x8xf64, #BlockRow> to memref<?xindex>
-    %40 = vector.transfer_read %39[%c0], %c0: memref<?xindex>, vector<2xindex>
-    vector.print %40 : vector<2xindex>
-    %41 = sparse_tensor.coordinates %x { level = 0 : index } : tensor<10x8xf64, #BlockRow> to memref<?xindex>
-    %42 = vector.transfer_read %41[%c0], %c0: memref<?xindex>, vector<8xindex>
-    vector.print %42 : vector<8xindex>
-    %43 = sparse_tensor.values %x : tensor<10x8xf64, #BlockRow> to memref<?xf64>
-    %44 = vector.transfer_read %43[%c0], %d0: memref<?xf64>, vector<64xf64>
-    vector.print %44 : vector<64xf64>
+    sparse_tensor.print %x : tensor<10x8xf64, #BlockRow>
 
     //
     // Inspect storage scheme of BlockCol.
     //
-    // positions(0)
-    // indices(0)
-    // values
-    //
-    // CHECK: ( 0, 7 )
-    // CHECK: ( 0, 1, 2, 3, 4, 6, 7 )
-    // CHECK: ( 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 13, 0, 0, 2, 0, 4, 0,
-    // CHECK-SAME: 0, 8, 10, 14, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
-    // CHECK-SAME: 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 15, 0, 17, 3, 0, 0, 0, 0, 9, 12, 16, 0, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 70
+    // CHECK-NEXT: dim = ( 10, 8 )
+    // CHECK-NEXT: lvl = ( 8, 10 )
+    // CHECK-NEXT: pos[0] : ( 0, 7
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 6, 7
+    // CHECK-NEXT: values : ( 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 13, 0, 0, 2, 0, 4, 0, 0, 8, 10, 14, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 15, 0, 17, 3, 0, 0, 0, 0, 9, 12, 16, 0, 0
+    // CHECK-NEXT: ----
     //
-    %45 = sparse_tensor.positions %y { level = 0 : index } : tensor<10x8xf64, #BlockCol> to memref<?xindex>
-    %46 = vector.transfer_read %45[%c0], %c0: memref<?xindex>, vector<2xindex>
-    vector.print %46 : vector<2xindex>
-    %47 = sparse_tensor.coordinates %y { level = 0 : index } : tensor<10x8xf64, #BlockCol> to memref<?xindex>
-    %48 = vector.transfer_read %47[%c0], %c0: memref<?xindex>, vector<7xindex>
-    vector.print %48 : vector<7xindex>
-    %49 = sparse_tensor.values %y : tensor<10x8xf64, #BlockCol> to memref<?xf64>
-    %50 = vector.transfer_read %49[%c0], %d0: memref<?xf64>, vector<70xf64>
-    vector.print %50 : vector<70xf64>
+    sparse_tensor.print %y : tensor<10x8xf64, #BlockCol>
 
     // Release the resources.
     bufferization.dealloc_tensor %0 : tensor<10x8xf64, #Dense>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir
index 5184083f665d56..2b2b8536fe39ed 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -78,7 +78,7 @@ func.func @conv_2d_nhwc_hwcf_dual_CDCC(%arg0: tensor<?x?x?x?xf32, #CDCC>, %arg1:
 }
 
 
-func.func @entry() {
+func.func @main() {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c3 = arith.constant 3 : index

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
index e6cbff231024ed..d1c58bfb6d59ef 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -75,7 +75,7 @@ module {
   //
   // Main driver that reads matrix from file and calls the sparse kernel.
   //
-  func.func @entry() {
+  func.func @main() {
     %d0 = arith.constant 0.0 : f64
     %c0 = arith.constant 0 : index
 

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir
index ee00a19a412306..16a8b50ab08e5c 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -67,7 +67,7 @@ module {
   //
   // Main driver that reads matrix from file and calls the sparse kernel.
   //
-  func.func @entry() {
+  func.func @main() {
     // Setup input sparse matrix from compressed constant.
     %d = arith.constant dense <[
        [ 1.1,  1.2,  0.0,  1.4 ],

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
index 5fdf636ef1230a..f95c163a57c164 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -75,7 +75,7 @@ module {
   //
   // Main driver that reads matrix from file and calls the sparse kernel.
   //
-  func.func @entry() {
+  func.func @main() {
     //%d0 = arith.constant 0.0 : complex<f64>
     %d0 = complex.constant [0.0 : f64, 0.0 : f64] : complex<f64>
     %c0 = arith.constant 0 : index

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir
index 6a34695229495d..30be587c8f6119 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -66,7 +66,7 @@ module {
   //
   // Main driver that reads matrix from file and calls the sparse kernel.
   //
-  func.func @entry() {
+  func.func @main() {
     // Setup input sparse matrix from compressed constant.
     %d = arith.constant dense <[
        [ 1.1,  1.2,  0.0,  1.4 ],

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir
index 336044d5660057..29bc744c992032 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -56,28 +56,8 @@ module {
     return %0 : tensor<?xf64, #SparseVector>
   }
 
-  // Dumps a sparse vector of type f64.
-  func.func @dump_vec_f64(%arg0: tensor<?xf64, #SparseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant -1.0 : f64
-    %n = sparse_tensor.number_of_entries %arg0: tensor<?xf64, #SparseVector>
-    vector.print %n : index
-    %0 = sparse_tensor.values %arg0
-      : tensor<?xf64, #SparseVector> to memref<?xf64>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<9xf64>
-    vector.print %1 : vector<9xf64>
-    // Dump the dense vector to verify structure is correct.
-    %dv = sparse_tensor.convert %arg0
-        : tensor<?xf64, #SparseVector> to tensor<?xf64>
-    %3 = vector.transfer_read %dv[%c0], %d0: tensor<?xf64>, vector<32xf64>
-    vector.print %3 : vector<32xf64>
-    bufferization.dealloc_tensor %dv : tensor<?xf64>
-    return
-  }
-
   // Driver method to call and verify vector kernels.
-  func.func @entry() {
+  func.func @main() {
     // Setup sparse vector.
     %v1 = arith.constant sparse<
        [ [0], [3], [11], [17], [20], [21], [28], [29], [31] ],
@@ -93,11 +73,16 @@ module {
     //
     // Verify the results (within some precision).
     //
-    // CHECK:      9
-    // CHECK-NEXT: {{( -0.761[0-9]*, 0.761[0-9]*, 0.96[0-9]*, 0.99[0-9]*, 0.99[0-9]*, 0.99[0-9]*, 0.99[0-9]*, 0.99[0-9]*, 1 )}}
-    // CHECK-NEXT: {{( -0.761[0-9]*, 0, 0, 0.761[0-9]*, 0, 0, 0, 0, 0, 0, 0, 0.96[0-9]*, 0, 0, 0, 0, 0, 0.99[0-9]*, 0, 0, 0.99[0-9]*, 0.99[0-9]*, 0, 0, 0, 0, 0, 0, 0.99[0-9]*, 0.99[0-9]*, 0, 1 )}}
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9
+    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31
+    // CHECK-NEXT: values : ({{ -0.761[0-9]*, 0.761[0-9]*, 0.96[0-9]*, 0.99[0-9]*, 0.99[0-9]*, 0.99[0-9]*, 0.99[0-9]*, 0.99[0-9]*, 1}}
+    // CHECK-NEXT: ----
     //
-    call @dump_vec_f64(%0) : (tensor<?xf64, #SparseVector>) -> ()
+    sparse_tensor.print %0 : tensor<?xf64, #SparseVector>
 
     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir
index d53b03025f5588..67155201c58442 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -67,7 +67,7 @@ module {
   }
 
   // Driver method to call and verify tensor multiplication kernel.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %default_val = arith.constant -1.0 : f64
 
@@ -103,30 +103,28 @@ module {
     %0 = call @tensor_mul(%sta, %stb)
       : (tensor<?x?x?xf64, #ST>, tensor<?x?x?xf64, #ST>) -> tensor<?x?x?xf64, #ST>
 
-    // Verify results
     //
-    // CHECK:      4
-    // CHECK-NEXT: ( 2.4, 3.5, 2, 8 )
-    // CHECK-NEXT: ( ( ( 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0 ), ( 2.4, 0, 3.5, 0, 0 ) ),
-    // CHECK-SAME: ( ( 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0 ) ),
-    // CHECK-SAME: ( ( 2, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0 ), ( 0, 0, 8, 0, 0 ) ) )
+    // Verify results.
     //
-    %n = sparse_tensor.number_of_entries %0 : tensor<?x?x?xf64, #ST>
-    vector.print %n : index
-    %m1 = sparse_tensor.values %0  : tensor<?x?x?xf64, #ST> to memref<?xf64>
-    %v1 = vector.transfer_read %m1[%c0], %default_val: memref<?xf64>, vector<4xf64>
-    vector.print %v1 : vector<4xf64>
-
-    // Print %0 in dense form.
-    %dt = sparse_tensor.convert %0 : tensor<?x?x?xf64, #ST> to tensor<?x?x?xf64>
-    %v2 = vector.transfer_read %dt[%c0, %c0, %c0], %default_val: tensor<?x?x?xf64>, vector<3x3x5xf64>
-    vector.print %v2 : vector<3x3x5xf64>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 4
+    // CHECK-NEXT: dim = ( 3, 3, 5 )
+    // CHECK-NEXT: lvl = ( 3, 3, 5 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 2
+    // CHECK-NEXT: pos[1] : ( 0, 1, 3
+    // CHECK-NEXT: crd[1] : ( 2, 0, 2
+    // CHECK-NEXT: pos[2] : ( 0, 2, 3, 4
+    // CHECK-NEXT: crd[2] : ( 0, 2, 0, 2
+    // CHECK-NEXT: values : ( 2.4, 3.5, 2, 8
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %0 : tensor<?x?x?xf64, #ST>
 
     // Release the resources.
     bufferization.dealloc_tensor %sta : tensor<?x?x?xf64, #ST>
     bufferization.dealloc_tensor %stb : tensor<?x?x?xf64, #ST>
     bufferization.dealloc_tensor %0  : tensor<?x?x?xf64, #ST>
-    bufferization.dealloc_tensor %dt : tensor<?x?x?xf64>
 
     return
   }

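The level arrays in the new expected output can be decoded back to the dense form the removed vector.print checks spelled out. For the tensor printed above: pos[0]/crd[0] say the outermost level stores entries at coordinates 0 and 2; pos[1] = ( 0, 1, 3 ) splits crd[1] = ( 2, 0, 2 ) into {2} under coordinate 0 and {0, 2} under coordinate 2; pos[2] = ( 0, 2, 3, 4 ) splits crd[2] = ( 0, 2, 0, 2 ) the same way. Walking the three levels gives the four stored entries (0,2,0) = 2.4, (0,2,2) = 3.5, (2,0,0) = 2 and (2,2,2) = 8, which is exactly the 3x3x5 dense tensor the old CHECK lines encoded.
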
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
index 6ef6b393019a8e..356808ebee3f7c 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -67,7 +67,7 @@ module {
   }
 
   // Driver method to call and verify tensor kernel.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %d1 = arith.constant -1.0 : f64
 
@@ -90,22 +90,34 @@ module {
     // Call sparse vector kernels.
     %0 = call @tensor_scale(%st) : (tensor<?x?x?xf64, #ST1>) -> tensor<?x?x?xf64, #ST2>
 
+    //
     // Sanity check on stored values.
     //
-    // CHECK:      5
-    // CHECK-NEXT: ( 1, 2, 3, 4, 5 )
-    // CHECK-NEXT: 24
-    // CHECK-NEXT: ( 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 6, 8, 0, 0, 0, 0, 10 )
-    %m1 = sparse_tensor.values %st : tensor<?x?x?xf64, #ST1> to memref<?xf64>
-    %m2 = sparse_tensor.values %0  : tensor<?x?x?xf64, #ST2> to memref<?xf64>
-    %n1 = sparse_tensor.number_of_entries %st : tensor<?x?x?xf64, #ST1>
-    %n2 = sparse_tensor.number_of_entries %0 : tensor<?x?x?xf64, #ST2>
-    %v1 = vector.transfer_read %m1[%c0], %d1: memref<?xf64>, vector<5xf64>
-    %v2 = vector.transfer_read %m2[%c0], %d1: memref<?xf64>, vector<24xf64>
-    vector.print %n1 : index
-    vector.print %v1 : vector<5xf64>
-    vector.print %n2 : index
-    vector.print %v2 : vector<24xf64>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 5
+    // CHECK-NEXT: dim = ( 3, 4, 8 )
+    // CHECK-NEXT: lvl = ( 3, 4, 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 2
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3
+    // CHECK-NEXT: crd[1] : ( 0, 3, 2
+    // CHECK-NEXT: pos[2] : ( 0, 1, 2, 5
+    // CHECK-NEXT: crd[2] : ( 0, 7, 1, 2, 7
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 3, 4, 8 )
+    // CHECK-NEXT: lvl = ( 3, 4, 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 2
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3
+    // CHECK-NEXT: crd[1] : ( 0, 3, 2
+    // CHECK-NEXT: values : ( 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 6, 8, 0, 0, 0, 0, 10
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %st : tensor<?x?x?xf64, #ST1>
+    sparse_tensor.print %0  : tensor<?x?x?xf64, #ST2>
 
     // Release the resources.
     bufferization.dealloc_tensor %st : tensor<?x?x?xf64, #ST1>

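Note that the second dump above has no pos[2]/crd[2] lines and nse = 24 even though only five values are nonzero: #ST2 evidently keeps its innermost level dense, so each of the three level-1 entries stores a full stride of 8 values, zeros included (3 x 8 = 24). A plausible shape for such an encoding (the actual #ST1/#ST2 definitions live earlier in the test, outside this diff):

    #ST2 = #sparse_tensor.encoding<{
      map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : dense)
    }>
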
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir
index 185f6161493e04..549c2082fcb3ac 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -92,7 +92,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
     %c4 = arith.constant 4 : index
@@ -115,26 +115,29 @@ module {
     //
     // Verify result.
     //
-    // CHECK:      ( 1.1, 0, 3.1 )
-    // CHECK-NEXT: ( 1.2, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 3.3 )
-    // CHECK-NEXT: ( 1.4, 0, 3.4 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 4, 3 )
+    // CHECK-NEXT: lvl = ( 4, 3 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 4, 6
+    // CHECK-NEXT: crd[1] : ( 0, 2, 0, 2, 0, 2
+    // CHECK-NEXT: values : ( 1.1, 3.1, 1.2, 3.3, 1.4, 3.4
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 4, 3 )
+    // CHECK-NEXT: lvl = ( 4, 3 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 4, 6
+    // CHECK-NEXT: crd[1] : ( 0, 2, 0, 2, 0, 2
+    // CHECK-NEXT: values : ( 1.1, 3.1, 1.2, 3.3, 1.4, 3.4
+    // CHECK-NEXT: ----
     //
-    // CHECK-NEXT: ( 1.1, 0, 3.1 )
-    // CHECK-NEXT: ( 1.2, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 3.3 )
-    // CHECK-NEXT: ( 1.4, 0, 3.4 )
-    //
-    %x = sparse_tensor.convert %0 : tensor<4x3xf64, #DCSR> to tensor<4x3xf64>
-    scf.for %i = %c0 to %c4 step %c1 {
-      %v1 = vector.transfer_read %x[%i, %c0], %du: tensor<4x3xf64>, vector<3xf64>
-      vector.print %v1 : vector<3xf64>
-    }
-    %y = sparse_tensor.convert %1 : tensor<4x3xf64, #DCSR> to tensor<4x3xf64>
-    scf.for %i = %c0 to %c4 step %c1 {
-      %v2 = vector.transfer_read %y[%i, %c0], %du: tensor<4x3xf64>, vector<3xf64>
-      vector.print %v2 : vector<3xf64>
-    }
+    sparse_tensor.print %0 : tensor<4x3xf64, #DCSR>
+    sparse_tensor.print %1 : tensor<4x3xf64, #DCSR>
 
     // Release resources.
     bufferization.dealloc_tensor %a : tensor<3x4xf64, #DCSR>

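The two identical dumps suggest the test computes the same 4x3 transpose through two different kernels and checks that they agree. For reference, a sparse transpose is typically expressed as a linalg.generic with permuted indexing maps; a sketch under that assumption (the real kernels are defined earlier in the file, and #DCSR is the test's own encoding):

    #transpose_trait = {
      indexing_maps = [
        affine_map<(i, j) -> (j, i)>,  // A, read transposed
        affine_map<(i, j) -> (i, j)>   // X, output
      ],
      iterator_types = ["parallel", "parallel"]
    }

    func.func @transpose(%arga: tensor<3x4xf64, #DCSR>) -> tensor<4x3xf64, #DCSR> {
      %init = tensor.empty() : tensor<4x3xf64, #DCSR>
      %0 = linalg.generic #transpose_trait
        ins(%arga: tensor<3x4xf64, #DCSR>)
        outs(%init: tensor<4x3xf64, #DCSR>) {
          ^bb0(%a: f64, %x: f64):
            linalg.yield %a : f64
      } -> tensor<4x3xf64, #DCSR>
      return %0 : tensor<4x3xf64, #DCSR>
    }
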
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
index dba897334830ad..cc6f6a068746d0 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -31,7 +31,7 @@
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
 #SortedCOO = #sparse_tensor.encoding<{
-  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
+  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton(soa))
 }>
 
 module {
@@ -52,7 +52,7 @@ module {
     return %1 : tensor<5x10xf32, #SortedCOO>
   }
 
-  func.func @entry() {
+  func.func @main() {
     %f0  = arith.constant 0.0 : f32
     %c0  = arith.constant 0   : index
     %c1  = arith.constant 1   : index
@@ -79,17 +79,27 @@ module {
     //
     // Verify original and transposed sorted COO.
     //
-    // CHECK:      ( 10, 20, 30, 40, 50, 11, 21, 31, 41, 51, 12, 22, 32, 42, 52, 13, 23, 33, 43, 53, 14, 24, 34, 44, 54, 15, 25, 35, 45, 55, 16, 26, 36, 46, 56, 17, 27, 37, 47, 57, 18, 28, 38, 48, 58, 19, 29, 39, 49, 59 )
-    // CHECK-NEXT: ( 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 50
+    // CHECK-NEXT: dim = ( 10, 5 )
+    // CHECK-NEXT: lvl = ( 10, 5 )
+    // CHECK-NEXT: pos[0] : ( 0, 50
+    // CHECK-NEXT: crd[0] : ( 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4
+    // CHECK-NEXT: values : ( 10, 20, 30, 40, 50, 11, 21, 31, 41, 51, 12, 22, 32, 42, 52, 13, 23, 33, 43, 53, 14, 24, 34, 44, 54, 15, 25, 35, 45, 55, 16, 26, 36, 46, 56, 17, 27, 37, 47, 57, 18, 28, 38, 48, 58, 19, 29, 39, 49, 59
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 50
+    // CHECK-NEXT: dim = ( 5, 10 )
+    // CHECK-NEXT: lvl = ( 5, 10 )
+    // CHECK-NEXT: pos[0] : ( 0, 50
+    // CHECK-NEXT: crd[0] : ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
+    // CHECK-NEXT: values : ( 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59
+    // CHECK-NEXT: ----
     //
-    %va  = sparse_tensor.values %SA
-      : tensor<10x5xf32, #SortedCOO> to memref<?xf32>
-    %vat = sparse_tensor.values %SAT
-      : tensor<5x10xf32, #SortedCOO> to memref<?xf32>
-    %v1 = vector.transfer_read %va[%c0],  %f0 : memref<?xf32>, vector<50xf32>
-    %v2 = vector.transfer_read %vat[%c0], %f0 : memref<?xf32>, vector<50xf32>
-    vector.print %v1 : vector<50xf32>
-    vector.print %v2 : vector<50xf32>
+    sparse_tensor.print %SA : tensor<10x5xf32, #SortedCOO>
+    sparse_tensor.print %SAT : tensor<5x10xf32, #SortedCOO>
 
     // Release resources.
     bufferization.dealloc_tensor %SA  : tensor<10x5xf32, #SortedCOO>

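The encoding change in this file is worth calling out: singleton(soa) explicitly requests a struct-of-arrays layout, where the singleton level keeps its coordinates in its own array rather than interleaving them with the parent level's coordinates (array-of-structs). The separate crd[0] and crd[1] lines in the dumps above reflect that layout. Side by side:

    // AoS: level coordinates interleaved in one buffer.
    #SortedCOO_AoS = #sparse_tensor.encoding<{
      map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
    }>

    // SoA: each singleton level owns its coordinate array.
    #SortedCOO_SoA = #sparse_tensor.encoding<{
      map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton(soa))
    }>
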
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
index e03f99253b7845..3da1e35818cfa5 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -204,54 +204,8 @@ module {
     return %0 : tensor<?x?xf64, #DCSR>
   }
 
-  // Dumps a sparse vector of type f64.
-  func.func @dump_vec_f64(%arg0: tensor<?xf64, #SparseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0.0 : f64
-    %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf64>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<32xf64>
-    vector.print %1 : vector<32xf64>
-    // Dump the dense vector to verify structure is correct.
-    %dv = sparse_tensor.convert %arg0 : tensor<?xf64, #SparseVector> to tensor<?xf64>
-    %3 = vector.transfer_read %dv[%c0], %d0: tensor<?xf64>, vector<32xf64>
-    vector.print %3 : vector<32xf64>
-    bufferization.dealloc_tensor %dv : tensor<?xf64>
-    return
-  }
-
-  // Dumps a sparse vector of type i32.
-  func.func @dump_vec_i32(%arg0: tensor<?xi32, #SparseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0 : i32
-    %0 = sparse_tensor.values %arg0 : tensor<?xi32, #SparseVector> to memref<?xi32>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xi32>, vector<24xi32>
-    vector.print %1 : vector<24xi32>
-    // Dump the dense vector to verify structure is correct.
-    %dv = sparse_tensor.convert %arg0 : tensor<?xi32, #SparseVector> to tensor<?xi32>
-    %3 = vector.transfer_read %dv[%c0], %d0: tensor<?xi32>, vector<32xi32>
-    vector.print %3 : vector<32xi32>
-    bufferization.dealloc_tensor %dv : tensor<?xi32>
-    return
-  }
-
-  // Dump a sparse matrix.
-  func.func @dump_mat(%arg0: tensor<?x?xf64, #DCSR>) {
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0.0 : f64
-    %0 = sparse_tensor.values %arg0 : tensor<?x?xf64, #DCSR> to memref<?xf64>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<16xf64>
-    vector.print %1 : vector<16xf64>
-    %dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64>
-    %3 = vector.transfer_read %dm[%c0, %c0], %d0: tensor<?x?xf64>, vector<4x8xf64>
-    vector.print %3 : vector<4x8xf64>
-    bufferization.dealloc_tensor %dm : tensor<?x?xf64>
-    return
-  }
-
   // Driver method to call and verify vector kernels.
-  func.func @entry() {
+  func.func @main() {
     %cmu = arith.constant -99 : i32
     %c0 = arith.constant 0 : index
 
@@ -289,26 +243,66 @@ module {
     //
     // Verify the results.
     //
-    // CHECK:      ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 4, 0, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
-    // CHECK-NEXT: ( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 )
-    // CHECK-NEXT: ( 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0 )
-    // CHECK-NEXT: ( -1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1, -3, 1, 1, 1, 1, 1, -4, 1, 1, -5, -6, 1, 1, 1, 1, 1, 1, -7, -8, 1, -9 )
-    // CHECK-NEXT: ( -1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1, -3, 1, 1, 1, 1, 1, -4, 1, 1, -5, -6, 1, 1, 1, 1, 1, 1, -7, -8, 1, -9 )
-    // CHECK-NEXT: ( 0, 6, 33, 68, 100, 126, 196, 232, 279, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 33, 0, 0, 0, 0, 0, 68, 0, 0, 100, 126, 0, 0, 0, 0, 0, 0, 196, 232, 0, 279 )
-    // CHECK-NEXT: ( 3, 3, 3, 4, 5, 6, 7, 7, 7, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 3, 3, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 3 ), ( 0, 0, 4, 0, 5, 0, 0, 6 ), ( 7, 0, 7, 7, 0, 0, 0, 0 ) )
-    // CHECK-NEXT: ( 99, 99, 99, 99, 5, 6, 99, 99, 99, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 99, 99, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 99 ), ( 0, 0, 99, 0, 5, 0, 0, 6 ), ( 99, 0, 99, 99, 0, 0, 0, 0 ) )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9
+    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 23
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 23
+    // CHECK-NEXT: crd[0] : ( 1, 2, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 18, 19, 22, 23, 24, 25, 26, 27, 30
+    // CHECK-NEXT: values : ( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 32
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 32
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+    // CHECK-NEXT: values : ( -1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1, -3, 1, 1, 1, 1, 1, -4, 1, 1, -5, -6, 1, 1, 1, 1, 1, 1, -7, -8, 1, -9
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9
+    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31
+    // CHECK-NEXT: values : ( 0, 6, 33, 68, 100, 126, 196, 232, 279
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 4, 8 )
+    // CHECK-NEXT: lvl = ( 4, 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 9
+    // CHECK-NEXT: crd[1] : ( 0, 1, 7, 2, 4, 7, 0, 2, 3
+    // CHECK-NEXT: values : ( 3, 3, 3, 4, 5, 6, 7, 7, 7
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 4, 8 )
+    // CHECK-NEXT: lvl = ( 4, 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 9
+    // CHECK-NEXT: crd[1] : ( 0, 1, 7, 2, 4, 7, 0, 2, 3
+    // CHECK-NEXT: values : ( 99, 99, 99, 99, 5, 6, 99, 99, 99
+    // CHECK-NEXT: ----
     // CHECK-NEXT: ( 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0 )
     //
-    call @dump_vec_f64(%sv1) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_vec_i32(%0) : (tensor<?xi32, #SparseVector>) -> ()
-    call @dump_vec_f64(%1) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_vec_f64(%2) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_mat(%3) : (tensor<?x?xf64, #DCSR>) -> ()
-    call @dump_mat(%4) : (tensor<?x?xf64, #DCSR>) -> ()
+    sparse_tensor.print %sv1 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %0 : tensor<?xi32, #SparseVector>
+    sparse_tensor.print %1 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %2 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %3 : tensor<?x?xf64, #DCSR>
+    sparse_tensor.print %4 : tensor<?x?xf64, #DCSR>
     %v = vector.transfer_read %5[%c0], %cmu: tensor<?xi32>, vector<32xi32>
     vector.print %v : vector<32xi32>
 

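Two details in this hunk: the complement vector printed second stores 23 entries, which together with the 9 entries of the input covers all 32 positions, so the unary kernel evidently emits a value exactly where the input has none; and the final dense result %5 is still verified with vector.transfer_read/vector.print, since sparse_tensor.print only applies to tensors with a sparse encoding. A complement like that is presumably built with sparse_tensor.unary along these lines (a sketch, inside the linalg.generic body of such a kernel; names are illustrative):

    // Present values are dropped; absent positions produce 1 : i32.
    %r = sparse_tensor.unary %a : f64 to i32
      present={}
      absent={
        %one = arith.constant 1 : i32
        sparse_tensor.yield %one : i32
      }
    linalg.yield %r : i32
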
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
index d9ca2dca85342a..55332333164130 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -162,24 +162,8 @@ module {
     return %0 : tensor<f64>
   }
 
-  // Dumps a sparse vector.
-  func.func @dump(%arg0: tensor<?xf64, #SparseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0.0 : f64
-    %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf64>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<16xf64>
-    vector.print %1 : vector<16xf64>
-    // Dump the dense vector to verify structure is correct.
-    %dv = sparse_tensor.convert %arg0 : tensor<?xf64, #SparseVector> to tensor<?xf64>
-    %2 = vector.transfer_read %dv[%c0], %d0: tensor<?xf64>, vector<32xf64>
-    vector.print %2 : vector<32xf64>
-    bufferization.dealloc_tensor %dv : tensor<?xf64>
-    return
-  }
-
   // Driver method to call and verify vector kernels.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %d1 = arith.constant 1.1 : f64
 
@@ -221,31 +205,69 @@ module {
     //
     // Verify the results.
     //
-    // CHECK:      ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 4, 0, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
-    // CHECK-NEXT: ( 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 11, 0, 12, 13, 0, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 15, 0, 16, 0, 0, 17, 0, 0, 0, 0, 0, 0, 18, 19, 0, 20 )
-    // CHECK-NEXT: ( 2, 4, 6, 8, 10, 12, 14, 16, 18, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 2, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 8, 0, 0, 10, 12, 0, 0, 0, 0, 0, 0, 14, 16, 0, 18 )
-    // CHECK-NEXT: ( 2, 4, 6, 8, 10, 12, 14, 16, 18, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 2, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 8, 0, 0, 10, 12, 0, 0, 0, 0, 0, 0, 14, 16, 0, 18 )
-    // CHECK-NEXT: ( 2, 11, 16, 13, 14, 6, 15, 8, 16, 10, 29, 32, 35, 38, 0, 0 )
-    // CHECK-NEXT: ( 2, 11, 0, 16, 13, 0, 0, 0, 0, 0, 14, 6, 0, 0, 0, 0, 15, 8, 16, 0, 10, 29, 0, 0, 0, 0, 0, 0, 32, 35, 0, 38 )
-    // CHECK-NEXT: ( 48, 204, 252, 304, 360, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 204, 0, 0, 0, 0, 0, 0, 252, 304, 0, 360 )
-    // CHECK-NEXT: ( 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 204, 0, 0, 0, 0, 0, 0, 252, 304, 0, 360 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9
+    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 10
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 10
+    // CHECK-NEXT: crd[0] : ( 1, 3, 4, 10, 16, 18, 21, 28, 29, 31
+    // CHECK-NEXT: values : ( 11, 12, 13, 14, 15, 16, 17, 18, 19, 20
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9
+    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31
+    // CHECK-NEXT: values : ( 2, 4, 6, 8, 10, 12, 14, 16, 18
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9
+    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31
+    // CHECK-NEXT: values : ( 2, 4, 6, 8, 10, 12, 14, 16, 18
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 14
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 14
+    // CHECK-NEXT: crd[0] : ( 0, 1, 3, 4, 10, 11, 16, 17, 18, 20, 21, 28, 29, 31
+    // CHECK-NEXT: values : ( 2, 11, 16, 13, 14, 6, 15, 8, 16, 10, 29, 32, 35, 38
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 5
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 5
+    // CHECK-NEXT: crd[0] : ( 3, 21, 28, 29, 31
+    // CHECK-NEXT: values : ( 48, 204, 252, 304, 360
+    // CHECK-NEXT: ----
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 32
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: values : ( 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 204, 0, 0, 0, 0, 0, 0, 252, 304, 0, 360
+    // CHECK-NEXT: ----
     // CHECK-NEXT: 1169.1
     //
-
-    call @dump(%sv1) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump(%sv2) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump(%0) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump(%1) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump(%2) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump(%3) : (tensor<?xf64, #SparseVector>) -> ()
-    %m4 = sparse_tensor.values %4 : tensor<?xf64, #DenseVector> to memref<?xf64>
-    %v4 = vector.load %m4[%c0]: memref<?xf64>, vector<32xf64>
-    vector.print %v4 : vector<32xf64>
+    sparse_tensor.print %sv1 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %sv2 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %0 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %1 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %2 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %3 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %4 : tensor<?xf64, #DenseVector>
     %v5 = tensor.extract %5[] : tensor<f64>
     vector.print %v5 : f64
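
The last dump also shows how the storage kind surfaces in the output: the #DenseVector result prints nse = 32 with no pos/crd arrays at all, because a dense level stores every position explicitly. Its definition presumably amounts to a single dense level (the encoding is declared earlier in the test, outside this diff):

    #DenseVector = #sparse_tensor.encoding<{
      map = (d0) -> (d0 : dense)
    }>

The scalar reduction, finally, lands in a rank-0 dense tensor, which is why it is read back with tensor.extract and vector.print rather than sparse_tensor.print.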