[Mlir-commits] [mlir] c1ac9a0 - [mlir][sparse] Finish migrating integration tests to use sparse_tensor.print (#84997)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Tue Mar 12 17:57:25 PDT 2024


Author: Yinying Li
Date: 2024-03-12T20:57:21-04:00
New Revision: c1ac9a09d04e9be8f8f4860416528baf3691848d

URL: https://github.com/llvm/llvm-project/commit/c1ac9a09d04e9be8f8f4860416528baf3691848d
DIFF: https://github.com/llvm/llvm-project/commit/c1ac9a09d04e9be8f8f4860416528baf3691848d.diff

LOG: [mlir][sparse] Finish migrating integration tests to use sparse_tensor.print (#84997)

Added: 
    

Modified: 
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_block.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dilated_conv_2d_nhwc_hwcf.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_generate.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
index f13c1c66df6dce..8024c128189597 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -46,28 +46,10 @@
 // Integration test that tests conversions between sparse tensors.
 //
 module {
-  //
-  // Output utilities.
-  //
-  func.func @dumpf64(%arg0: memref<?xf64>) {
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant -1.0 : f64
-    %0 = vector.transfer_read %arg0[%c0], %d0: memref<?xf64>, vector<24xf64>
-    vector.print %0 : vector<24xf64>
-    return
-  }
-  func.func @dumpidx(%arg0: memref<?xindex>) {
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0 : index
-    %0 = vector.transfer_read %arg0[%c0], %d0: memref<?xindex>, vector<25xindex>
-    vector.print %0 : vector<25xindex>
-    return
-  }
-
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
     %c2 = arith.constant 2 : index
@@ -110,195 +92,176 @@ module {
     %i = sparse_tensor.convert %3 : tensor<2x3x4xf64, #Tensor3> to tensor<2x3x4xf64, #Tensor3>
 
     //
-    // Check number_of_entries.
+    // Verify the outputs.
     //
-    // CHECK-COUNT-12: 24
-    %nv1 = sparse_tensor.number_of_entries %1 : tensor<2x3x4xf64, #Tensor1>
-    %nv2 = sparse_tensor.number_of_entries %2 : tensor<2x3x4xf64, #Tensor2>
-    %nv3 = sparse_tensor.number_of_entries %3 : tensor<2x3x4xf64, #Tensor3>
-    %nav = sparse_tensor.number_of_entries %a : tensor<2x3x4xf64, #Tensor1>
-    %nbv = sparse_tensor.number_of_entries %b : tensor<2x3x4xf64, #Tensor1>
-    %ncv = sparse_tensor.number_of_entries %c : tensor<2x3x4xf64, #Tensor1>
-    %ndv = sparse_tensor.number_of_entries %d : tensor<2x3x4xf64, #Tensor2>
-    %nev = sparse_tensor.number_of_entries %e : tensor<2x3x4xf64, #Tensor2>
-    %nfv = sparse_tensor.number_of_entries %f : tensor<2x3x4xf64, #Tensor2>
-    %ngv = sparse_tensor.number_of_entries %g : tensor<2x3x4xf64, #Tensor3>
-    %nhv = sparse_tensor.number_of_entries %h : tensor<2x3x4xf64, #Tensor3>
-    %niv = sparse_tensor.number_of_entries %i : tensor<2x3x4xf64, #Tensor3>
-    vector.print %nv1 : index
-    vector.print %nv2 : index
-    vector.print %nv3 : index
-    vector.print %nav : index
-    vector.print %nbv : index
-    vector.print %ncv : index
-    vector.print %ndv : index
-    vector.print %nev : index
-    vector.print %nfv : index
-    vector.print %ngv : index
-    vector.print %nhv : index
-    vector.print %niv : index
-
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 2, 3, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 1
+    // CHECK-NEXT: pos[1] : ( 0, 3, 6
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2
+    // CHECK-NEXT: pos[2] : ( 0, 4, 8, 12, 16, 20, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24
+    // CHECK-NEXT: ----
     //
-    // Check values.
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 3, 4, 2 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: pos[2] : ( 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: values : ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24
+    // CHECK-NEXT: ----
     //
-    // CHECK:      ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 )
-    // CHECK-NEXT: ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24 )
-    // CHECK-NEXT: ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24 )
-    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 )
-    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 )
-    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 )
-    // CHECK-NEXT: ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24 )
-    // CHECK-NEXT: ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24 )
-    // CHECK-NEXT: ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24 )
-    // CHECK-NEXT: ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24 )
-    // CHECK-NEXT: ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24 )
-    // CHECK-NEXT: ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 4, 2, 3 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6, 8
+    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: pos[2] : ( 0, 3, 6, 9, 12, 15, 18, 21, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2
+    // CHECK-NEXT: values : ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24
+    // CHECK-NEXT: ----
     //
-    %v1 = sparse_tensor.values %1 : tensor<2x3x4xf64, #Tensor1> to memref<?xf64>
-    %v2 = sparse_tensor.values %2 : tensor<2x3x4xf64, #Tensor2> to memref<?xf64>
-    %v3 = sparse_tensor.values %3 : tensor<2x3x4xf64, #Tensor3> to memref<?xf64>
-    %av = sparse_tensor.values %a : tensor<2x3x4xf64, #Tensor1> to memref<?xf64>
-    %bv = sparse_tensor.values %b : tensor<2x3x4xf64, #Tensor1> to memref<?xf64>
-    %cv = sparse_tensor.values %c : tensor<2x3x4xf64, #Tensor1> to memref<?xf64>
-    %dv = sparse_tensor.values %d : tensor<2x3x4xf64, #Tensor2> to memref<?xf64>
-    %ev = sparse_tensor.values %e : tensor<2x3x4xf64, #Tensor2> to memref<?xf64>
-    %fv = sparse_tensor.values %f : tensor<2x3x4xf64, #Tensor2> to memref<?xf64>
-    %gv = sparse_tensor.values %g : tensor<2x3x4xf64, #Tensor3> to memref<?xf64>
-    %hv = sparse_tensor.values %h : tensor<2x3x4xf64, #Tensor3> to memref<?xf64>
-    %iv = sparse_tensor.values %i : tensor<2x3x4xf64, #Tensor3> to memref<?xf64>
-
-    call @dumpf64(%v1) : (memref<?xf64>) -> ()
-    call @dumpf64(%v2) : (memref<?xf64>) -> ()
-    call @dumpf64(%v3) : (memref<?xf64>) -> ()
-    call @dumpf64(%av) : (memref<?xf64>) -> ()
-    call @dumpf64(%bv) : (memref<?xf64>) -> ()
-    call @dumpf64(%cv) : (memref<?xf64>) -> ()
-    call @dumpf64(%dv) : (memref<?xf64>) -> ()
-    call @dumpf64(%ev) : (memref<?xf64>) -> ()
-    call @dumpf64(%fv) : (memref<?xf64>) -> ()
-    call @dumpf64(%gv) : (memref<?xf64>) -> ()
-    call @dumpf64(%hv) : (memref<?xf64>) -> ()
-    call @dumpf64(%iv) : (memref<?xf64>) -> ()
-
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 2, 3, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 1
+    // CHECK-NEXT: pos[1] : ( 0, 3, 6
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2
+    // CHECK-NEXT: pos[2] : ( 0, 4, 8, 12, 16, 20, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24
+    // CHECK-NEXT: ----
     //
-    // Check coordinates.
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 2, 3, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 1
+    // CHECK-NEXT: pos[1] : ( 0, 3, 6
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2
+    // CHECK-NEXT: pos[2] : ( 0, 4, 8, 12, 16, 20, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24
+    // CHECK-NEXT: ----
     //
-    // CHECK-NEXT: ( 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 )
-    // CHECK-NEXT: ( 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0 )
-    // CHECK-NEXT: ( 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0 )
-    // CHECK-NEXT: ( 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 2, 3, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 1
+    // CHECK-NEXT: pos[1] : ( 0, 3, 6
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2
+    // CHECK-NEXT: pos[2] : ( 0, 4, 8, 12, 16, 20, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24
+    // CHECK-NEXT: ----
     //
-    %v10 = sparse_tensor.coordinates %1 { level = 0 : index } : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
-    %v11 = sparse_tensor.coordinates %1 { level = 1 : index } : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
-    %v12 = sparse_tensor.coordinates %1 { level = 2 : index } : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
-    %v20 = sparse_tensor.coordinates %2 { level = 0 : index } : tensor<2x3x4xf64, #Tensor2> to memref<?xindex>
-    %v21 = sparse_tensor.coordinates %2 { level = 1 : index } : tensor<2x3x4xf64, #Tensor2> to memref<?xindex>
-    %v22 = sparse_tensor.coordinates %2 { level = 2 : index } : tensor<2x3x4xf64, #Tensor2> to memref<?xindex>
-    %v30 = sparse_tensor.coordinates %3 { level = 0 : index } : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
-    %v31 = sparse_tensor.coordinates %3 { level = 1 : index } : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
-    %v32 = sparse_tensor.coordinates %3 { level = 2 : index } : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
-
-    %a10 = sparse_tensor.coordinates %a { level = 0 : index } : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
-    %a11 = sparse_tensor.coordinates %a { level = 1 : index } : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
-    %a12 = sparse_tensor.coordinates %a { level = 2 : index } : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
-    %b10 = sparse_tensor.coordinates %b { level = 0 : index } : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
-    %b11 = sparse_tensor.coordinates %b { level = 1 : index } : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
-    %b12 = sparse_tensor.coordinates %b { level = 2 : index } : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
-    %c10 = sparse_tensor.coordinates %c { level = 0 : index } : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
-    %c11 = sparse_tensor.coordinates %c { level = 1 : index } : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
-    %c12 = sparse_tensor.coordinates %c { level = 2 : index } : tensor<2x3x4xf64, #Tensor1> to memref<?xindex>
-
-    %d20 = sparse_tensor.coordinates %d { level = 0 : index } : tensor<2x3x4xf64, #Tensor2> to memref<?xindex>
-    %d21 = sparse_tensor.coordinates %d { level = 1 : index } : tensor<2x3x4xf64, #Tensor2> to memref<?xindex>
-    %d22 = sparse_tensor.coordinates %d { level = 2 : index } : tensor<2x3x4xf64, #Tensor2> to memref<?xindex>
-    %e20 = sparse_tensor.coordinates %e { level = 0 : index } : tensor<2x3x4xf64, #Tensor2> to memref<?xindex>
-    %e21 = sparse_tensor.coordinates %e { level = 1 : index } : tensor<2x3x4xf64, #Tensor2> to memref<?xindex>
-    %e22 = sparse_tensor.coordinates %e { level = 2 : index } : tensor<2x3x4xf64, #Tensor2> to memref<?xindex>
-    %f20 = sparse_tensor.coordinates %f { level = 0 : index } : tensor<2x3x4xf64, #Tensor2> to memref<?xindex>
-    %f21 = sparse_tensor.coordinates %f { level = 1 : index } : tensor<2x3x4xf64, #Tensor2> to memref<?xindex>
-    %f22 = sparse_tensor.coordinates %f { level = 2 : index } : tensor<2x3x4xf64, #Tensor2> to memref<?xindex>
-
-    %g30 = sparse_tensor.coordinates %g { level = 0 : index } : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
-    %g31 = sparse_tensor.coordinates %g { level = 1 : index } : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
-    %g32 = sparse_tensor.coordinates %g { level = 2 : index } : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
-    %h30 = sparse_tensor.coordinates %h { level = 0 : index } : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
-    %h31 = sparse_tensor.coordinates %h { level = 1 : index } : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
-    %h32 = sparse_tensor.coordinates %h { level = 2 : index } : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
-    %i30 = sparse_tensor.coordinates %i { level = 0 : index } : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
-    %i31 = sparse_tensor.coordinates %i { level = 1 : index } : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
-    %i32 = sparse_tensor.coordinates %i { level = 2 : index } : tensor<2x3x4xf64, #Tensor3> to memref<?xindex>
-
-    call @dumpidx(%v10) : (memref<?xindex>) -> ()
-    call @dumpidx(%v11) : (memref<?xindex>) -> ()
-    call @dumpidx(%v12) : (memref<?xindex>) -> ()
-    call @dumpidx(%v20) : (memref<?xindex>) -> ()
-    call @dumpidx(%v21) : (memref<?xindex>) -> ()
-    call @dumpidx(%v22) : (memref<?xindex>) -> ()
-    call @dumpidx(%v30) : (memref<?xindex>) -> ()
-    call @dumpidx(%v31) : (memref<?xindex>) -> ()
-    call @dumpidx(%v32) : (memref<?xindex>) -> ()
-
-    call @dumpidx(%a10) : (memref<?xindex>) -> ()
-    call @dumpidx(%a11) : (memref<?xindex>) -> ()
-    call @dumpidx(%a12) : (memref<?xindex>) -> ()
-    call @dumpidx(%b10) : (memref<?xindex>) -> ()
-    call @dumpidx(%b11) : (memref<?xindex>) -> ()
-    call @dumpidx(%b12) : (memref<?xindex>) -> ()
-    call @dumpidx(%c10) : (memref<?xindex>) -> ()
-    call @dumpidx(%c11) : (memref<?xindex>) -> ()
-    call @dumpidx(%c12) : (memref<?xindex>) -> ()
-
-    call @dumpidx(%d20) : (memref<?xindex>) -> ()
-    call @dumpidx(%d21) : (memref<?xindex>) -> ()
-    call @dumpidx(%d22) : (memref<?xindex>) -> ()
-    call @dumpidx(%e20) : (memref<?xindex>) -> ()
-    call @dumpidx(%e21) : (memref<?xindex>) -> ()
-    call @dumpidx(%e22) : (memref<?xindex>) -> ()
-    call @dumpidx(%f20) : (memref<?xindex>) -> ()
-    call @dumpidx(%f21) : (memref<?xindex>) -> ()
-    call @dumpidx(%f22) : (memref<?xindex>) -> ()
-
-    call @dumpidx(%g30) : (memref<?xindex>) -> ()
-    call @dumpidx(%g31) : (memref<?xindex>) -> ()
-    call @dumpidx(%g32) : (memref<?xindex>) -> ()
-    call @dumpidx(%h30) : (memref<?xindex>) -> ()
-    call @dumpidx(%h31) : (memref<?xindex>) -> ()
-    call @dumpidx(%h32) : (memref<?xindex>) -> ()
-    call @dumpidx(%i30) : (memref<?xindex>) -> ()
-    call @dumpidx(%i31) : (memref<?xindex>) -> ()
-    call @dumpidx(%i32) : (memref<?xindex>) -> ()
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 3, 4, 2 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: pos[2] : ( 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: values : ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 3, 4, 2 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: pos[2] : ( 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: values : ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 3, 4, 2 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: pos[2] : ( 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: values : ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 4, 2, 3 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6, 8
+    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: pos[2] : ( 0, 3, 6, 9, 12, 15, 18, 21, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2
+    // CHECK-NEXT: values : ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 4, 2, 3 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6, 8
+    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: pos[2] : ( 0, 3, 6, 9, 12, 15, 18, 21, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2
+    // CHECK-NEXT: values : ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 24
+    // CHECK-NEXT: dim = ( 2, 3, 4 )
+    // CHECK-NEXT: lvl = ( 4, 2, 3 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6, 8
+    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: pos[2] : ( 0, 3, 6, 9, 12, 15, 18, 21, 24
+    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2
+    // CHECK-NEXT: values : ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %1 : tensor<2x3x4xf64, #Tensor1>
+    sparse_tensor.print %2 : tensor<2x3x4xf64, #Tensor2>
+    sparse_tensor.print %3 : tensor<2x3x4xf64, #Tensor3>
+    sparse_tensor.print %a : tensor<2x3x4xf64, #Tensor1>
+    sparse_tensor.print %b : tensor<2x3x4xf64, #Tensor1>
+    sparse_tensor.print %c : tensor<2x3x4xf64, #Tensor1>
+    sparse_tensor.print %d : tensor<2x3x4xf64, #Tensor2>
+    sparse_tensor.print %e : tensor<2x3x4xf64, #Tensor2>
+    sparse_tensor.print %f : tensor<2x3x4xf64, #Tensor2>
+    sparse_tensor.print %g : tensor<2x3x4xf64, #Tensor3>
+    sparse_tensor.print %h : tensor<2x3x4xf64, #Tensor3>
+    sparse_tensor.print %i : tensor<2x3x4xf64, #Tensor3>
 
     // Release the resources.
     bufferization.dealloc_tensor %1 : tensor<2x3x4xf64, #Tensor1>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_block.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_block.mlir
index 809414ba977d2b..ff22283f43a793 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_block.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_block.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -52,21 +52,10 @@
 // Integration test that tests conversions between sparse tensors.
 //
 module {
-  //
-  // Output utilities.
-  //
-  func.func @dumpf64(%arg0: memref<?xf64>) {
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant -1.0 : f64
-    %0 = vector.transfer_read %arg0[%c0], %d0: memref<?xf64>, vector<8xf64>
-    vector.print %0 : vector<8xf64>
-    return
-  }
-
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
     %c2 = arith.constant 2 : index
@@ -88,20 +77,47 @@ module {
     %3 = sparse_tensor.convert %1 : tensor<2x4xf64, #BSR> to tensor<2x4xf64, #CSR>
     %4 = sparse_tensor.convert %1 : tensor<2x4xf64, #BSR> to tensor<2x4xf64, #CSC>
 
-    %v1 = sparse_tensor.values %1 : tensor<2x4xf64, #BSR> to memref<?xf64>
-    %v2 = sparse_tensor.values %2 : tensor<2x4xf64, #BSR> to memref<?xf64>
-    %v3 = sparse_tensor.values %3 : tensor<2x4xf64, #CSR> to memref<?xf64>
-    %v4 = sparse_tensor.values %4 : tensor<2x4xf64, #CSC> to memref<?xf64>
-
-
-    // CHECK:      ( 1, 2, 5, 6, 3, 4, 7, 8 )
-    // CHECK-NEXT: ( 1, 2, 5, 6, 3, 4, 7, 8 )
-    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8 )
-    // CHECK-NEXT: ( 1, 5, 2, 6, 3, 7, 4, 8 )
-    call @dumpf64(%v1) : (memref<?xf64>) -> ()
-    call @dumpf64(%v2) : (memref<?xf64>) -> ()
-    call @dumpf64(%v3) : (memref<?xf64>) -> ()
-    call @dumpf64(%v4) : (memref<?xf64>) -> ()
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 2, 4 )
+    // CHECK-NEXT: lvl = ( 1, 2, 2, 2 )
+    // CHECK-NEXT: pos[1] : ( 0, 2
+    // CHECK-NEXT: crd[1] : ( 0, 1
+    // CHECK-NEXT: values : ( 1, 2, 5, 6, 3, 4, 7, 8
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 2, 4 )
+    // CHECK-NEXT: lvl = ( 1, 2, 2, 2 )
+    // CHECK-NEXT: pos[1] : ( 0, 2
+    // CHECK-NEXT: crd[1] : ( 0, 1
+    // CHECK-NEXT: values : ( 1, 2, 5, 6, 3, 4, 7, 8
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 2, 4 )
+    // CHECK-NEXT: lvl = ( 2, 4 )
+    // CHECK-NEXT: pos[1] : ( 0, 4, 8
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 2, 4 )
+    // CHECK-NEXT: lvl = ( 4, 2 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6, 8
+    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: values : ( 1, 5, 2, 6, 3, 7, 4, 8
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %1 : tensor<2x4xf64, #BSR>
+    sparse_tensor.print %2 : tensor<2x4xf64, #BSR>
+    sparse_tensor.print %3 : tensor<2x4xf64, #CSR>
+    sparse_tensor.print %4 : tensor<2x4xf64, #CSC>
 
     // TODO: Fix memory leaks.
     bufferization.dealloc_tensor %1 : tensor<2x4xf64, #BSR>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
index f658457fa67356..11baf65e635091 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -44,19 +44,7 @@
 // may change (the actual underlying sizes obviously never change).
 //
 module {
-
-  func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }
-
-  //
-  // Helper method to print values array. The transfer actually
-  // reads more than required to verify size of buffer as well.
-  //
-  func.func @dump(%arg0: memref<?xf64>) {
-    call @printMemref1dF64(%arg0) : (memref<?xf64>) -> ()
-    return
-  }
-
-  func.func @entry() {
+  func.func @main() {
     %t1 = arith.constant sparse<
       [ [0,0], [0,1], [0,63], [1,0], [1,1], [31,0], [31,63] ],
         [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0 ]> : tensor<32x64xf64>
@@ -72,45 +60,81 @@ module {
     %5 = sparse_tensor.convert %3 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64, #DCSC>
     %6 = sparse_tensor.convert %4 : tensor<?x?xf64, #DCSC> to tensor<?x?xf64, #DCSR>
 
-//
-    // Check number_of_entries.
     //
-    // CHECK-COUNT-6: 7
-    %n1 = sparse_tensor.number_of_entries %1 : tensor<?x?xf64, #DCSR>
-    %n2 = sparse_tensor.number_of_entries %2 : tensor<?x?xf64, #DCSC>
-    %n3 = sparse_tensor.number_of_entries %3 : tensor<?x?xf64, #DCSR>
-    %n4 = sparse_tensor.number_of_entries %4 : tensor<?x?xf64, #DCSC>
-    %n5 = sparse_tensor.number_of_entries %5 : tensor<?x?xf64, #DCSC>
-    %n6 = sparse_tensor.number_of_entries %6 : tensor<?x?xf64, #DCSR>
-    vector.print %n1 : index
-    vector.print %n2 : index
-    vector.print %n3 : index
-    vector.print %n4 : index
-    vector.print %n5 : index
-    vector.print %n6 : index
-
+    // Verify the outputs.
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 32, 64 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 31
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 63, 0, 1, 0, 63
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 64, 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 63
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 31, 0, 1, 0, 31
+    // CHECK-NEXT: values : ( 1, 4, 6, 2, 5, 3, 7
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 32, 64 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 31
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 63, 0, 1, 0, 63
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 64, 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 63
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 31, 0, 1, 0, 31
+    // CHECK-NEXT: values : ( 1, 4, 6, 2, 5, 3, 7
+    // CHECK-NEXT: ----
     //
-    // All proper row-/column-wise?
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 64, 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 63
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 31, 0, 1, 0, 31
+    // CHECK-NEXT: values : ( 1, 4, 6, 2, 5, 3, 7
+    // CHECK-NEXT: ----
     //
-    // CHECK: [1,  2,  3,  4,  5,  6,  7
-    // CHECK: [1,  4,  6,  2,  5,  3,  7
-    // CHECK: [1,  2,  3,  4,  5,  6,  7
-    // CHECK: [1,  4,  6,  2,  5,  3,  7
-    // CHECK: [1,  4,  6,  2,  5,  3,  7
-    // CHECK: [1,  2,  3,  4,  5,  6,  7
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 32, 64 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 31
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 63, 0, 1, 0, 63
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: ----
     //
-    %m1 = sparse_tensor.values %1 : tensor<?x?xf64, #DCSR> to memref<?xf64>
-    %m2 = sparse_tensor.values %2 : tensor<?x?xf64, #DCSC> to memref<?xf64>
-    %m3 = sparse_tensor.values %3 : tensor<?x?xf64, #DCSR> to memref<?xf64>
-    %m4 = sparse_tensor.values %4 : tensor<?x?xf64, #DCSC> to memref<?xf64>
-    %m5 = sparse_tensor.values %5 : tensor<?x?xf64, #DCSC> to memref<?xf64>
-    %m6 = sparse_tensor.values %6 : tensor<?x?xf64, #DCSR> to memref<?xf64>
-    call @dump(%m1) : (memref<?xf64>) -> ()
-    call @dump(%m2) : (memref<?xf64>) -> ()
-    call @dump(%m3) : (memref<?xf64>) -> ()
-    call @dump(%m4) : (memref<?xf64>) -> ()
-    call @dump(%m5) : (memref<?xf64>) -> ()
-    call @dump(%m6) : (memref<?xf64>) -> ()
+    sparse_tensor.print %1 : tensor<?x?xf64, #DCSR>
+    sparse_tensor.print %2 : tensor<?x?xf64, #DCSC>
+    sparse_tensor.print %3 : tensor<?x?xf64, #DCSR>
+    sparse_tensor.print %4 : tensor<?x?xf64, #DCSC>
+    sparse_tensor.print %5 : tensor<?x?xf64, #DCSC>
+    sparse_tensor.print %6 : tensor<?x?xf64, #DCSR>
 
     // Release the resources.
     bufferization.dealloc_tensor %1 : tensor<?x?xf64, #DCSR>

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
index 81f68366be28c6..a2ec6df392aaa3 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -55,7 +55,7 @@ module {
   //
   // The first test suite (for non-singleton LevelTypes).
   //
-  func.func @entry() {
+  func.func @main() {
     //
     // Initialize a 3-dim dense tensor.
     //

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
index 3ebe3be757d2d0..6005aa6cfeaed6 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -54,41 +54,7 @@
 // in addition to layout.
 //
 module {
-
-  //
-  // Helper method to print values and indices arrays. The transfer actually
-  // reads more than required to verify size of buffer as well.
-  //
-  func.func @dumpf64(%arg0: memref<?xf64>) {
-    %c = arith.constant 0 : index
-    %d = arith.constant 0.0 : f64
-    %0 = vector.transfer_read %arg0[%c], %d: memref<?xf64>, vector<8xf64>
-    vector.print %0 : vector<8xf64>
-    return
-  }
-  func.func @dumpi08(%arg0: memref<?xi8>) {
-    %c = arith.constant 0 : index
-    %d = arith.constant 0 : i8
-    %0 = vector.transfer_read %arg0[%c], %d: memref<?xi8>, vector<8xi8>
-    vector.print %0 : vector<8xi8>
-    return
-  }
-  func.func @dumpi32(%arg0: memref<?xi32>) {
-    %c = arith.constant 0 : index
-    %d = arith.constant 0 : i32
-    %0 = vector.transfer_read %arg0[%c], %d: memref<?xi32>, vector<8xi32>
-    vector.print %0 : vector<8xi32>
-    return
-  }
-  func.func @dumpi64(%arg0: memref<?xi64>) {
-    %c = arith.constant 0 : index
-    %d = arith.constant 0 : i64
-    %0 = vector.transfer_read %arg0[%c], %d: memref<?xi64>, vector<8xi64>
-    vector.print %0 : vector<8xi64>
-    return
-  }
-
-  func.func @entry() {
+  func.func @main() {
     %c1 = arith.constant 1 : index
     %t1 = arith.constant sparse<
       [ [0,0], [0,1], [0,63], [1,0], [1,1], [31,0], [31,63] ],
@@ -106,50 +72,78 @@ module {
     %6 = sparse_tensor.convert %3 : tensor<32x64xf64, #CSC>  to tensor<32x64xf64, #DCSR>
 
     //
-    // All proper row-/column-wise?
+    // Verify the outputs.
     //
-    // CHECK:      ( 1, 2, 3, 4, 5, 6, 7, 0 )
-    // CHECK-NEXT: ( 1, 4, 6, 2, 5, 3, 7, 0 )
-    // CHECK-NEXT: ( 1, 4, 6, 2, 5, 3, 7, 0 )
-    // CHECK-NEXT: ( 1, 4, 6, 2, 5, 3, 7, 0 )
-    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 0 )
-    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 32, 64 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 31
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 63, 0, 1, 0, 63
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: ----
     //
-    %m1 = sparse_tensor.values %1 : tensor<32x64xf64, #DCSR> to memref<?xf64>
-    %m2 = sparse_tensor.values %2 : tensor<32x64xf64, #DCSC> to memref<?xf64>
-    %m3 = sparse_tensor.values %3 : tensor<32x64xf64, #CSC>  to memref<?xf64>
-    %m4 = sparse_tensor.values %4 : tensor<32x64xf64, #DCSC> to memref<?xf64>
-    %m5 = sparse_tensor.values %5 : tensor<32x64xf64, #DCSR> to memref<?xf64>
-    %m6 = sparse_tensor.values %6 : tensor<32x64xf64, #DCSR> to memref<?xf64>
-    call @dumpf64(%m1) : (memref<?xf64>) -> ()
-    call @dumpf64(%m2) : (memref<?xf64>) -> ()
-    call @dumpf64(%m3) : (memref<?xf64>) -> ()
-    call @dumpf64(%m4) : (memref<?xf64>) -> ()
-    call @dumpf64(%m5) : (memref<?xf64>) -> ()
-    call @dumpf64(%m6) : (memref<?xf64>) -> ()
-
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 64, 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 63
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 31, 0, 1, 0, 31
+    // CHECK-NEXT: values : ( 1, 4, 6, 2, 5, 3, 7
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 64, 32 )
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 31, 0, 1, 0, 31
+    // CHECK-NEXT: values : ( 1, 4, 6, 2, 5, 3, 7
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 64, 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 63
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 31, 0, 1, 0, 31
+    // CHECK-NEXT: values : ( 1, 4, 6, 2, 5, 3, 7
+    // CHECK-NEXT: ----
     //
-    // Sanity check on indices.
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 32, 64 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 31
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 63, 0, 1, 0, 63
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: ----
     //
-    // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, 0 )
-    // CHECK-NEXT: ( 0, 1, 31, 0, 1, 0, 31, 0 )
-    // CHECK-NEXT: ( 0, 1, 31, 0, 1, 0, 31, 0 )
-    // CHECK-NEXT: ( 0, 1, 31, 0, 1, 0, 31, 0 )
-    // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, 0 )
-    // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 32, 64 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 31
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 63, 0, 1, 0, 63
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: ----
     //
-    %i1 = sparse_tensor.coordinates %1 { level = 1 : index } : tensor<32x64xf64, #DCSR> to memref<?xi8>
-    %i2 = sparse_tensor.coordinates %2 { level = 1 : index } : tensor<32x64xf64, #DCSC> to memref<?xi64>
-    %i3 = sparse_tensor.coordinates %3 { level = 1 : index } : tensor<32x64xf64, #CSC>  to memref<?xi32>
-    %i4 = sparse_tensor.coordinates %4 { level = 1 : index } : tensor<32x64xf64, #DCSC> to memref<?xi64>
-    %i5 = sparse_tensor.coordinates %5 { level = 1 : index } : tensor<32x64xf64, #DCSR> to memref<?xi8>
-    %i6 = sparse_tensor.coordinates %6 { level = 1 : index } : tensor<32x64xf64, #DCSR> to memref<?xi8>
-    call @dumpi08(%i1) : (memref<?xi8>)  -> ()
-    call @dumpi64(%i2) : (memref<?xi64>) -> ()
-    call @dumpi32(%i3) : (memref<?xi32>) -> ()
-    call @dumpi64(%i4) : (memref<?xi64>) -> ()
-    call @dumpi08(%i5) : (memref<?xi08>) -> ()
-    call @dumpi08(%i6) : (memref<?xi08>) -> ()
+    sparse_tensor.print %1 : tensor<32x64xf64, #DCSR>
+    sparse_tensor.print %2 : tensor<32x64xf64, #DCSC>
+    sparse_tensor.print %3 : tensor<32x64xf64, #CSC>
+    sparse_tensor.print %4 : tensor<32x64xf64, #DCSC>
+    sparse_tensor.print %5 : tensor<32x64xf64, #DCSR>
+    sparse_tensor.print %6 : tensor<32x64xf64, #DCSR>
 
     // Release the resources.
     bufferization.dealloc_tensor %1 : tensor<32x64xf64, #DCSR>

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir
index 1655d6a03a6202..9b05f9bf3a29c2 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -107,7 +107,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     //
     // Initialize a 3-dim dense tensor.
     //

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir
index 2ace317554a070..0f9dfb9da7204f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -180,7 +180,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     call @testNonSingleton() : () -> ()
     call @testSingleton() : () -> ()
     return

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
index 16252c1005ebbb..16813e0aa707b8 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -93,17 +93,17 @@ module {
 
   func.func @add_coo_coo_out_coo(%arga: tensor<8x8xf32, #SortedCOO>,
                                  %argb: tensor<8x8xf32, #SortedCOOSoA>)
-		                 -> tensor<8x8xf32, #SortedCOO> {
-    %init = tensor.empty() : tensor<8x8xf32, #SortedCOO>
+		                 -> tensor<8x8xf32, #SortedCOOSoA> {
+    %init = tensor.empty() : tensor<8x8xf32, #SortedCOOSoA>
     %0 = linalg.generic #trait
       ins(%arga, %argb: tensor<8x8xf32, #SortedCOO>,
                         tensor<8x8xf32, #SortedCOOSoA>)
-      outs(%init: tensor<8x8xf32, #SortedCOO>) {
+      outs(%init: tensor<8x8xf32, #SortedCOOSoA>) {
         ^bb(%a: f32, %b: f32, %x: f32):
           %0 = arith.addf %a, %b : f32
           linalg.yield %0 : f32
-        } -> tensor<8x8xf32, #SortedCOO>
-    return %0 : tensor<8x8xf32, #SortedCOO>
+        } -> tensor<8x8xf32, #SortedCOOSoA>
+    return %0 : tensor<8x8xf32, #SortedCOOSoA>
   }
 
 
@@ -126,7 +126,7 @@ module {
     return %0 : tensor<8x8xf32>
   }
 
-  func.func @entry() {
+  func.func @main() {
     %c0  = arith.constant 0 : index
     %c1  = arith.constant 1 : index
     %c8  = arith.constant 8 : index
@@ -171,8 +171,9 @@ module {
                                             -> tensor<8x8xf32>
     %COO_RET = call @add_coo_coo_out_coo(%COO_A, %COO_B) : (tensor<8x8xf32, #SortedCOO>,
                                                             tensor<8x8xf32, #SortedCOOSoA>)
-                                                         -> tensor<8x8xf32, #SortedCOO>
-    %C4 = sparse_tensor.convert %COO_RET : tensor<8x8xf32, #SortedCOO> to tensor<8x8xf32>
+                                                         -> tensor<8x8xf32, #SortedCOOSoA>
+    %C4 = sparse_tensor.convert %COO_RET : tensor<8x8xf32, #SortedCOOSoA> to tensor<8x8xf32>
+
     //
     // Verify computed matrix C.
     //
@@ -201,6 +202,32 @@ module {
       vector.print %v4 : vector<8xf32>
     }
 
+    //
+    // Ensure that COO-SoA output has the same values.
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 64
+    // CHECK-NEXT: dim = ( 8, 8 )
+    // CHECK-NEXT: lvl = ( 8, 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 64
+    // CHECK-NEXT: crd[0] : ( 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+    // CHECK-SAME:            2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+    // CHECK-SAME:            5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7,
+    // CHECK-SAME:            7, 7, 7, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3,
+    // CHECK-SAME:            4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
+    // CHECK-SAME:            0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3,
+    // CHECK-SAME:            4, 5, 6, 7
+    // CHECK-NEXT: values : ( 8.8, 4.8, 6.8, 4.8, 8.8, 6.1, 14.8, 16.8, 4.4, 4.4, 4.4, 8.4,
+    // CHECK-SAME:            8.4, 12.4, 16.4, 16.4, 8.8, 4.8, 6.8, 8.8, 8.8, 12.8, 14.8,
+    // CHECK-SAME:            15.8, 4.3, 5.3, 6.3, 8.3, 8.3, 12.3, 14.3, 16.3, 4.5, 4.5,
+    // CHECK-SAME:            6.5, 8.5, 8.5, 12.5, 14.5, 16.5, 9.9, 4.9, 6.9, 8.9, 8.9,
+    // CHECK-SAME:            12.9, 15.9, 16.9, 12.1, 6.1, 5.1, 9.1, 9.1, 13.1, 15.1, 17.1,
+    // CHECK-SAME:            15.4, 5.4, 7.4, 5.4, 11.4, 10.4, 11.4, 9.4
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %COO_RET : tensor<8x8xf32, #SortedCOOSoA>
+
     // Release resources.
     bufferization.dealloc_tensor %C1 : tensor<8x8xf32>
     bufferization.dealloc_tensor %C2 : tensor<8x8xf32>
@@ -209,7 +236,7 @@ module {
     bufferization.dealloc_tensor %CSR_A : tensor<8x8xf32, #CSR>
     bufferization.dealloc_tensor %COO_A : tensor<8x8xf32, #SortedCOO>
     bufferization.dealloc_tensor %COO_B : tensor<8x8xf32, #SortedCOOSoA>
-    bufferization.dealloc_tensor %COO_RET : tensor<8x8xf32, #SortedCOO>
+    bufferization.dealloc_tensor %COO_RET : tensor<8x8xf32, #SortedCOOSoA>
 
 
     return

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dilated_conv_2d_nhwc_hwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dilated_conv_2d_nhwc_hwcf.mlir
index b4d40ae084015a..40738a9f7d7f19 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dilated_conv_2d_nhwc_hwcf.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dilated_conv_2d_nhwc_hwcf.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -78,7 +78,7 @@ func.func @conv_2d_nhwc_hwcf_dual_CDCC(%arg0: tensor<?x?x?x?xf32, #CDCC>, %arg1:
 }
 
 
-func.func @entry() {
+func.func @main() {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c3 = arith.constant 3 : index

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir
index f7ba4daa245892..5451f2d957ad35 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -49,7 +49,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     // Setup two sparse vectors.
     %d1 = arith.constant sparse<
         [ [0], [1], [22], [23], [1022] ], [1.0, 2.0, 3.0, 4.0, 5.0]
@@ -60,6 +60,30 @@ module {
     %s1 = sparse_tensor.convert %d1 : tensor<1024xf32> to tensor<1024xf32, #SparseVector>
     %s2 = sparse_tensor.convert %d2 : tensor<1024xf32> to tensor<1024xf32, #SparseVector>
 
+    //
+    // Verify the inputs.
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 5
+    // CHECK-NEXT: dim = ( 1024 )
+    // CHECK-NEXT: lvl = ( 1024 )
+    // CHECK-NEXT: pos[0] : ( 0, 5
+    // CHECK-NEXT: crd[0] : ( 0, 1, 22, 23, 1022
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 3
+    // CHECK-NEXT: dim = ( 1024 )
+    // CHECK-NEXT: lvl = ( 1024 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 22, 1022, 1023
+    // CHECK-NEXT: values : ( 6, 7, 8
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %s1 : tensor<1024xf32, #SparseVector>
+    sparse_tensor.print %s2 : tensor<1024xf32, #SparseVector>
+
     // Call the kernel and verify the output.
     //
     // CHECK: 53
@@ -73,16 +97,6 @@ module {
     %1 = tensor.extract %0[] : tensor<f32>
     vector.print %1 : f32
 
-    // Print number of entries in the sparse vectors.
-    //
-    // CHECK: 5
-    // CHECK: 3
-    //
-    %noe1 = sparse_tensor.number_of_entries %s1 : tensor<1024xf32, #SparseVector>
-    %noe2 = sparse_tensor.number_of_entries %s2 : tensor<1024xf32, #SparseVector>
-    vector.print %noe1 : index
-    vector.print %noe2 : index
-
     // Release the resources.
     bufferization.dealloc_tensor %0 : tensor<f32>
     bufferization.dealloc_tensor %s1 : tensor<1024xf32, #SparseVector>

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir
index 93a7ea51ec9c4d..451195b2185b76 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -35,8 +35,6 @@
 }>
 
 module {
-  func.func private @printMemrefF64(%ptr : tensor<*xf64>)
-
   //
   // Column-wise storage forces the ijk loop to permute into jki
   // so that access pattern expansion (workspace) needs to be
@@ -54,7 +52,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %d1 = arith.constant -1.0 : f64
 
@@ -83,24 +81,26 @@ module {
        : (tensor<8x2xf64, #CSC>,
           tensor<2x4xf64, #CSC>) -> tensor<8x4xf64, #CSC>
 
-    // CHECK:      {{\[}}[32.53,   35.73,   38.93,   42.13],
-    // CHECK-NEXT: [34.56,   37.96,   41.36,   44.76],
-    // CHECK-NEXT: [36.59,   40.19,   43.79,   47.39],
-    // CHECK-NEXT: [38.62,   42.42,   46.22,   50.02],
-    // CHECK-NEXT: [40.65,   44.65,   48.65,   52.65],
-    // CHECK-NEXT: [42.68,   46.88,   51.08,   55.28],
-    // CHECK-NEXT: [44.71,   49.11,   53.51,   57.91],
-    // CHECK-NEXT: [46.74,   51.34,   55.94,   60.54]]
     //
-    %xc = sparse_tensor.convert %x3 : tensor<8x4xf64, #CSC> to tensor<8x4xf64>
-    %xu = tensor.cast %xc : tensor<8x4xf64> to tensor<*xf64>
-    call @printMemrefF64(%xu) : (tensor<*xf64>) -> ()
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 32
+    // CHECK-NEXT: dim = ( 8, 4 )
+    // CHECK-NEXT: lvl = ( 4, 8 )
+    // CHECK-NEXT: pos[1] : ( 0, 8, 16, 24, 32
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0,
+    // CHECK-SAME:            1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: values : ( 32.53, 34.56, 36.59, 38.62, 40.65, 42.68, 44.71, 46.74,
+    // CHECK-SAME:            35.73, 37.96, 40.19, 42.42, 44.65, 46.88, 49.11, 51.34,
+    // CHECK-SAME:            38.93, 41.36, 43.79, 46.22, 48.65, 51.08, 53.51, 55.94,
+    // CHECK-SAME:            42.13, 44.76, 47.39, 50.02, 52.65, 55.28, 57.91, 60.54
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %x3 : tensor<8x4xf64, #CSC>
 
     // Release the resources.
     bufferization.dealloc_tensor %x1 : tensor<8x2xf64, #CSC>
     bufferization.dealloc_tensor %x2 : tensor<2x4xf64, #CSC>
     bufferization.dealloc_tensor %x3 : tensor<8x4xf64, #CSC>
-    bufferization.dealloc_tensor %xc : tensor<8x4xf64>
 
     return
   }

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir
index c784375e0f3eaf..6679a81c74088b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -115,7 +115,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %df = arith.constant -1.0 : f64
 
@@ -147,60 +147,111 @@ module {
     %expand11 = call @expand_sparse2sparse_dyn(%sdm) : (tensor<?x?xf64, #SparseMatrix>) -> tensor<?x2x?xf64, #Sparse3dTensor>
 
     //
-    // Verify results of expand
+    // Verify results of expand with dense output.
     //
     // CHECK:      ( ( 1, 0, 3, 0 ), ( 5, 0, 7, 0 ), ( 9, 0, 11, 0 ) )
     // CHECK-NEXT: ( ( 1, 0, 3, 0 ), ( 5, 0, 7, 0 ), ( 9, 0, 11, 0 ) )
-    // CHECK-NEXT: ( 1, 3, 5, 7, 9,
-    // CHECK-NEXT: ( 1, 3, 5, 7, 9,
     // CHECK-NEXT: ( ( ( 1.1, 1.2 ), ( 1.3, 1.4 ) ), ( ( 2.1, 2.2 ), ( 2.3, 2.4 ) ), ( ( 3.1, 3.2 ), ( 3.3, 3.4 ) ) )
     // CHECK-NEXT: ( ( ( 1.1, 1.2 ), ( 1.3, 1.4 ) ), ( ( 2.1, 2.2 ), ( 2.3, 2.4 ) ), ( ( 3.1, 3.2 ), ( 3.3, 3.4 ) ) )
-    // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 )
-    // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 )
     // CHECK-NEXT: ( ( ( 1.1, 1.2 ), ( 1.3, 1.4 ) ), ( ( 2.1, 2.2 ), ( 2.3, 2.4 ) ), ( ( 3.1, 3.2 ), ( 3.3, 3.4 ) ) )
     // CHECK-NEXT: ( ( ( 1.1, 1.2 ), ( 1.3, 1.4 ) ), ( ( 2.1, 2.2 ), ( 2.3, 2.4 ) ), ( ( 3.1, 3.2 ), ( 3.3, 3.4 ) ) )
-    // CHECK-NEXT: 12
-    // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 )
-    // CHECK-NEXT: 12
-    // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 )
     //
-
     %m0 = vector.transfer_read %expand0[%c0, %c0], %df: tensor<3x4xf64>, vector<3x4xf64>
     vector.print %m0 : vector<3x4xf64>
     %m1 = vector.transfer_read %expand1[%c0, %c0], %df: tensor<3x4xf64>, vector<3x4xf64>
     vector.print %m1 : vector<3x4xf64>
-    %a2 = sparse_tensor.values %expand2 : tensor<3x4xf64, #SparseMatrix> to memref<?xf64>
-    %m2 = vector.transfer_read %a2[%c0], %df: memref<?xf64>, vector<12xf64>
-    vector.print %m2 : vector<12xf64>
-    %a3 = sparse_tensor.values %expand3 : tensor<3x4xf64, #SparseMatrix> to memref<?xf64>
-    %m3 = vector.transfer_read %a3[%c0], %df: memref<?xf64>, vector<12xf64>
-    vector.print %m3 : vector<12xf64>
-
     %m4 = vector.transfer_read %expand4[%c0, %c0, %c0], %df: tensor<3x2x2xf64>, vector<3x2x2xf64>
     vector.print %m4 : vector<3x2x2xf64>
     %m5 = vector.transfer_read %expand5[%c0, %c0, %c0], %df: tensor<3x2x2xf64>, vector<3x2x2xf64>
     vector.print %m5 : vector<3x2x2xf64>
-    %a6 = sparse_tensor.values %expand6 : tensor<3x2x2xf64, #Sparse3dTensor> to memref<?xf64>
-    %m6 = vector.transfer_read %a6[%c0], %df: memref<?xf64>, vector<12xf64>
-    vector.print %m6 : vector<12xf64>
-    %a7 = sparse_tensor.values %expand7 : tensor<3x2x2xf64, #Sparse3dTensor> to memref<?xf64>
-    %m7 = vector.transfer_read %a7[%c0], %df: memref<?xf64>, vector<12xf64>
-    vector.print %m7 : vector<12xf64>
-
     %m8 = vector.transfer_read %expand8[%c0, %c0, %c0], %df: tensor<?x2x?xf64>, vector<3x2x2xf64>
     vector.print %m8 : vector<3x2x2xf64>
     %m9 = vector.transfer_read %expand9[%c0, %c0, %c0], %df: tensor<?x2x?xf64>, vector<3x2x2xf64>
     vector.print %m9 : vector<3x2x2xf64>
-    %n10 = sparse_tensor.number_of_entries %expand10 : tensor<?x2x?xf64, #Sparse3dTensor>
-    vector.print %n10 : index
-    %a10 = sparse_tensor.values %expand10 : tensor<?x2x?xf64, #Sparse3dTensor> to memref<?xf64>
-    %m10 = vector.transfer_read %a10[%c0], %df: memref<?xf64>, vector<12xf64>
-    vector.print %m10 : vector<12xf64>
-    %n11 = sparse_tensor.number_of_entries %expand11 : tensor<?x2x?xf64, #Sparse3dTensor>
-    vector.print %n11 : index
-    %a11 = sparse_tensor.values %expand11 : tensor<?x2x?xf64, #Sparse3dTensor> to memref<?xf64>
-    %m11 = vector.transfer_read %a11[%c0], %df: memref<?xf64>, vector<12xf64>
-    vector.print %m11 : vector<12xf64>
+
+    //
+    // Verify results of expand with sparse output.
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 3, 4 )
+    // CHECK-NEXT: lvl = ( 3, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6
+    // CHECK-NEXT: crd[1] : ( 0, 2, 0, 2, 0, 2
+    // CHECK-NEXT: values : ( 1, 3, 5, 7, 9, 11
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 3, 4 )
+    // CHECK-NEXT: lvl = ( 3, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6
+    // CHECK-NEXT: crd[1] : ( 0, 2, 0, 2, 0, 2
+    // CHECK-NEXT: values : ( 1, 3, 5, 7, 9, 11
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: dim = ( 3, 2, 2 )
+    // CHECK-NEXT: lvl = ( 3, 2, 2 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6
+    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: pos[2] : ( 0, 2, 4, 6, 8, 10, 12
+    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: values : ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: dim = ( 3, 2, 2 )
+    // CHECK-NEXT: lvl = ( 3, 2, 2 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6
+    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: pos[2] : ( 0, 2, 4, 6, 8, 10, 12
+    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: values : ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: dim = ( 3, 2, 2 )
+    // CHECK-NEXT: lvl = ( 3, 2, 2 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6
+    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: pos[2] : ( 0, 2, 4, 6, 8, 10, 12
+    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: values : ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: dim = ( 3, 2, 2 )
+    // CHECK-NEXT: lvl = ( 3, 2, 2 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6
+    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: pos[2] : ( 0, 2, 4, 6, 8, 10, 12
+    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: values : ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %expand2 : tensor<3x4xf64, #SparseMatrix>
+    sparse_tensor.print %expand3 : tensor<3x4xf64, #SparseMatrix>
+    sparse_tensor.print %expand6 : tensor<3x2x2xf64, #Sparse3dTensor>
+    sparse_tensor.print %expand7 : tensor<3x2x2xf64, #Sparse3dTensor>
+    sparse_tensor.print %expand10 : tensor<?x2x?xf64, #Sparse3dTensor>
+    sparse_tensor.print %expand11 : tensor<?x2x?xf64, #Sparse3dTensor>
 
 
     // Release sparse resources.

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
index 7478b604ff6715..37ff2e3ffd3f46 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -53,7 +53,7 @@ module {
     return %0 : tensor<6x6xi32, #DCSR>
   }
 
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %i0 = arith.constant 0 : i32
 
@@ -100,25 +100,27 @@ module {
     vector.print %v : vector<6x6xi32>
 
     //
-    // Should be the same as dense output
-    // CHECK:    ( ( 0, 0, -1, -6, -1, 6 ),
-    // CHECK-SAME: ( -1, 0, 1, 0, 1, 0 ),
-    // CHECK-SAME: ( 0, -1, 1, 0, 0, 0 ),
-    // CHECK-SAME: ( -1, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME: ( 0, 0, 3, 6, -3, -6 ),
-    // CHECK-SAME: ( 2, -1, 3, 0, -3, 0 ) )
+    // Should be the same as dense output.
     //
-    %sparse_ret = sparse_tensor.convert %1
-      : tensor<6x6xi32, #DCSR> to tensor<6x6xi32>
-    %v1 = vector.transfer_read %sparse_ret[%c0, %c0], %i0
-      : tensor<6x6xi32>, vector<6x6xi32>
-    vector.print %v1 : vector<6x6xi32>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 36
+    // CHECK-NEXT: dim = ( 6, 6 )
+    // CHECK-NEXT: lvl = ( 6, 6 )
+    // CHECK-NEXT: pos[0] : ( 0, 6
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4,
+    // CHECK-SAME:            5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1,
+    // CHECK-SAME:            0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %1 : tensor<6x6xi32, #DCSR>
 
     // Release the resources.
     bufferization.dealloc_tensor %sparse_filter : tensor<3x3xi32, #DCSR>
     bufferization.dealloc_tensor %0 : tensor<6x6xi32>
     bufferization.dealloc_tensor %1 : tensor<6x6xi32, #DCSR>
-    bufferization.dealloc_tensor %sparse_ret : tensor<6x6xi32>
 
     return
   }

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
index 55ecac8dbc1874..8a5712d3fa1b10 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -82,7 +82,7 @@ module {
   //
   // Main driver that reads tensor from file and calls the sparse kernel.
   //
-  func.func @entry() {
+  func.func @main() {
     %d0 = arith.constant 0.0 : f64
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
index 46b83be21dcf30..aef3a947e4f0ba 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -100,7 +100,7 @@ module {
     return
   }
 
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
     %c2 = arith.constant 2 : index

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_generate.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_generate.mlir
index c1547033062d37..e1f73eb4ac4fe6 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_generate.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_generate.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -39,7 +39,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
     %f0 = arith.constant 0.0 : f64
@@ -78,12 +78,20 @@ module {
     }
 
     %sv = sparse_tensor.convert %output : tensor<?xf64> to tensor<?xf64, #SparseVector>
-    %n0 = sparse_tensor.number_of_entries %sv : tensor<?xf64, #SparseVector>
 
-    // Print the number of non-zeros for verification.
     //
-    // CHECK: 5
-    vector.print %n0 : index
+    // Verify the outputs.
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 5
+    // CHECK-NEXT: dim = ( 50 )
+    // CHECK-NEXT: lvl = ( 50 )
+    // CHECK-NEXT: pos[0] : ( 0, 5
+    // CHECK-NEXT: crd[0] : ( 1, 9, 17, 27, 30
+    // CHECK-NEXT: values : ( 84, 34, 8, 40, 93
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %sv : tensor<?xf64, #SparseVector>
 
     // Release the resources.
     bufferization.dealloc_tensor %sv : tensor<?xf64, #SparseVector>

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir
index 70a63ae7bc9270..3ce45e5fd971f2 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -160,7 +160,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %du = arith.constant -1 : i64
     %df = arith.constant -1.0 : f32
@@ -208,63 +208,112 @@ module {
     //
     // Verify result.
     //
-    // CHECK:      2
-    // CHECK-NEXT: 8
-    // CHECK-NEXT: 8
-    // CHECK-NEXT: 8
-    // CHECK-NEXT: 2
-    // CHECK-NEXT: 12
-    // CHECK-NEXT: 12
-    // CHECK-NEXT: 12
-    // CHECK-NEXT: ( 20, 80 )
-    // CHECK-NEXT: ( 0, 1, 12, 3, 24, 5, 6, 7 )
-    // CHECK-NEXT: ( 0, 2, 8, 24, 64, 160, 384, 896 )
-    // CHECK-NEXT: ( 1, 3, 6, 11, 20, 37, 70, 135 )
-    // CHECK-NEXT: ( 10, 120 )
-    // CHECK-NEXT: ( 0, 1, 2, 3, 1, 12, 3, 4, 2, 3, 4, 25 )
-    // CHECK-NEXT: ( 0, 0, 0, 0, 0, 2, 2, 3, 0, 2, 12, 24 )
-    // CHECK-NEXT: ( 1, 2, 3, 4, 2, 4, 4, 5, 3, 4, 7, 9 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 2
+    // CHECK-NEXT: dim = ( 8 )
+    // CHECK-NEXT: lvl = ( 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 2, 4
+    // CHECK-NEXT: values : ( 20, 80
+    // CHECK-NEXT: ----
     //
-    %n0 = sparse_tensor.number_of_entries %0 : tensor<8xi64, #SparseVector>
-    %n1 = sparse_tensor.number_of_entries %1 : tensor<8xi64, #SparseVector>
-    %n2 = sparse_tensor.number_of_entries %2 : tensor<8xi64, #SparseVector>
-    %n3 = sparse_tensor.number_of_entries %3 : tensor<8xi64, #SparseVector>
-    %n4 = sparse_tensor.number_of_entries %4 : tensor<3x4xi64, #SparseMatrix>
-    %n5 = sparse_tensor.number_of_entries %5 : tensor<3x4xi64, #SparseMatrix>
-    %n6 = sparse_tensor.number_of_entries %6 : tensor<3x4xi64, #SparseMatrix>
-    %n7 = sparse_tensor.number_of_entries %7 : tensor<3x4xi64, #SparseMatrix>
-    %8 = sparse_tensor.values %0 : tensor<8xi64, #SparseVector> to memref<?xi64>
-    %9 = sparse_tensor.values %1 : tensor<8xi64, #SparseVector> to memref<?xi64>
-    %10 = sparse_tensor.values %2 : tensor<8xi64, #SparseVector> to memref<?xi64>
-    %11 = sparse_tensor.values %3 : tensor<8xi64, #SparseVector> to memref<?xi64>
-    %12 = sparse_tensor.values %4 : tensor<3x4xi64, #SparseMatrix> to memref<?xi64>
-    %13 = sparse_tensor.values %5 : tensor<3x4xi64, #SparseMatrix> to memref<?xi64>
-    %14 = sparse_tensor.values %6 : tensor<3x4xi64, #SparseMatrix> to memref<?xi64>
-    %15 = sparse_tensor.values %7 : tensor<3x4xi64, #SparseMatrix> to memref<?xi64>
-    %16 = vector.transfer_read %8[%c0], %du: memref<?xi64>, vector<2xi64>
-    %17 = vector.transfer_read %9[%c0], %du: memref<?xi64>, vector<8xi64>
-    %18 = vector.transfer_read %10[%c0], %du: memref<?xi64>, vector<8xi64>
-    %19 = vector.transfer_read %11[%c0], %du: memref<?xi64>, vector<8xi64>
-    %20 = vector.transfer_read %12[%c0], %du: memref<?xi64>, vector<2xi64>
-    %21 = vector.transfer_read %13[%c0], %du: memref<?xi64>, vector<12xi64>
-    %22 = vector.transfer_read %14[%c0], %du: memref<?xi64>, vector<12xi64>
-    %23 = vector.transfer_read %15[%c0], %du: memref<?xi64>, vector<12xi64>
-    vector.print %n0 : index
-    vector.print %n1 : index
-    vector.print %n2 : index
-    vector.print %n3 : index
-    vector.print %n4 : index
-    vector.print %n5 : index
-    vector.print %n6 : index
-    vector.print %n7 : index
-    vector.print %16 : vector<2xi64>
-    vector.print %17 : vector<8xi64>
-    vector.print %18 : vector<8xi64>
-    vector.print %19 : vector<8xi64>
-    vector.print %20 : vector<2xi64>
-    vector.print %21 : vector<12xi64>
-    vector.print %22 : vector<12xi64>
-    vector.print %23 : vector<12xi64>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 8 )
+    // CHECK-NEXT: lvl = ( 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 8
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: values : ( 0, 1, 12, 3, 24, 5, 6, 7
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 8 )
+    // CHECK-NEXT: lvl = ( 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 8
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: values : ( 0, 2, 8, 24, 64, 160, 384, 896
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 8 )
+    // CHECK-NEXT: lvl = ( 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 8
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: values : ( 1, 3, 6, 11, 20, 37, 70, 135
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 2
+    // CHECK-NEXT: dim = ( 3, 4 )
+    // CHECK-NEXT: lvl = ( 3, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 1, 2
+    // CHECK-NEXT: crd[1] : ( 1, 3
+    // CHECK-NEXT: values : ( 10, 120
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: dim = ( 3, 4 )
+    // CHECK-NEXT: lvl = ( 3, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: values : ( 0, 1, 2, 3, 1, 12, 3, 4, 2, 3, 4, 25
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: dim = ( 3, 4 )
+    // CHECK-NEXT: lvl = ( 3, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: values : ( 0, 0, 0, 0, 0, 2, 2, 3, 0, 2, 12, 24
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: dim = ( 3, 4 )
+    // CHECK-NEXT: lvl = ( 3, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2
+    // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 2, 4, 4, 5, 3, 4, 7, 9
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %0 : tensor<8xi64, #SparseVector>
+    sparse_tensor.print %1 : tensor<8xi64, #SparseVector>
+    sparse_tensor.print %2 : tensor<8xi64, #SparseVector>
+    sparse_tensor.print %3 : tensor<8xi64, #SparseVector>
+    sparse_tensor.print %4 : tensor<3x4xi64, #SparseMatrix>
+    sparse_tensor.print %5 : tensor<3x4xi64, #SparseMatrix>
+    sparse_tensor.print %6 : tensor<3x4xi64, #SparseMatrix>
+    sparse_tensor.print %7 : tensor<3x4xi64, #SparseMatrix>
+
+    //
+    // Call the f32 kernel, verify the result.
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 2, 3 )
+    // CHECK-NEXT: lvl = ( 2, 3 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 1
+    // CHECK-NEXT: pos[1] : ( 0, 3, 6
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2
+    // CHECK-NEXT: values : ( 0, 10, 0, 1, 1, 42
+    // CHECK-NEXT: ----
+    //
+    %100 = call @add_outer_2d(%sf32) : (tensor<2x3xf32, #SparseMatrix>)
+      -> tensor<2x3xf32, #SparseMatrix>
+    sparse_tensor.print %100 : tensor<2x3xf32, #SparseMatrix>
 
     // Release resources.
     bufferization.dealloc_tensor %sv : tensor<8xi64, #SparseVector>
@@ -279,17 +328,6 @@ module {
     bufferization.dealloc_tensor %5 : tensor<3x4xi64, #SparseMatrix>
     bufferization.dealloc_tensor %6 : tensor<3x4xi64, #SparseMatrix>
     bufferization.dealloc_tensor %7 : tensor<3x4xi64, #SparseMatrix>
-
-    //
-    // Call the f32 kernel, verify the result, release the resources.
-    //
-    // CHECK-NEXT: ( 0, 10, 0, 1, 1, 42 )
-    //
-    %100 = call @add_outer_2d(%sf32) : (tensor<2x3xf32, #SparseMatrix>)
-      -> tensor<2x3xf32, #SparseMatrix>
-    %101 = sparse_tensor.values %100 : tensor<2x3xf32, #SparseMatrix> to memref<?xf32>
-    %102 = vector.transfer_read %101[%c0], %df: memref<?xf32>, vector<6xf32>
-    vector.print %102 : vector<6xf32>
     bufferization.dealloc_tensor %sf32 : tensor<2x3xf32, #SparseMatrix>
     bufferization.dealloc_tensor %100 : tensor<2x3xf32, #SparseMatrix>
 

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
index 4bb1f3d12871f3..fc7b82fdecea34 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -138,7 +138,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %du = arith.constant -1 : i64
 

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
index 364a188cf37c4c..db6612402357e5 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -48,7 +48,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
     %c2 = arith.constant 2 : index


        


More information about the Mlir-commits mailing list