[Mlir-commits] [mlir] [mlir][sparse] Migrate more tests to use sparse_tensor.print (PR #84833)

Yinying Li llvmlistbot at llvm.org
Mon Mar 11 14:09:00 PDT 2024


https://github.com/yinying-lisa-li created https://github.com/llvm/llvm-project/pull/84833

Continued efforts following #84249: migrate more SparseTensor CPU integration tests to use sparse_tensor.print instead of the ad-hoc dump helpers.
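As in #84249, each test driver is renamed from @entry to @main (with the matching -e main runner flag), and the per-test dump functions that converted a sparse tensor and printed its values buffer are replaced by a single sparse_tensor.print, whose nse/dim/lvl/pos/crd/values output the new CHECK lines verify. A minimal before/after sketch of the pattern (illustrative only, reusing the tensor type from concatenate_dim_1.mlir; not part of the patch):

    // Before: convert to dense, then print via runner-utils helpers.
    %d = sparse_tensor.convert %A : tensor<4x9xf64, #MAT_C_C> to tensor<4x9xf64>
    %u = tensor.cast %d : tensor<4x9xf64> to tensor<*xf64>
    call @printMemrefF64(%u) : (tensor<*xf64>) -> ()
    %n = sparse_tensor.number_of_entries %A : tensor<4x9xf64, #MAT_C_C>
    vector.print %n : index

    // After: one op prints nse, dim, lvl, the pos/crd arrays, and values.
    sparse_tensor.print %A : tensor<4x9xf64, #MAT_C_C>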

From 5926c06ed10955c9eb695d10568f62bd96896023 Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Mon, 11 Mar 2024 21:03:07 +0000
Subject: [PATCH] migrate more

---
 .../CPU/concatenate_dim_0_permute.mlir        |  78 ++--
 .../SparseTensor/CPU/concatenate_dim_1.mlir   |  58 ++-
 .../CPU/concatenate_dim_1_permute.mlir        |  72 ++--
 .../SparseTensor/CPU/dual_sparse_conv_2d.mlir |  99 +++--
 .../Dialect/SparseTensor/CPU/reshape_dot.mlir |   4 +-
 .../SparseTensor/CPU/sparse_block3d.mlir      |  38 +-
 .../Dialect/SparseTensor/CPU/sparse_cast.mlir |   4 +-
 .../Dialect/SparseTensor/CPU/sparse_cmp.mlir  |  49 ++-
 .../SparseTensor/CPU/sparse_codegen_dim.mlir  |   4 +-
 .../CPU/sparse_codegen_foreach.mlir           |  11 +-
 .../CPU/sparse_collapse_shape.mlir            | 128 ++++---
 .../CPU/sparse_constant_to_sparse_tensor.mlir |  31 +-
 .../CPU/sparse_conv_1d_nwc_wcf.mlir           |  53 +--
 .../SparseTensor/CPU/sparse_conv_2d.mlir      | 125 ++++---
 .../SparseTensor/CPU/sparse_conv_2d_55.mlir   |   4 +-
 .../CPU/sparse_conv_2d_nchw_fchw.mlir         |   4 +-
 .../CPU/sparse_conv_2d_nhwc_hwcf.mlir         | 166 +++++----
 .../SparseTensor/CPU/sparse_conv_3d.mlir      | 344 +++++++++---------
 .../CPU/sparse_conv_3d_ndhwc_dhwcf.mlir       | 212 ++++++-----
 19 files changed, 777 insertions(+), 707 deletions(-)

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir
index 11edd854ec08a5..21ea145c9a4cb1 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 
@@ -99,20 +99,6 @@ module {
     return
   }
 
-  func.func @dump_mat_perm_9x4(%A: tensor<9x4xf64, #MAT_C_C_P>) {
-    %c = sparse_tensor.convert %A : tensor<9x4xf64, #MAT_C_C_P> to tensor<9x4xf64>
-    %cu = tensor.cast %c : tensor<9x4xf64> to tensor<*xf64>
-    call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()
-
-    %n = sparse_tensor.number_of_entries %A : tensor<9x4xf64, #MAT_C_C_P>
-    vector.print %n : index
-
-    %1 = sparse_tensor.values %A : tensor<9x4xf64, #MAT_C_C_P> to memref<?xf64>
-    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
-
-    return
-  }
-
   func.func @dump_mat_dense_9x4(%A: tensor<9x4xf64>) {
     %u = tensor.cast %A : tensor<9x4xf64> to tensor<*xf64>
     call @printMemrefF64(%u) : (tensor<*xf64>) -> ()
@@ -120,18 +106,8 @@ module {
     return
   }
 
-  func.func @dump_mat_annotated_dense_9x4(%A: tensor<9x4xf64, #MAT_D_D>) {
-    %n = sparse_tensor.number_of_entries %A : tensor<9x4xf64, #MAT_D_D>
-    vector.print %n : index
-
-    %1 = sparse_tensor.values %A : tensor<9x4xf64, #MAT_D_D> to memref<?xf64>
-    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
-
-    return
-  }
-
   // Driver method to call and verify kernels.
-  func.func @entry() {
+  func.func @main() {
     %m42 = arith.constant dense<
       [ [ 1.0, 0.0 ],
         [ 3.1, 0.0 ],
@@ -163,20 +139,21 @@ module {
     %sm34cdp = sparse_tensor.convert %m34 : tensor<3x4xf64> to tensor<3x4xf64, #MAT_C_D_P>
     %sm44dcp = sparse_tensor.convert %m44 : tensor<4x4xf64> to tensor<4x4xf64, #MAT_D_C_P>
 
-    // CHECK:      {{\[}}[1,   0,   3,   0],
-    // CHECK-NEXT:  [0,   2,   0,   0],
-    // CHECK-NEXT:  [1,   0,   1,   1],
-    // CHECK-NEXT:  [0,   0.5,   0,   0],
-    // CHECK-NEXT:  [1,   5,   2,   0],
-    // CHECK-NEXT:  [0,   0,   1.5,   1],
-    // CHECK-NEXT:  [0,   3.5,   0,   0],
-    // CHECK-NEXT:  [1,   5,   2,   0],
-    // CHECK-NEXT:  [1,   0.5,   0,   0]]
-    // CHECK-NEXT: 18
-    // CHECK:      [1,  1,  1,  1,  1,  2,  0.5,  5,  3.5,  5,  0.5,  3,  1,  2,  1.5,  2,  1,  1
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 9, 4 )
+    // CHECK-NEXT: lvl = ( 4, 9 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 5, 11, 16, 18
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 1, 3, 4, 6, 7, 8, 0, 2, 4, 5, 7, 2, 5
+    // CHECK-NEXT: values : ( 1, 1, 1, 1, 1, 2, 0.5, 5, 3.5, 5, 0.5, 3, 1, 2, 1.5, 2, 1, 1
+    // CHECK-NEXT: ----
+    //
     %4 = call @concat_sparse_sparse_perm(%sm24ccp, %sm34cd, %sm44dc)
                : (tensor<2x4xf64, #MAT_C_C_P>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C_P>
-    call @dump_mat_perm_9x4(%4) : (tensor<9x4xf64, #MAT_C_C_P>) -> ()
+    sparse_tensor.print %4  : tensor<9x4xf64, #MAT_C_C_P>
 
     // CHECK:      {{\[}}[1,   0,   3,   0],
     // CHECK-NEXT:  [0,   2,   0,   0],
@@ -191,20 +168,21 @@ module {
                : (tensor<2x4xf64, #MAT_C_C_P>, tensor<3x4xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64>
     call @dump_mat_dense_9x4(%5) : (tensor<9x4xf64>) -> ()
 
-    // CHECK:      {{\[}}[1,   0,   3,   0],
-    // CHECK-NEXT:  [0,   2,   0,   0],
-    // CHECK-NEXT:  [1,   0,   1,   1],
-    // CHECK-NEXT:  [0,   0.5,   0,   0],
-    // CHECK-NEXT:  [1,   5,   2,   0],
-    // CHECK-NEXT:  [0,   0,   1.5,   1],
-    // CHECK-NEXT:  [0,   3.5,   0,   0],
-    // CHECK-NEXT:  [1,   5,   2,   0],
-    // CHECK-NEXT:  [1,   0.5,   0,   0]]
-    // CHECK-NEXT: 18
-    // CHECK:      [1,  3,  2,  1,  1,  1,  0.5,  1,  5,  2,  1.5,  1,  3.5,  1,  5,  2,  1,  0.5
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 9, 4 )
+    // CHECK-NEXT: lvl = ( 9, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 9
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1
+    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5
+    // CHECK-NEXT: ----
+    //
     %6 = call @concat_mix_sparse_perm(%m24, %sm34cdp, %sm44dc)
                : (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C>
-    call @dump_mat_9x4(%6) : (tensor<9x4xf64, #MAT_C_C>) -> ()
+    sparse_tensor.print %6  : tensor<9x4xf64, #MAT_C_C>
 
     // CHECK:      {{\[}}[1,   0,   3,   0],
     // CHECK-NEXT:  [0,   2,   0,   0],
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir
index 48d38257009201..ae067bf18527b7 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 
@@ -82,20 +82,6 @@ module {
     return %0 : tensor<4x9xf64>
   }
 
-  func.func @dump_mat_4x9(%A: tensor<4x9xf64, #MAT_C_C>) {
-    %c = sparse_tensor.convert %A : tensor<4x9xf64, #MAT_C_C> to tensor<4x9xf64>
-    %cu = tensor.cast %c : tensor<4x9xf64> to tensor<*xf64>
-    call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()
-
-    %n = sparse_tensor.number_of_entries %A : tensor<4x9xf64, #MAT_C_C>
-    vector.print %n : index
-
-    %1 = sparse_tensor.values %A : tensor<4x9xf64, #MAT_C_C> to memref<?xf64>
-    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
-
-    return
-  }
-
   func.func @dump_mat_dense_4x9(%A: tensor<4x9xf64>) {
     %1 = tensor.cast %A : tensor<4x9xf64> to tensor<*xf64>
     call @printMemrefF64(%1) : (tensor<*xf64>) -> ()
@@ -104,7 +90,7 @@ module {
   }
 
   // Driver method to call and verify kernels.
-  func.func @entry() {
+  func.func @main() {
     %m42 = arith.constant dense<
       [ [ 1.0, 0.0 ],
         [ 3.1, 0.0 ],
@@ -125,15 +111,21 @@ module {
     %sm43cd = sparse_tensor.convert %m43 : tensor<4x3xf64> to tensor<4x3xf64, #MAT_C_D>
     %sm44dc = sparse_tensor.convert %m44 : tensor<4x4xf64> to tensor<4x4xf64, #MAT_D_C>
 
-    // CHECK:      {{\[}}[1,   0,   1,   0,   1,   0,   0,   1.5,   1],
-    // CHECK-NEXT:  [3.1,   0,   1,   0,   0.5,   0,   3.5,   0,   0],
-    // CHECK-NEXT:  [0,   2,   0,   0,   1,   1,   5,   2,   0],
-    // CHECK-NEXT:  [0,   0,   5,   2,   0,   1,   0.5,   0,   0]]
-    // CHECK-NEXT: 18
-    // CHECK:      [1,  1,  1,  1.5,  1,  3.1,  1,  0.5,  3.5,  2,  1,  1,  5,  2,  5,  2,  1,  0.5
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 4, 9 )
+    // CHECK-NEXT: lvl = ( 4, 9 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
+    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    // CHECK-NEXT: ----
+    //
     %8 = call @concat_sparse_sparse_dim1(%sm42cc, %sm43cd, %sm44dc)
                : (tensor<4x2xf64, #MAT_C_C>, tensor<4x3xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64, #MAT_C_C>
-    call @dump_mat_4x9(%8) : (tensor<4x9xf64, #MAT_C_C>) -> ()
+    sparse_tensor.print %8 : tensor<4x9xf64, #MAT_C_C>
 
     // CHECK:      {{\[}}[1,   0,   1,   0,   1,   0,   0,   1.5,   1],
     // CHECK-NEXT:  [3.1,   0,   1,   0,   0.5,   0,   3.5,   0,   0],
@@ -143,15 +135,21 @@ module {
                : (tensor<4x2xf64, #MAT_C_C>, tensor<4x3xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64>
     call @dump_mat_dense_4x9(%9) : (tensor<4x9xf64>) -> ()
 
-    // CHECK:      {{\[}}[1,   0,   1,   0,   1,   0,   0,   1.5,   1],
-    // CHECK-NEXT:  [3.1,   0,   1,   0,   0.5,   0,   3.5,   0,   0],
-    // CHECK-NEXT:  [0,   2,   0,   0,   1,   1,   5,   2,   0],
-    // CHECK-NEXT:  [0,   0,   5,   2,   0,   1,   0.5,   0,   0]]
-    // CHECK-NEXT: 18
-    // CHECK:      [1,  1,  1,  1.5,  1,  3.1,  1,  0.5,  3.5,  2,  1,  1,  5,  2,  5,  2,  1,  0.5
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 4, 9 )
+    // CHECK-NEXT: lvl = ( 4, 9 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
+    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    // CHECK-NEXT: ----
+    //
     %10 = call @concat_mix_sparse_dim1(%m42, %sm43cd, %sm44dc)
                : (tensor<4x2xf64>, tensor<4x3xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64, #MAT_C_C>
-    call @dump_mat_4x9(%10) : (tensor<4x9xf64, #MAT_C_C>) -> ()
+    sparse_tensor.print %10 : tensor<4x9xf64, #MAT_C_C>
 
     // CHECK:      {{\[}}[1,   0,   1,   0,   1,   0,   0,   1.5,   1],
     // CHECK-NEXT:  [3.1,   0,   1,   0,   0.5,   0,   3.5,   0,   0],
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir
index dcdaa072c02fd8..ce746f27c4d881 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 
@@ -85,34 +85,6 @@ module {
     return %0 : tensor<4x9xf64>
   }
 
-  func.func @dump_mat_4x9(%A: tensor<4x9xf64, #MAT_C_C>) {
-    %c = sparse_tensor.convert %A : tensor<4x9xf64, #MAT_C_C> to tensor<4x9xf64>
-    %cu = tensor.cast %c : tensor<4x9xf64> to tensor<*xf64>
-    call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()
-
-    %n = sparse_tensor.number_of_entries %A : tensor<4x9xf64, #MAT_C_C>
-    vector.print %n : index
-
-    %1 = sparse_tensor.values %A : tensor<4x9xf64, #MAT_C_C> to memref<?xf64>
-    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
-
-    return
-  }
-
-  func.func @dump_mat_perm_4x9(%A: tensor<4x9xf64, #MAT_C_C_P>) {
-    %c = sparse_tensor.convert %A : tensor<4x9xf64, #MAT_C_C_P> to tensor<4x9xf64>
-    %cu = tensor.cast %c : tensor<4x9xf64> to tensor<*xf64>
-    call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()
-
-    %n = sparse_tensor.number_of_entries %A : tensor<4x9xf64, #MAT_C_C_P>
-    vector.print %n : index
-
-    %1 = sparse_tensor.values %A : tensor<4x9xf64, #MAT_C_C_P> to memref<?xf64>
-    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
-
-    return
-  }
-
   func.func @dump_mat_dense_4x9(%A: tensor<4x9xf64>) {
     %1 = tensor.cast %A : tensor<4x9xf64> to tensor<*xf64>
     call @printMemrefF64(%1) : (tensor<*xf64>) -> ()
@@ -121,7 +93,7 @@ module {
   }
 
   // Driver method to call and verify kernels.
-  func.func @entry() {
+  func.func @main() {
     %m42 = arith.constant dense<
       [ [ 1.0, 0.0 ],
         [ 3.1, 0.0 ],
@@ -153,15 +125,21 @@ module {
     %sm43cdp = sparse_tensor.convert %m43 : tensor<4x3xf64> to tensor<4x3xf64, #MAT_C_D_P>
     %sm44dcp = sparse_tensor.convert %m44 : tensor<4x4xf64> to tensor<4x4xf64, #MAT_D_C_P>
 
-    // CHECK:      {{\[}}[1,   0,   1,   0,   1,   0,   0,   1.5,   1],
-    // CHECK-NEXT:  [3.1,   0,   1,   0,   0.5,   0,   3.5,   0,   0],
-    // CHECK-NEXT:  [0,   2,   0,   0,   1,   1,   5,   2,   0],
-    // CHECK-NEXT:  [0,   0,   5,   2,   0,   1,   0.5,   0,   0]]
-    // CHECK-NEXT: 18
-    // CHECK:      [1,  3.1,  2,  1,  1,  5,  2,  1,  0.5,  1,  1,  1,  3.5,  5,  0.5,  1.5,  2,  1
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 4, 9 )
+    // CHECK-NEXT: lvl = ( 9, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 9
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 15, 17, 18
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 3, 3, 0, 1, 2, 2, 3, 1, 2, 3, 0, 2, 0
+    // CHECK-NEXT: values : ( 1, 3.1, 2, 1, 1, 5, 2, 1, 0.5, 1, 1, 1, 3.5, 5, 0.5, 1.5, 2, 1
+    // CHECK-NEXT: ----
+    //
     %12 = call @concat_sparse_sparse_perm_dim1(%sm42ccp, %sm43cd, %sm44dc)
                : (tensor<4x2xf64, #MAT_C_C_P>, tensor<4x3xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64, #MAT_C_C_P>
-    call @dump_mat_perm_4x9(%12) : (tensor<4x9xf64, #MAT_C_C_P>) -> ()
+    sparse_tensor.print %12 : tensor<4x9xf64, #MAT_C_C_P>
 
     // CHECK:      {{\[}}[1,   0,   1,   0,   1,   0,   0,   1.5,   1],
     // CHECK-NEXT:  [3.1,   0,   1,   0,   0.5,   0,   3.5,   0,   0],
@@ -171,15 +149,21 @@ module {
                : (tensor<4x2xf64, #MAT_C_C_P>, tensor<4x3xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64>
     call @dump_mat_dense_4x9(%13) : (tensor<4x9xf64>) -> ()
 
-    // CHECK:      {{\[}}[1,   0,   1,   0,   1,   0,   0,   1.5,   1],
-    // CHECK-NEXT:  [3.1,   0,   1,   0,   0.5,   0,   3.5,   0,   0],
-    // CHECK-NEXT:  [0,   2,   0,   0,   1,   1,   5,   2,   0],
-    // CHECK-NEXT:  [0,   0,   5,   2,   0,   1,   0.5,   0,   0]]
-    // CHECK-NEXT: 18
-    // CHECK:      [1,  1,  1,  1.5,  1,  3.1,  1,  0.5,  3.5,  2,  1,  1,  5,  2,  5,  2,  1,  0.5
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 4, 9 )
+    // CHECK-NEXT: lvl = ( 4, 9 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
+    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    // CHECK-NEXT: ----
+    //
     %14 = call @concat_mix_sparse_perm_dim1(%m42, %sm43cdp, %sm44dc)
                : (tensor<4x2xf64>, tensor<4x3xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64, #MAT_C_C>
-    call @dump_mat_4x9(%14) : (tensor<4x9xf64, #MAT_C_C>) -> ()
+    sparse_tensor.print %14 : tensor<4x9xf64, #MAT_C_C>
 
     // CHECK:      {{\[}}[1,   0,   1,   0,   1,   0,   0,   1.5,   1],
     // CHECK-NEXT:  [3.1,   0,   1,   0,   0.5,   0,   3.5,   0,   0],
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
index 6c35e2b51ed8f4..350b5b41dafc00 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -85,7 +85,7 @@ module {
     return %0 : tensor<6x6xi32, #CSC>
   }
 
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %i0 = arith.constant 0 : i32
 
@@ -141,7 +141,6 @@ module {
        : (tensor<8x8xi32, #CSC>,
           tensor<3x3xi32, #CSC>) -> tensor<6x6xi32, #CSC>
 
-
     // Verify the output.
     //
     // CHECK:    ( ( 0, 0, -1, -6, -1, 6 ),
@@ -156,64 +155,62 @@ module {
     vector.print %v : vector<6x6xi32>
 
     //
-    // Should be the same as dense output
-    // CHECK:    ( ( 0, 0, -1, -6, -1, 6 ),
-    // CHECK-SAME: ( -1, 0, 1, 0, 1, 0 ),
-    // CHECK-SAME: ( 0, -1, 1, 0, 0, 0 ),
-    // CHECK-SAME: ( -1, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME: ( 0, 0, 3, 6, -3, -6 ),
-    // CHECK-SAME: ( 2, -1, 3, 0, -3, 0 ) )
+    // Should be the same as dense output.
     //
-    %all_sparse_DCSR = sparse_tensor.convert %2
-      : tensor<6x6xi32, #DCSR> to tensor<6x6xi32>
-    %v2 = vector.transfer_read %all_sparse_DCSR[%c0, %c0], %i0
-      : tensor<6x6xi32>, vector<6x6xi32>
-    vector.print %v2 : vector<6x6xi32>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 36
+    // CHECK-NEXT: dim = ( 6, 6 )
+    // CHECK-NEXT: lvl = ( 6, 6 )
+    // CHECK-NEXT: pos[0] : ( 0, 6
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %2 : tensor<6x6xi32, #DCSR>
 
     //
-    // Should be the same as dense output
-    // CHECK:    ( ( 0, 0, -1, -6, -1, 6 ),
-    // CHECK-SAME: ( -1, 0, 1, 0, 1, 0 ),
-    // CHECK-SAME: ( 0, -1, 1, 0, 0, 0 ),
-    // CHECK-SAME: ( -1, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME: ( 0, 0, 3, 6, -3, -6 ),
-    // CHECK-SAME: ( 2, -1, 3, 0, -3, 0 ) )
+    // Should be the same as dense output.
     //
-    %all_sparse_CD = sparse_tensor.convert %4
-      : tensor<6x6xi32, #CDR> to tensor<6x6xi32>
-    %v4 = vector.transfer_read %all_sparse_CD[%c0, %c0], %i0
-      : tensor<6x6xi32>, vector<6x6xi32>
-    vector.print %v4 : vector<6x6xi32>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 36
+    // CHECK-NEXT: dim = ( 6, 6 )
+    // CHECK-NEXT: lvl = ( 6, 6 )
+    // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %3 : tensor<6x6xi32, #CSR>
 
     //
-    // Should be the same as dense output
-    // CHECK:    ( ( 0, 0, -1, -6, -1, 6 ),
-    // CHECK-SAME: ( -1, 0, 1, 0, 1, 0 ),
-    // CHECK-SAME: ( 0, -1, 1, 0, 0, 0 ),
-    // CHECK-SAME: ( -1, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME: ( 0, 0, 3, 6, -3, -6 ),
-    // CHECK-SAME: ( 2, -1, 3, 0, -3, 0 ) )
+    // Should be the same as dense output.
     //
-    %all_sparse_CSR = sparse_tensor.convert %3
-      : tensor<6x6xi32, #CSR> to tensor<6x6xi32>
-    %v3 = vector.transfer_read %all_sparse_CSR[%c0, %c0], %i0
-      : tensor<6x6xi32>, vector<6x6xi32>
-    vector.print %v3 : vector<6x6xi32>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 36
+    // CHECK-NEXT: dim = ( 6, 6 )
+    // CHECK-NEXT: lvl = ( 6, 6 )
+    // CHECK-NEXT: pos[0] : ( 0, 6
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %4 : tensor<6x6xi32, #CDR>
 
     //
-    // Should be the same as dense output
-    // CHECK:    ( ( 0, 0, -1, -6, -1, 6 ),
-    // CHECK-SAME: ( -1, 0, 1, 0, 1, 0 ),
-    // CHECK-SAME: ( 0, -1, 1, 0, 0, 0 ),
-    // CHECK-SAME: ( -1, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME: ( 0, 0, 3, 6, -3, -6 ),
-    // CHECK-SAME: ( 2, -1, 3, 0, -3, 0 ) )
+    // Should be the same as dense output.
     //
-    %all_sparse_CSC = sparse_tensor.convert %5
-      : tensor<6x6xi32, #CSC> to tensor<6x6xi32>
-    %v5 = vector.transfer_read %all_sparse_CSC[%c0, %c0], %i0
-      : tensor<6x6xi32>, vector<6x6xi32>
-    vector.print %v5 : vector<6x6xi32>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 36
+    // CHECK-NEXT: dim = ( 6, 6 )
+    // CHECK-NEXT: lvl = ( 6, 6 )
+    // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: values : ( 0, -1, 0, -1, 0, 2, 0, 0, -1, 0, 0, -1, -1, 1, 1, 0, 3, 3, -6, 0, 0, 0, 6, 0, -1, 1, 0, 0, -3, -3, 6, 0, 0, 0, -6, 0
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %5 : tensor<6x6xi32, #CSC>
 
     // Release the resources.
     bufferization.dealloc_tensor %sparse_input_DCSR : tensor<8x8xi32, #DCSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir
index 689428c23f7d78..ebf9f4392d859b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -84,7 +84,7 @@ module {
   }
 
 
-  func.func @entry() {
+  func.func @main() {
     // Setup two sparse vectors.
     %d1 = arith.constant sparse<
         [ [0, 0], [1, 1], [2, 2], [2, 3], [4, 5] ],
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block3d.mlir
index 024e86b4f165bb..2ff73923c8327d 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block3d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block3d.mlir
@@ -90,28 +90,38 @@ module {
     // ending at index (3,3,2)) with a “DCSR-flavored” along (j,k) with
     // dense “fibers” in the i-dim, we end up with 8 stored entries.
     //
-    // CHECK: 8
-    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 4, 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 3
+    // CHECK-NEXT: pos[1] : ( 0, 1, 2
+    // CHECK-NEXT: crd[1] : ( 0, 2
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8
+    // CHECK-NEXT: ----
     //
-    %na = sparse_tensor.number_of_entries %a : tensor<4x4x4xi32, #Sparse1>
-    vector.print %na : index
-    %ma = sparse_tensor.values %a: tensor<4x4x4xi32, #Sparse1> to memref<?xi32>
-    %va = vector.transfer_read %ma[%c0], %i0: memref<?xi32>, vector<8xi32>
-    vector.print %va : vector<8xi32>
+    sparse_tensor.print %a : tensor<4x4x4xi32, #Sparse1>
 
     //
     // If we store full 2x2x2 3-D blocks in the original index order
     // in a compressed fashion, we end up with 4 blocks to incorporate
     // all the nonzeros, and thus 32 stored entries.
     //
-    // CHECK: 32
-    // CHECK-NEXT: ( 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 5, 0, 0, 0, 6, 0, 3, 0, 0, 0, 4, 0, 0, 0, 0, 0, 7, 0, 0, 0, 8, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 32
+    // CHECK-NEXT: dim = ( 4, 4, 4 )
+    // CHECK-NEXT: lvl = ( 2, 2, 2, 2, 2, 2 )
+    // CHECK-NEXT: pos[0] : ( 0, 2
+    // CHECK-NEXT: crd[0] : ( 0, 1
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4
+    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1
+    // CHECK-NEXT: pos[2] : ( 0, 1, 2, 3, 4
+    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1
+    // CHECK-NEXT: values : ( 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 5, 0, 0, 0, 6, 0, 3, 0, 0, 0, 4, 0, 0, 0, 0, 0, 7, 0, 0, 0, 8, 0
+    // CHECK-NEXT: ----
     //
-    %nb = sparse_tensor.number_of_entries %b : tensor<4x4x4xi32, #Sparse2>
-    vector.print %nb : index
-    %mb = sparse_tensor.values %b: tensor<4x4x4xi32, #Sparse2> to memref<?xi32>
-    %vb = vector.transfer_read %mb[%c0], %i0: memref<?xi32>, vector<32xi32>
-    vector.print %vb : vector<32xi32>
+    sparse_tensor.print %b : tensor<4x4x4xi32, #Sparse2>
 
     // Release the resources.
     bufferization.dealloc_tensor %a : tensor<4x4x4xi32, #Sparse1>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
index 6efe7b334b9845..3b5168db23c584 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -178,7 +178,7 @@ module {
   // Main driver that converts a dense tensor into a sparse tensor
   // and then calls the sparse casting kernel.
   //
-  func.func @entry() {
+  func.func @main() {
     %z = arith.constant 0 : index
     %b = arith.constant 0 : i8
     %i = arith.constant 0 : i32
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir
index 035db33fb4b318..732bde55be91f9 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -96,7 +96,7 @@ module {
   // Main driver that constructs matrix and calls the sparse kernel to perform
   // element-wise comparison.
   //
-  func.func @entry() {
+  func.func @main() {
     %d0 = arith.constant 0 : i8
     %c0 = arith.constant 0 : index
 
@@ -124,33 +124,44 @@ module {
             : (tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>) -> tensor<4x4xi8, #DCSR>
 
     //
-    // All should have the same result.
+    // All should have the same boolean values.
+    //
+    // CHECK: ( ( 0, 1, 0, 1 ), ( 1, 0, 0, 0 ), ( 1, 0, 0, 1 ), ( 0, 0, 0, 0 ) )
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 16
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12, 16
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: values : ( 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 11
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 9, 11
+    // CHECK-NEXT: crd[1] : ( 1, 2, 3, 0, 1, 0, 1, 2, 3, 0, 1
+    // CHECK-NEXT: values : ( 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0
+    // CHECK-NEXT: ----
     //
-    // CHECK-COUNT-3: ( ( 0, 1, 0, 1 ), ( 1, 0, 0, 0 ), ( 1, 0, 0, 1 ), ( 0, 0, 0, 0 ) )
     %v = vector.transfer_read %all_dn_out[%c0, %c0], %d0
        : tensor<4x4xi8>, vector<4x4xi8>
     vector.print %v : vector<4x4xi8>
-
-    %lhs_sp_ret = sparse_tensor.convert %lhs_sp_out
-      : tensor<4x4xi8, #DCSR> to tensor<4x4xi8>
-    %v1 = vector.transfer_read %lhs_sp_ret[%c0, %c0], %d0
-      : tensor<4x4xi8>, vector<4x4xi8>
-    vector.print %v1 : vector<4x4xi8>
-
-    %rhs_sp_ret = sparse_tensor.convert %all_sp_out
-      : tensor<4x4xi8, #DCSR> to tensor<4x4xi8>
-    %v2 = vector.transfer_read %rhs_sp_ret[%c0, %c0], %d0
-      : tensor<4x4xi8>, vector<4x4xi8>
-    vector.print %v2 : vector<4x4xi8>
-
+    sparse_tensor.print %lhs_sp_out : tensor<4x4xi8, #DCSR>
+    sparse_tensor.print %all_sp_out : tensor<4x4xi8, #DCSR>
 
     bufferization.dealloc_tensor %lhs_sp : tensor<4x4xf64, #DCSR>
     bufferization.dealloc_tensor %rhs_sp : tensor<4x4xf64, #DCSR>
     bufferization.dealloc_tensor %all_dn_out : tensor<4x4xi8>
     bufferization.dealloc_tensor %lhs_sp_out : tensor<4x4xi8, #DCSR>
     bufferization.dealloc_tensor %all_sp_out : tensor<4x4xi8, #DCSR>
-    bufferization.dealloc_tensor %lhs_sp_ret : tensor<4x4xi8>
-    bufferization.dealloc_tensor %rhs_sp_ret : tensor<4x4xi8>
+
     return
   }
 }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir
index 7925759714eddb..c5d002aa163917 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -38,7 +38,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
     %c2 = arith.constant 2 : index
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
index 002a79055ce558..9deb5cd05fa3b3 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -144,7 +144,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     //
     // Initialize a 3-dim dense tensor.
     //
@@ -166,6 +166,7 @@ module {
     %s4 = sparse_tensor.convert %src : tensor<2x2xf64> to tensor<2x2xf64, #SortedCOO>
     %s5 = sparse_tensor.convert %src : tensor<2x2xf64> to tensor<2x2xf64, #SortedCOOPerm>
     %s6 = sparse_tensor.convert %src3d : tensor<7x8x9xf64>  to tensor<7x8x9xf64, #CCCPerm>
+
     // CHECK: 0
     // CHECK-NEXT: 0
     // CHECK-NEXT: 1
@@ -173,6 +174,7 @@ module {
     // CHECK-NEXT: 6
     // CHECK-NEXT: 5
     call @foreach_print_const() : () -> ()
+
     // CHECK-NEXT: 0
     // CHECK-NEXT: 0
     // CHECK-NEXT: 1
@@ -186,6 +188,7 @@ module {
     // CHECK-NEXT: 1
     // CHECK-NEXT: 6
     call @foreach_print_dense(%src) : (tensor<2x2xf64>) -> ()
+
     // CHECK-NEXT: 0
     // CHECK-NEXT: 0
     // CHECK-NEXT: 1
@@ -199,6 +202,7 @@ module {
     // CHECK-NEXT: 1
     // CHECK-NEXT: 6
     call @foreach_print_1(%s1) : (tensor<2x2xf64, #Row>) -> ()
+
     // CHECK-NEXT: 0
     // CHECK-NEXT: 0
     // CHECK-NEXT: 1
@@ -212,6 +216,7 @@ module {
     // CHECK-NEXT: 1
     // CHECK-NEXT: 6
     call @foreach_print_2(%s2) : (tensor<2x2xf64, #CSR>) -> ()
+
     // CHECK-NEXT: 0
     // CHECK-NEXT: 0
     // CHECK-NEXT: 1
@@ -225,6 +230,7 @@ module {
     // CHECK-NEXT: 1
     // CHECK-NEXT: 6
     call @foreach_print_3(%s3) : (tensor<2x2xf64, #DCSC>) -> ()
+
     // CHECK-NEXT: 0
     // CHECK-NEXT: 0
     // CHECK-NEXT: 1
@@ -238,6 +244,7 @@ module {
     // CHECK-NEXT: 1
     // CHECK-NEXT: 6
     call @foreach_print_4(%s4) : (tensor<2x2xf64, #SortedCOO>) -> ()
+
     // CHECK-NEXT: 0
     // CHECK-NEXT: 0
     // CHECK-NEXT: 1
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir
index 2b5155464f0eef..cae599fa30ae26 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -115,7 +115,7 @@ module {
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %df = arith.constant -1.0 : f64
 
@@ -157,69 +157,95 @@ module {
     //
     // CHECK:      ( 1.1, 0, 1.3, 0, 2.1, 0, 2.3, 0, 3.1, 0, 3.3, 0 )
     // CHECK-NEXT: ( 1.1, 0, 1.3, 0, 2.1, 0, 2.3, 0, 3.1, 0, 3.3, 0 )
-    // CHECK-NEXT: ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3
-    // CHECK-NEXT: ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3
-    // CHECK-NEXT: ( ( 1, 0, 3, 0, 5, 0, 7, 0, 9, 0 ),
-    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME:   ( 21, 0, 23, 0, 25, 0, 27, 0, 29, 0 ),
-    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME:   ( 41, 0, 43, 0, 45, 0, 47, 0, 49, 0 ),
-    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) )
-    // CHECK-NEXT: ( ( 1, 0, 3, 0, 5, 0, 7, 0, 9, 0 ),
-    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME:   ( 21, 0, 23, 0, 25, 0, 27, 0, 29, 0 ),
-    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME:   ( 41, 0, 43, 0, 45, 0, 47, 0, 49, 0 ),
-    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) )
-    // CHECK-NEXT: ( 1, 3, 5, 7, 9, 21, 23, 25, 27, 29, 41, 43, 45, 47
-    // CHECK-NEXT: ( 1, 3, 5, 7, 9, 21, 23, 25, 27, 29, 41, 43, 45, 47
-    // CHECK-NEXT: ( ( 1, 0, 3, 0, 5, 0, 7, 0, 9, 0 ),
-    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME:   ( 21, 0, 23, 0, 25, 0, 27, 0, 29, 0 ),
-    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME:   ( 41, 0, 43, 0, 45, 0, 47, 0, 49, 0 ),
-    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) )
-    // CHECK-NEXT: ( ( 1, 0, 3, 0, 5, 0, 7, 0, 9, 0 ),
-    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME:   ( 21, 0, 23, 0, 25, 0, 27, 0, 29, 0 ),
-    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME:   ( 41, 0, 43, 0, 45, 0, 47, 0, 49, 0 ),
-    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) )
-    // CHECK-NEXT: ( 1, 3, 5, 7, 9, 21, 23, 25, 27, 29, 41, 43, 45, 47, 49
-    // CHECK-NEXT: ( 1, 3, 5, 7, 9, 21, 23, 25, 27, 29, 41, 43, 45, 47, 49
-
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 12 )
+    // CHECK-NEXT: lvl = ( 12 )
+    // CHECK-NEXT: pos[0] : ( 0, 6
+    // CHECK-NEXT: crd[0] : ( 0, 2, 4, 6, 8, 10
+    // CHECK-NEXT: values : ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 12 )
+    // CHECK-NEXT: lvl = ( 12 )
+    // CHECK-NEXT: pos[0] : ( 0, 6
+    // CHECK-NEXT: crd[0] : ( 0, 2, 4, 6, 8, 10
+    // CHECK-NEXT: values : ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ( ( 1, 0, 3, 0, 5, 0, 7, 0, 9, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), ( 21, 0, 23, 0, 25, 0, 27, 0, 29, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), ( 41, 0, 43, 0, 45, 0, 47, 0, 49, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) )
+    // CHECK-NEXT: ( ( 1, 0, 3, 0, 5, 0, 7, 0, 9, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), ( 21, 0, 23, 0, 25, 0, 27, 0, 29, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), ( 41, 0, 43, 0, 45, 0, 47, 0, 49, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) )
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 15
+    // CHECK-NEXT: dim = ( 6, 10 )
+    // CHECK-NEXT: lvl = ( 6, 10 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 2, 4
+    // CHECK-NEXT: pos[1] : ( 0, 5, 10, 15
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 6, 8, 0, 2, 4, 6, 8, 0, 2, 4, 6, 8
+    // CHECK-NEXT: values : ( 1, 3, 5, 7, 9, 21, 23, 25, 27, 29, 41, 43, 45, 47, 49
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 15
+    // CHECK-NEXT: dim = ( 6, 10 )
+    // CHECK-NEXT: lvl = ( 6, 10 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 2, 4
+    // CHECK-NEXT: pos[1] : ( 0, 5, 10, 15
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 6, 8, 0, 2, 4, 6, 8, 0, 2, 4, 6, 8
+    // CHECK-NEXT: values : ( 1, 3, 5, 7, 9, 21, 23, 25, 27, 29, 41, 43, 45, 47, 49
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ( ( 1, 0, 3, 0, 5, 0, 7, 0, 9, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), ( 21, 0, 23, 0, 25, 0, 27, 0, 29, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), ( 41, 0, 43, 0, 45, 0, 47, 0, 49, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) )
+    // CHECK-NEXT: ( ( 1, 0, 3, 0, 5, 0, 7, 0, 9, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), ( 21, 0, 23, 0, 25, 0, 27, 0, 29, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), ( 41, 0, 43, 0, 45, 0, 47, 0, 49, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) )
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 15
+    // CHECK-NEXT: dim = ( 6, 10 )
+    // CHECK-NEXT: lvl = ( 6, 10 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 2, 4
+    // CHECK-NEXT: pos[1] : ( 0, 5, 10, 15
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 6, 8, 0, 2, 4, 6, 8, 0, 2, 4, 6, 8
+    // CHECK-NEXT: values : ( 1, 3, 5, 7, 9, 21, 23, 25, 27, 29, 41, 43, 45, 47, 49
+    // CHECK-NEXT: ----
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 15
+    // CHECK-NEXT: dim = ( 6, 10 )
+    // CHECK-NEXT: lvl = ( 6, 10 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 2, 4
+    // CHECK-NEXT: pos[1] : ( 0, 5, 10, 15
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 6, 8, 0, 2, 4, 6, 8, 0, 2, 4, 6, 8
+    // CHECK-NEXT: values : ( 1, 3, 5, 7, 9, 21, 23, 25, 27, 29, 41, 43, 45, 47, 49
+    // CHECK-NEXT: ----
+    //
     %v0 = vector.transfer_read %collapse0[%c0], %df: tensor<12xf64>, vector<12xf64>
     vector.print %v0 : vector<12xf64>
     %v1 = vector.transfer_read %collapse1[%c0], %df: tensor<12xf64>, vector<12xf64>
     vector.print %v1 : vector<12xf64>
-    %b2 = sparse_tensor.values %collapse2 : tensor<12xf64, #SparseVector> to memref<?xf64>
-    %v2 = vector.transfer_read %b2[%c0], %df: memref<?xf64>, vector<12xf64>
-    vector.print %v2 : vector<12xf64>
-    %b3 = sparse_tensor.values %collapse3 : tensor<12xf64, #SparseVector> to memref<?xf64>
-    %v3 = vector.transfer_read %b3[%c0], %df: memref<?xf64>, vector<12xf64>
-    vector.print %v3 : vector<12xf64>
+    sparse_tensor.print %collapse2 : tensor<12xf64, #SparseVector>
+    sparse_tensor.print %collapse3 : tensor<12xf64, #SparseVector>
 
     %v4 = vector.transfer_read %collapse4[%c0, %c0], %df: tensor<6x10xf64>, vector<6x10xf64>
     vector.print %v4 : vector<6x10xf64>
     %v5 = vector.transfer_read %collapse5[%c0, %c0], %df: tensor<6x10xf64>, vector<6x10xf64>
     vector.print %v5 : vector<6x10xf64>
-    %b6 = sparse_tensor.values %collapse6 : tensor<6x10xf64, #SparseMatrix> to memref<?xf64>
-    %v6 = vector.transfer_read %b6[%c0], %df: memref<?xf64>, vector<60xf64>
-    vector.print %v6 : vector<60xf64>
-    %b7 = sparse_tensor.values %collapse7 : tensor<6x10xf64, #SparseMatrix> to memref<?xf64>
-    %v7 = vector.transfer_read %b7[%c0], %df: memref<?xf64>, vector<60xf64>
-    vector.print %v7 : vector<60xf64>
+    sparse_tensor.print %collapse6 : tensor<6x10xf64, #SparseMatrix>
+    sparse_tensor.print %collapse7 : tensor<6x10xf64, #SparseMatrix>
 
     %v8 = vector.transfer_read %collapse8[%c0, %c0], %df: tensor<?x?xf64>, vector<6x10xf64>
     vector.print %v8 : vector<6x10xf64>
     %v9 = vector.transfer_read %collapse9[%c0, %c0], %df: tensor<?x?xf64>, vector<6x10xf64>
     vector.print %v9 : vector<6x10xf64>
-    %b10 = sparse_tensor.values %collapse10 : tensor<?x?xf64, #SparseMatrix> to memref<?xf64>
-    %v10 = vector.transfer_read %b10[%c0], %df: memref<?xf64>, vector<60xf64>
-    vector.print %v10 : vector<60xf64>
-    %b11 = sparse_tensor.values %collapse11 : tensor<?x?xf64, #SparseMatrix> to memref<?xf64>
-    %v11 = vector.transfer_read %b11[%c0], %df: memref<?xf64>, vector<60xf64>
-    vector.print %v11 : vector<60xf64>
+    sparse_tensor.print %collapse10 : tensor<?x?xf64, #SparseMatrix>
+    sparse_tensor.print %collapse11 : tensor<?x?xf64, #SparseMatrix>
 
     // Release sparse resources.
     bufferization.dealloc_tensor %sm : tensor<3x4xf64, #SparseMatrix>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir
index b5efdcc09a3901..abdbf80d0bc411 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -38,7 +38,7 @@
 // Integration tests for conversions from sparse constants to sparse tensors.
 //
 module {
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
     %c2 = arith.constant 2 : index
@@ -51,20 +51,19 @@ module {
     // Convert the tensor in COO format to a sparse tensor with annotation #Tensor1.
     %ts = sparse_tensor.convert %ti : tensor<10x8xf64> to tensor<10x8xf64, #Tensor1>
 
-    // CHECK: ( 0, 1, 4, 5, 6, 9 )
-    %i0 = sparse_tensor.coordinates %ts { level = 0 : index } : tensor<10x8xf64, #Tensor1> to memref<?xindex>
-    %i0r = vector.transfer_read %i0[%c0], %c0: memref<?xindex>, vector<6xindex>
-    vector.print %i0r : vector<6xindex>
-
-    // CHECK: ( 0, 7, 2, 2, 3, 4, 6, 7 )
-    %i1 = sparse_tensor.coordinates %ts { level = 1 : index } : tensor<10x8xf64, #Tensor1> to memref<?xindex>
-    %i1r = vector.transfer_read %i1[%c0], %c0: memref<?xindex>, vector<8xindex>
-    vector.print %i1r : vector<8xindex>
-
-    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 8 )
-    %v = sparse_tensor.values %ts : tensor<10x8xf64, #Tensor1> to memref<?xf64>
-    %vr = vector.transfer_read %v[%c0], %d0: memref<?xf64>, vector<8xf64>
-    vector.print %vr : vector<8xf64>
+    //
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 10, 8 )
+    // CHECK-NEXT: lvl = ( 10, 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 6
+    // CHECK-NEXT: crd[0] : ( 0, 1, 4, 5, 6, 9
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 4, 5, 7, 8
+    // CHECK-NEXT: crd[1] : ( 0, 7, 2, 2, 3, 4, 6, 7
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %ts : tensor<10x8xf64, #Tensor1>
 
     // Release the resources.
     bufferization.dealloc_tensor %ts : tensor<10x8xf64, #Tensor1>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir
index 16a67a1458369c..612e62bd34d283 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -79,7 +79,7 @@ func.func @conv_1d_nwc_wcf_CDC(%arg0: tensor<?x?x?xf32, #CDC>, %arg1: tensor<?x?
   return %ret : tensor<?x?x?xf32, #CDC>
 }
 
-func.func @entry() {
+func.func @main() {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c3 = arith.constant 3 : index
@@ -111,23 +111,35 @@ func.func @entry() {
       : tensor<?x?x?xf32>, vector<3x6x1xf32>
   vector.print %dense_v : vector<3x6x1xf32>
 
-  //      CHECK: ( ( ( 12 ), ( 28 ), ( 28 ), ( 28 ), ( 12 ), ( 12 ) ),
-  // CHECK-SAME:   ( ( 12 ), ( 12 ), ( 12 ), ( 12 ), ( 12 ), ( 12 ) ),
-  // CHECK-SAME:   ( ( 12 ), ( 12 ), ( 12 ), ( 12 ), ( 12 ), ( 12 ) ) )
-  %1 = sparse_tensor.convert %CCC_ret
-    : tensor<?x?x?xf32, #CCC> to tensor<?x?x?xf32>
-  %v1 = vector.transfer_read %1[%c0, %c0, %c0], %zero
-      : tensor<?x?x?xf32>, vector<3x6x1xf32>
-  vector.print %v1 : vector<3x6x1xf32>
-
-  //      CHECK: ( ( ( 12 ), ( 28 ), ( 28 ), ( 28 ), ( 12 ), ( 12 ) ),
-  // CHECK-SAME:   ( ( 12 ), ( 12 ), ( 12 ), ( 12 ), ( 12 ), ( 12 ) ),
-  // CHECK-SAME:   ( ( 12 ), ( 12 ), ( 12 ), ( 12 ), ( 12 ), ( 12 ) ) )
-  %2 = sparse_tensor.convert %CDC_ret
-    : tensor<?x?x?xf32, #CDC> to tensor<?x?x?xf32>
-  %v2 = vector.transfer_read %2[%c0, %c0, %c0], %zero
-      : tensor<?x?x?xf32>, vector<3x6x1xf32>
-  vector.print %v2 : vector<3x6x1xf32>
+  //
+  // CHECK:      ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 18
+  // CHECK-NEXT: dim = ( 3, 6, 1 )
+  // CHECK-NEXT: lvl = ( 3, 6, 1 )
+  // CHECK-NEXT: pos[0] : ( 0, 3
+  // CHECK-NEXT: crd[0] : ( 0, 1, 2
+  // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18
+  // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: pos[2] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18
+  // CHECK-NEXT: crd[2] : ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+  // CHECK-NEXT: values : ( 12, 28, 28, 28, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12
+  // CHECK-NEXT: ----
+  //
+  sparse_tensor.print %CCC_ret : tensor<?x?x?xf32, #CCC>
+
+  //
+  // CHECK:      ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 18
+  // CHECK-NEXT: dim = ( 3, 6, 1 )
+  // CHECK-NEXT: lvl = ( 3, 6, 1 )
+  // CHECK-NEXT: pos[0] : ( 0, 3
+  // CHECK-NEXT: crd[0] : ( 0, 1, 2
+  // CHECK-NEXT: pos[2] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18
+  // CHECK-NEXT: crd[2] : ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+  // CHECK-NEXT: values : ( 12, 28, 28, 28, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12
+  // CHECK-NEXT: ----
+  //
+  sparse_tensor.print %CDC_ret : tensor<?x?x?xf32, #CDC>
 
   // Free the resources
   bufferization.dealloc_tensor %in1D_nwc : tensor<?x?x?xf32>
@@ -140,8 +152,5 @@ func.func @entry() {
   bufferization.dealloc_tensor %CCC_ret : tensor<?x?x?xf32, #CCC>
   bufferization.dealloc_tensor %CDC_ret : tensor<?x?x?xf32, #CDC>
 
-  bufferization.dealloc_tensor %1 : tensor<?x?x?xf32>
-  bufferization.dealloc_tensor %2 : tensor<?x?x?xf32>
-
   return
 }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
index 41071ea700fb60..f8fb8fdf53e356 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -113,7 +113,7 @@ module {
     return %0 : tensor<6x6xi32, #CSC>
   }
 
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %i0 = arith.constant 0 : i32
 
@@ -181,82 +181,81 @@ module {
     vector.print %v : vector<6x6xi32>
 
     //
-    // Should be the same as dense output
-    // CHECK:    ( ( 0, 0, -1, -6, -1, 6 ),
-    // CHECK-SAME: ( -1, 0, 1, 0, 1, 0 ),
-    // CHECK-SAME: ( 0, -1, 1, 0, 0, 0 ),
-    // CHECK-SAME: ( -1, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME: ( 0, 0, 3, 6, -3, -6 ),
-    // CHECK-SAME: ( 2, -1, 3, 0, -3, 0 ) )
+    // Should be the same as dense output.
     //
-    %sparse_ret = sparse_tensor.convert %1
-      : tensor<6x6xi32, #DCSR> to tensor<6x6xi32>
-    %v1 = vector.transfer_read %sparse_ret[%c0, %c0], %i0
-      : tensor<6x6xi32>, vector<6x6xi32>
-    vector.print %v1 : vector<6x6xi32>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 36
+    // CHECK-NEXT: dim = ( 6, 6 )
+    // CHECK-NEXT: lvl = ( 6, 6 )
+    // CHECK-NEXT: pos[0] : ( 0, 6
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %1 : tensor<6x6xi32, #DCSR>
 
     //
-    // Should be the same as dense output
-    // CHECK:    ( ( 0, 0, -1, -6, -1, 6 ),
-    // CHECK-SAME: ( -1, 0, 1, 0, 1, 0 ),
-    // CHECK-SAME: ( 0, -1, 1, 0, 0, 0 ),
-    // CHECK-SAME: ( -1, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME: ( 0, 0, 3, 6, -3, -6 ),
-    // CHECK-SAME: ( 2, -1, 3, 0, -3, 0 ) )
+    // Should be the same as dense output.
     //
-    %all_sparse_DCSR = sparse_tensor.convert %2
-      : tensor<6x6xi32, #DCSR> to tensor<6x6xi32>
-    %v2 = vector.transfer_read %all_sparse_DCSR[%c0, %c0], %i0
-      : tensor<6x6xi32>, vector<6x6xi32>
-    vector.print %v2 : vector<6x6xi32>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 36
+    // CHECK-NEXT: dim = ( 6, 6 )
+    // CHECK-NEXT: lvl = ( 6, 6 )
+    // CHECK-NEXT: pos[0] : ( 0, 6
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %2 : tensor<6x6xi32, #DCSR>
 
     //
-    // Should be the same as dense output
-    // CHECK:    ( ( 0, 0, -1, -6, -1, 6 ),
-    // CHECK-SAME: ( -1, 0, 1, 0, 1, 0 ),
-    // CHECK-SAME: ( 0, -1, 1, 0, 0, 0 ),
-    // CHECK-SAME: ( -1, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME: ( 0, 0, 3, 6, -3, -6 ),
-    // CHECK-SAME: ( 2, -1, 3, 0, -3, 0 ) )
+    // Should be the same as dense output.
     //
-    %all_sparse_CD = sparse_tensor.convert %4
-      : tensor<6x6xi32, #CDR> to tensor<6x6xi32>
-    %v4 = vector.transfer_read %all_sparse_CD[%c0, %c0], %i0
-      : tensor<6x6xi32>, vector<6x6xi32>
-    vector.print %v4 : vector<6x6xi32>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 36
+    // CHECK-NEXT: dim = ( 6, 6 )
+    // CHECK-NEXT: lvl = ( 6, 6 )
+    // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %3 : tensor<6x6xi32, #CSR>
 
     //
-    // Should be the same as dense output
-    // CHECK:    ( ( 0, 0, -1, -6, -1, 6 ),
-    // CHECK-SAME: ( -1, 0, 1, 0, 1, 0 ),
-    // CHECK-SAME: ( 0, -1, 1, 0, 0, 0 ),
-    // CHECK-SAME: ( -1, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME: ( 0, 0, 3, 6, -3, -6 ),
-    // CHECK-SAME: ( 2, -1, 3, 0, -3, 0 ) )
+    // Should be the same as dense output.
     //
-    %all_sparse_CSR = sparse_tensor.convert %3
-      : tensor<6x6xi32, #CSR> to tensor<6x6xi32>
-    %v3 = vector.transfer_read %all_sparse_CSR[%c0, %c0], %i0
-      : tensor<6x6xi32>, vector<6x6xi32>
-    vector.print %v3 : vector<6x6xi32>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 36
+    // CHECK-NEXT: dim = ( 6, 6 )
+    // CHECK-NEXT: lvl = ( 6, 6 )
+    // CHECK-NEXT: pos[0] : ( 0, 6
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %4 : tensor<6x6xi32, #CDR>
 
     //
-    // Should be the same as dense output
-    // CHECK:    ( ( 0, 0, -1, -6, -1, 6 ),
-    // CHECK-SAME: ( -1, 0, 1, 0, 1, 0 ),
-    // CHECK-SAME: ( 0, -1, 1, 0, 0, 0 ),
-    // CHECK-SAME: ( -1, 0, 0, 0, 0, 0 ),
-    // CHECK-SAME: ( 0, 0, 3, 6, -3, -6 ),
-    // CHECK-SAME: ( 2, -1, 3, 0, -3, 0 ) )
+    // Should be the same as dense output.
     //
-    %all_sparse_CSC = sparse_tensor.convert %5
-      : tensor<6x6xi32, #CSC> to tensor<6x6xi32>
-    %v5 = vector.transfer_read %all_sparse_CSC[%c0, %c0], %i0
-      : tensor<6x6xi32>, vector<6x6xi32>
-    vector.print %v5 : vector<6x6xi32>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 36
+    // CHECK-NEXT: dim = ( 6, 6 )
+    // CHECK-NEXT: lvl = ( 6, 6 )
+    // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+    // CHECK-NEXT: values : ( 0, -1, 0, -1, 0, 2, 0, 0, -1, 0, 0, -1, -1, 1, 1, 0, 3, 3, -6, 0, 0, 0, 6, 0, -1, 1, 0, 0, -3, -3, 6, 0, 0, 0, -6, 0
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %5 : tensor<6x6xi32, #CSC>
 
     //
-    // Should be the same as dense output
+    // Should be the same as dense output.
     // CHECK:    ( ( 0, 0, -1, -6, -1, 6 ),
     // CHECK-SAME: ( -1, 0, 1, 0, 1, 0 ),
     // CHECK-SAME: ( 0, -1, 1, 0, 0, 0 ),
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_55.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_55.mlir
index a7d7d1c5ed3c31..00805d198013d5 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_55.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_55.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -68,7 +68,7 @@ module {
     return %0 : tensor<6x6xi32>
   }
 
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %i0 = arith.constant 0 : i32
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir
index 95ce4f1bf48d5d..9150e97e72481b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -82,7 +82,7 @@ func.func @conv_2d_nchw_fchw_CCCC_CCCC(%arg0: tensor<?x?x?x?xf32, #CCCC>, %arg1:
   return %ret : tensor<?x?x?x?xf32>
 }
 
-func.func @entry() {
+func.func @main() {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c3 = arith.constant 3 : index
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir
index d0fbce7146fe57..d04311e59bafad 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -93,7 +93,7 @@ func.func @conv_2d_nhwc_hwcf_DCCD(%arg0: tensor<?x?x?x?xf32, #DCCD>, %arg1: tens
   return %ret : tensor<?x?x?x?xf32, #DCCD>
 }
 
-func.func @entry() {
+func.func @main() {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c3 = arith.constant 3 : index
@@ -142,77 +142,93 @@ func.func @entry() {
       : tensor<?x?x?x?xf32>, vector<3x6x6x1xf32>
   vector.print %dense_v : vector<3x6x6x1xf32>
 
-  // CHECK:     ( ( ( ( 108 ), ( 124 ), ( 124 ), ( 124 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:  ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:  ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ) )
-  %1 = sparse_tensor.convert %CCCC_ret
-    : tensor<?x?x?x?xf32, #CCCC> to tensor<?x?x?x?xf32>
-  %v1 = vector.transfer_read %1[%c0, %c0, %c0, %c0], %zero
-      : tensor<?x?x?x?xf32>, vector<3x6x6x1xf32>
-  vector.print %v1 : vector<3x6x6x1xf32>
-
-  // CHECK:     ( ( ( ( 108 ), ( 124 ), ( 124 ), ( 124 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:  ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:  ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ) )
-  %2 = sparse_tensor.convert %CDCD_ret
-    : tensor<?x?x?x?xf32, #CDCD> to tensor<?x?x?x?xf32>
-  %v2 = vector.transfer_read %2[%c0, %c0, %c0, %c0], %zero
-      : tensor<?x?x?x?xf32>, vector<3x6x6x1xf32>
-  vector.print %v2 : vector<3x6x6x1xf32>
-
-  // CHECK:     ( ( ( ( 108 ), ( 124 ), ( 124 ), ( 124 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:  ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:  ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:    ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ) )
-  %3 = sparse_tensor.convert %DCCD_ret
-    : tensor<?x?x?x?xf32, #DCCD> to tensor<?x?x?x?xf32>
-  %v3 = vector.transfer_read %3[%c0, %c0, %c0, %c0], %zero
-      : tensor<?x?x?x?xf32>, vector<3x6x6x1xf32>
-  vector.print %v3 : vector<3x6x6x1xf32>
+  //
+  // CHECK:      ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 108
+  // CHECK-NEXT: dim = ( 3, 6, 6, 1 )
+  // CHECK-NEXT: lvl = ( 3, 6, 6, 1 )
+  // CHECK-NEXT: pos[0] : ( 0, 3
+  // CHECK-NEXT: crd[0] : ( 0, 1, 2
+  // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18
+  // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: pos[2] : ( 0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78, 84, 90, 96, 102, 108
+  // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0,
+  // CHECK-SAME:            1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
+  // CHECK-SAME:            3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: pos[3] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+  // CHECK-SAME:            21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+  // CHECK-SAME:            40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+  // CHECK-SAME:            59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+  // CHECK-SAME:            78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96,
+  // CHECK-SAME:            97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108
+  // CHECK-NEXT: crd[3] : ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0
+  // CHECK-NEXT: values : ( 108, 124, 124, 124, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108
+  // CHECK-NEXT: ----
+  //
+  sparse_tensor.print %CCCC_ret : tensor<?x?x?x?xf32, #CCCC>
+
+  //
+  // CHECK:      ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 108
+  // CHECK-NEXT: dim = ( 3, 6, 6, 1 )
+  // CHECK-NEXT: lvl = ( 3, 6, 6, 1 )
+  // CHECK-NEXT: pos[0] : ( 0, 3
+  // CHECK-NEXT: crd[0] : ( 0, 1, 2
+  // CHECK-NEXT: pos[2] : ( 0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78, 84, 90, 96, 102, 108
+  // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0,
+  // CHECK-SAME:            1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
+  // CHECK-SAME:            3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: values : ( 108, 124, 124, 124, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108
+  // CHECK-NEXT: ----
+  //
+  sparse_tensor.print %CDCD_ret : tensor<?x?x?x?xf32, #CDCD>
+
+  //
+  // CHECK:      ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 108
+  // CHECK-NEXT: dim = ( 3, 6, 6, 1 )
+  // CHECK-NEXT: lvl = ( 3, 6, 6, 1 )
+  // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18
+  // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: pos[2] : ( 0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78, 84, 90, 96, 102, 108
+  // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0,
+  // CHECK-SAME:            1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
+  // CHECK-SAME:            3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: values : ( 108, 124, 124, 124, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108
+  // CHECK-NEXT: ----
+  //
+  sparse_tensor.print %DCCD_ret : tensor<?x?x?x?xf32, #DCCD>
 
   // Free the resources
   bufferization.dealloc_tensor %in2D_nhwc : tensor<?x?x?x?xf32>
@@ -227,9 +243,5 @@ func.func @entry() {
   bufferization.dealloc_tensor %CDCD_ret : tensor<?x?x?x?xf32, #CDCD>
   bufferization.dealloc_tensor %DCCD_ret : tensor<?x?x?x?xf32, #DCCD>
 
-  bufferization.dealloc_tensor %1 : tensor<?x?x?x?xf32>
-  bufferization.dealloc_tensor %2 : tensor<?x?x?x?xf32>
-  bufferization.dealloc_tensor %3 : tensor<?x?x?x?xf32>
-
   return
 }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir
index f0a26dc46b056e..5e2d1707a2495e 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -96,7 +96,7 @@ func.func @conv_3d_DDC(%arg0: tensor<?x?x?xf32, #DDC>, %arg1: tensor<?x?x?xf32>)
   return %ret : tensor<?x?x?xf32, #DDC>
 }
 
-func.func @entry() {
+func.func @main() {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c3 = arith.constant 3 : index
@@ -166,173 +166,180 @@ func.func @entry() {
       : tensor<?x?x?xf32>, vector<6x6x6xf32>
   vector.print %dense_v : vector<6x6x6xf32>
 
-  // CHECK-NEXT:( ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 124, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 124, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 124, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ) )
-  %1 = sparse_tensor.convert %CCC_ret
-    : tensor<?x?x?xf32, #CCC> to tensor<?x?x?xf32>
-  %v1 = vector.transfer_read %1[%c0, %c0, %c0], %zero
-      : tensor<?x?x?xf32>, vector<6x6x6xf32>
-  vector.print %v1 : vector<6x6x6xf32>
+  //
+  // CHECK:      ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 216
+  // CHECK-NEXT: dim = ( 6, 6, 6 )
+  // CHECK-NEXT: lvl = ( 6, 6, 6 )
+  // CHECK-NEXT: pos[0] : ( 0, 6
+  // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
+  // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: pos[2] : ( 0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78,
+  // CHECK-SAME:            84, 90, 96, 102, 108, 114, 120, 126, 132, 138, 144, 150,
+  // CHECK-SAME:            156, 162, 168, 174, 180, 186, 192, 198, 204, 210, 216
+  // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0,
+  // CHECK-SAME:            1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
+  // CHECK-SAME:            3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4,
+  // CHECK-SAME:            5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0,
+  // CHECK-SAME:            1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
+  // CHECK-SAME:            3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4,
+  // CHECK-SAME:            5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: values : ( 108, 108, 108, 108, 108, 108, 124, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            124, 108, 108, 108, 108, 108, 124, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108
+  // CHECK-NEXT: ----
+  //
+  sparse_tensor.print %CCC_ret : tensor<?x?x?xf32, #CCC>
 
-  // CHECK-NEXT:( ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 124, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 124, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 124, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ) )
-  %2 = sparse_tensor.convert %CCC_ret
-    : tensor<?x?x?xf32, #CCC> to tensor<?x?x?xf32>
-  %v2 = vector.transfer_read %2[%c0, %c0, %c0], %zero
-      : tensor<?x?x?xf32>, vector<6x6x6xf32>
-  vector.print %v2 : vector<6x6x6xf32>
+  //
+  // CHECK:      ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 216
+  // CHECK-NEXT: dim = ( 6, 6, 6 )
+  // CHECK-NEXT: lvl = ( 6, 6, 6 )
+  // CHECK-NEXT: pos[0] : ( 0, 6
+  // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: pos[2] : ( 0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78, 84,
+  // CHECK-SAME:            90, 96, 102, 108, 114, 120, 126, 132, 138, 144, 150, 156,
+  // CHECK-SAME:            162, 168, 174, 180, 186, 192, 198, 204, 210, 216
+  // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: values : ( 108, 108, 108, 108, 108, 108, 124, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            124, 108, 108, 108, 108, 108, 124, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108
+  // CHECK-NEXT: ----
+  //
+  sparse_tensor.print %CDC_ret : tensor<?x?x?xf32, #CDC>
 
-  // CHECK-NEXT:( ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 124, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 124, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 124, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ) )
-  %3 = sparse_tensor.convert %DDC_ret
-    : tensor<?x?x?xf32, #DDC> to tensor<?x?x?xf32>
-  %v3 = vector.transfer_read %3[%c0, %c0, %c0], %zero
-      : tensor<?x?x?xf32>, vector<6x6x6xf32>
-  vector.print %v2 : vector<6x6x6xf32>
+  //
+  // CHECK:      ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 216
+  // CHECK-NEXT: dim = ( 6, 6, 6 )
+  // CHECK-NEXT: lvl = ( 6, 6, 6 )
+  // CHECK-NEXT: pos[2] : ( 0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78, 84, 90,
+  // CHECK-SAME:            96, 102, 108, 114, 120, 126, 132, 138, 144, 150, 156, 162,
+  // CHECK-SAME:            168, 174, 180, 186, 192, 198, 204, 210, 216
+  // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: values : ( 108, 108, 108, 108, 108, 108, 124, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            124, 108, 108, 108, 108, 108, 124, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108
+  // CHECK-NEXT: ----
+  //
+  sparse_tensor.print %DDC_ret : tensor<?x?x?xf32, #DDC>
 
-  // CHECK-NEXT:( ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 124, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 124, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 124, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ),
-  // CHECK-SAME:  ( ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ),
-  // CHECK-SAME:    ( 108, 108, 108, 108, 108, 108 ) ) )
-  %4 = sparse_tensor.convert %DCC_ret
-    : tensor<?x?x?xf32, #DCC> to tensor<?x?x?xf32>
-  %v4 = vector.transfer_read %3[%c0, %c0, %c0], %zero
-      : tensor<?x?x?xf32>, vector<6x6x6xf32>
-  vector.print %v2 : vector<6x6x6xf32>
+  //
+  // CHECK:      ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 216
+  // CHECK-NEXT: dim = ( 6, 6, 6 )
+  // CHECK-NEXT: lvl = ( 6, 6, 6 )
+  // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
+  // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: pos[2] : ( 0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78, 84, 90,
+  // CHECK-SAME:            96, 102, 108, 114, 120, 126, 132, 138, 144, 150, 156, 162,
+  // CHECK-SAME:            168, 174, 180, 186, 192, 198, 204, 210, 216
+  // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: values : ( 108, 108, 108, 108, 108, 108, 124, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            124, 108, 108, 108, 108, 108, 124, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108
+  // CHECK-NEXT: ----
+  //
+  sparse_tensor.print %DCC_ret : tensor<?x?x?xf32, #DCC>
 
   // Free the resources
   bufferization.dealloc_tensor %in3D : tensor<?x?x?xf32>
@@ -349,10 +356,5 @@ func.func @entry() {
   bufferization.dealloc_tensor %DDC_ret : tensor<?x?x?xf32, #DDC>
   bufferization.dealloc_tensor %DCC_ret : tensor<?x?x?xf32, #DCC>
 
-  bufferization.dealloc_tensor %1 : tensor<?x?x?xf32>
-  bufferization.dealloc_tensor %2 : tensor<?x?x?xf32>
-  bufferization.dealloc_tensor %3 : tensor<?x?x?xf32>
-  bufferization.dealloc_tensor %4 : tensor<?x?x?xf32>
-
   return
 }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir
index 346a1436928971..f68e429a3c821e 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -83,7 +83,7 @@ func.func @conv_3d_ndhwc_dhwcf_CDCDC(%arg0: tensor<?x?x?x?x?xf32, #CDCDC>,
   return %ret : tensor<?x?x?x?x?xf32, #CDCDC>
 }
 
-func.func @entry() {
+func.func @main() {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c3 = arith.constant 3 : index
@@ -150,93 +150,134 @@ func.func @entry() {
       : (tensor<?x?x?x?x?xf32, #CCCCC>,
          tensor<?x?x?x?x?xf32>) -> (tensor<?x?x?x?x?xf32, #CCCCC>)
 
-  // CHECK-NEXT:( ( ( ( ( 108 ), ( 124 ), ( 124 ), ( 124 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:    ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:    ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:    ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:    ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:    ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ) ) )
-  %1 = sparse_tensor.convert %CCCCC_ret
-    : tensor<?x?x?x?x?xf32, #CCCCC> to tensor<?x?x?x?x?xf32>
-  %v1 = vector.transfer_read %1[%c0, %c0, %c0, %c0, %c0], %zero
-      : tensor<?x?x?x?x?xf32>, vector<1x6x6x6x1xf32>
-  vector.print %v1 : vector<1x6x6x6x1xf32>
+  //
+  // CHECK:      ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 216
+  // CHECK-NEXT: dim = ( 1, 6, 6, 6, 1 )
+  // CHECK-NEXT: lvl = ( 1, 6, 6, 6, 1 )
+  // CHECK-NEXT: pos[0] : ( 0, 1
+  // CHECK-NEXT: crd[0] : ( 0
+  // CHECK-NEXT: pos[1] : ( 0, 6
+  // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: pos[2] : ( 0, 6, 12, 18, 24, 30, 36
+  // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: pos[3] : ( 0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78, 84, 90, 96,
+  // CHECK-SAME:            102, 108, 114, 120, 126, 132, 138, 144, 150, 156, 162, 168, 174,
+  // CHECK-SAME:            180, 186, 192, 198, 204, 210, 216
+  // CHECK-NEXT: crd[3] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+  // CHECK-SAME:            2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+  // CHECK-SAME:            0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: pos[4] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+  // CHECK-SAME:            19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+  // CHECK-SAME:            36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+  // CHECK-SAME:            53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+  // CHECK-SAME:            70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
+  // CHECK-SAME:            87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
+  // CHECK-SAME:            103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
+  // CHECK-SAME:            117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
+  // CHECK-SAME:            131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+  // CHECK-SAME:            145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158,
+  // CHECK-SAME:            159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+  // CHECK-SAME:            173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+  // CHECK-SAME:            187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+  // CHECK-SAME:            201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+  // CHECK-SAME:            215, 216
+  // CHECK-NEXT: crd[4] : ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0
+  // CHECK-NEXT: values : ( 108, 124, 124, 124, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108
+  // CHECK-NEXT: ----
+  //
+  sparse_tensor.print %CCCCC_ret : tensor<?x?x?x?x?xf32, #CCCCC>
 
   %CDCDC_ret = call @conv_3d_ndhwc_dhwcf_CDCDC(%in3D_ndhwc_CDCDC, %filter3D_ndhwc)
       : (tensor<?x?x?x?x?xf32, #CDCDC>,
          tensor<?x?x?x?x?xf32>) -> (tensor<?x?x?x?x?xf32, #CDCDC>)
 
-  // CHECK-NEXT:( ( ( ( ( 108 ), ( 124 ), ( 124 ), ( 124 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:    ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:    ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:    ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:    ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ),
-  // CHECK-SAME:    ( ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ),
-  // CHECK-SAME:      ( ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ), ( 108 ) ) ) ) )
-  %2 = sparse_tensor.convert %CDCDC_ret
-    : tensor<?x?x?x?x?xf32, #CDCDC> to tensor<?x?x?x?x?xf32>
-  %v2 = vector.transfer_read %dense_ret[%c0, %c0, %c0, %c0, %c0], %zero
-      : tensor<?x?x?x?x?xf32>, vector<1x6x6x6x1xf32>
-  vector.print %v2 : vector<1x6x6x6x1xf32>
+  //
+  // CHECK:      ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 216
+  // CHECK-NEXT: dim = ( 1, 6, 6, 6, 1 )
+  // CHECK-NEXT: lvl = ( 1, 6, 6, 6, 1 )
+  // CHECK-NEXT: pos[0] : ( 0, 1
+  // CHECK-NEXT: crd[0] : ( 0
+  // CHECK-NEXT: pos[2] : ( 0, 6, 12, 18, 24, 30, 36
+  // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+  // CHECK-SAME:            4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
+  // CHECK-NEXT: pos[4] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+  // CHECK-SAME:            19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+  // CHECK-SAME:            36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+  // CHECK-SAME:            53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+  // CHECK-SAME:            70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
+  // CHECK-SAME:            87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
+  // CHECK-SAME:            103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
+  // CHECK-SAME:            117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
+  // CHECK-SAME:            131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+  // CHECK-SAME:            145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158,
+  // CHECK-SAME:            159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+  // CHECK-SAME:            173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+  // CHECK-SAME:            187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+  // CHECK-SAME:            201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+  // CHECK-SAME:            215, 216
+  // CHECK-NEXT: crd[4] : ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  // CHECK-SAME:            0, 0, 0, 0, 0, 0, 0, 0, 0
+  // CHECK-NEXT: values : ( 108, 124, 124, 124, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
+  // CHECK-SAME:            108, 108, 108, 108, 108, 108
+  // CHECK-NEXT: ----
+  //
+  sparse_tensor.print %CDCDC_ret : tensor<?x?x?x?x?xf32, #CDCDC>
 
   // Free the resources
   bufferization.dealloc_tensor %in3D_ndhwc : tensor<?x?x?x?x?xf32>
@@ -249,8 +290,5 @@ func.func @entry() {
   bufferization.dealloc_tensor %CCCCC_ret : tensor<?x?x?x?x?xf32, #CCCCC>
   bufferization.dealloc_tensor %CDCDC_ret : tensor<?x?x?x?x?xf32, #CDCDC>
 
-  bufferization.dealloc_tensor %1 : tensor<?x?x?x?x?xf32>
-  bufferization.dealloc_tensor %2 : tensor<?x?x?x?x?xf32>
-
   return
 }
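
For reference, the mechanical change applied throughout these tests is sketched below; the 2x2 CSR tensor %t, the constants %c0/%f0, and the #CSR alias are placeholders for illustration only, not taken from any one test above.

  // Old idiom: densify the result, read it back, and print a dense vector.
  %d = sparse_tensor.convert %t : tensor<2x2xf64, #CSR> to tensor<2x2xf64>
  %v = vector.transfer_read %d[%c0, %c0], %f0 : tensor<2x2xf64>, vector<2x2xf64>
  vector.print %v : vector<2x2xf64>
  bufferization.dealloc_tensor %d : tensor<2x2xf64>

  // New idiom: print the sparse storage directly (nse, dim, lvl, pos/crd per level, values),
  // so no temporary dense tensor needs to be allocated or deallocated.
  sparse_tensor.print %t : tensor<2x2xf64, #CSR>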


