[Mlir-commits] [mlir] [mlir][sparse] migration to sparse_tensor.print (PR #83923)

Aart Bik llvmlistbot at llvm.org
Mon Mar 4 15:15:32 PST 2024


https://github.com/aartbik created https://github.com/llvm/llvm-project/pull/83923

Continuing the efforts started in #83357
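For context, the pattern applied throughout this patch is twofold: the test
driver is renamed from @entry to @main (matching the updated "-e main" run
line), and the hand-rolled dump helpers built on sparse_tensor.values,
vector.transfer_read, and vector.print are replaced by a single
sparse_tensor.print per result. A minimal self-contained sketch of the new
style (the encoding, shape, and constant below are illustrative only, not
copied from any one test):

  // Illustrative sparse vector encoding; each test defines its own.
  #SparseVector = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>

  module {
    // The driver is now @main and prints each result directly.
    func.func @main() {
      %d = arith.constant sparse<[[0], [3], [11]], [1.0, 2.0, 3.0]> : tensor<32xf64>
      %sv = sparse_tensor.convert %d : tensor<32xf64> to tensor<32xf64, #SparseVector>
      // Prints nse, dim, lvl, and the per-level pos/crd/values arrays, e.g.:
      //   ---- Sparse Tensor ----
      //   nse = 3
      //   dim = ( 32 )
      //   lvl = ( 32 )
      //   pos[0] : ( 0, 3,
      //   crd[0] : ( 0, 3, 11,
      //   values : ( 1, 2, 3,
      //   ----
      sparse_tensor.print %sv : tensor<32xf64, #SparseVector>
      bufferization.dealloc_tensor %sv : tensor<32xf64, #SparseVector>
      return
    }
  }

The FileCheck patterns now verify the stored sparse structure (positions and
coordinates) rather than a densified view, which is why the CHECK blocks in
the diff below grow even as the MLIR itself gets shorter.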

From d0264ca46adc3e691ea616b99bdd5594750d45c8 Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik at google.com>
Date: Mon, 4 Mar 2024 15:14:21 -0800
Subject: [PATCH] [mlir][sparse] migration to sparse_tensor.print

Continuing the efforts started in #83357
---
 .../SparseTensor/CPU/block_majors.mlir        |   0
 .../Dialect/SparseTensor/CPU/sparse_abs.mlir  |  37 +--
 .../SparseTensor/CPU/sparse_binary.mlir       | 260 ++++++++++--------
 .../SparseTensor/CPU/sparse_block3d.mlir      |   0
 .../SparseTensor/CPU/sparse_conv_2d_55.mlir   |   0
 .../SparseTensor/CPU/sparse_pack_d.mlir       |   0
 .../SparseTensor/CPU/sparse_print.mlir        |   0
 7 files changed, 165 insertions(+), 132 deletions(-)
 mode change 100755 => 100644 mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
 mode change 100755 => 100644 mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block3d.mlir
 mode change 100755 => 100644 mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_55.mlir
 mode change 100755 => 100644 mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir
 mode change 100755 => 100644 mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
old mode 100755
new mode 100644
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
index 3c10e2662f868a..4228bcdb1c0d70 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -73,7 +73,7 @@ module {
   }
 
   // Driver method to call and verify sign kernel.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %df = arith.constant 99.99 : f64
     %di = arith.constant 9999 : i32
@@ -116,21 +116,26 @@ module {
     //
     // Verify the results.
     //
-    // CHECK:       12
-    // CHECK-NEXT: ( 1.5, 1.5, 10.2, 11.3, 1, 1, nan, nan, inf, inf, 0, 0 )
-    // CHECK-NEXT:  9
-    // CHECK-NEXT: ( -2147483648, 2147483647, 1000, 1, 0, 1, 1000, 2147483646, 2147483647 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 12,
+    // CHECK-NEXT: crd[0] : ( 0, 3, 5, 11, 13, 17, 18, 20, 21, 28, 29, 31,
+    // CHECK-NEXT: values : ( 1.5, 1.5, 10.2, 11.3, 1, 1, nan, nan, inf, inf, 0, 0,
+    // CHECK-NEXT: ----
     //
-    %x = sparse_tensor.values %0 : tensor<?xf64, #SparseVector> to memref<?xf64>
-    %y = sparse_tensor.values %1 : tensor<?xi32, #SparseVector> to memref<?xi32>
-    %a = vector.transfer_read %x[%c0], %df: memref<?xf64>, vector<12xf64>
-    %b = vector.transfer_read %y[%c0], %di: memref<?xi32>, vector<9xi32>
-    %na = sparse_tensor.number_of_entries %0 : tensor<?xf64, #SparseVector>
-    %nb = sparse_tensor.number_of_entries %1 : tensor<?xi32, #SparseVector>
-    vector.print %na : index
-    vector.print %a : vector<12xf64>
-    vector.print %nb : index
-    vector.print %b : vector<9xi32>
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9,
+    // CHECK-NEXT: crd[0] : ( 0, 3, 5, 11, 13, 17, 18, 21, 31,
+    // CHECK-NEXT: values : ( -2147483648, 2147483647, 1000, 1, 0, 1, 1000, 2147483646, 2147483647,
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %0 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %1 : tensor<?xi32, #SparseVector>
 
     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir
index 59ecbfdef85043..36701b4385a2a8 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -365,84 +365,8 @@ module {
     return %0 : tensor<4x4xf64, #DCSR>
   }
 
-  //
-  // Utility functions to dump the value of a tensor.
-  //
-
-  func.func @dump_vec(%arg0: tensor<?xf64, #SparseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0.0 : f64
-    %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf64>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<16xf64>
-    vector.print %1 : vector<16xf64>
-    // Dump the dense vector to verify structure is correct.
-    %dv = sparse_tensor.convert %arg0 : tensor<?xf64, #SparseVector> to tensor<?xf64>
-    %3 = vector.transfer_read %dv[%c0], %d0: tensor<?xf64>, vector<32xf64>
-    vector.print %3 : vector<32xf64>
-    bufferization.dealloc_tensor %dv : tensor<?xf64>
-    return
-  }
-
-  func.func @dump_vec_i32(%arg0: tensor<?xi32, #SparseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0 : i32
-    %0 = sparse_tensor.values %arg0 : tensor<?xi32, #SparseVector> to memref<?xi32>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xi32>, vector<24xi32>
-    vector.print %1 : vector<24xi32>
-    // Dump the dense vector to verify structure is correct.
-    %dv = sparse_tensor.convert %arg0 : tensor<?xi32, #SparseVector> to tensor<?xi32>
-    %3 = vector.transfer_read %dv[%c0], %d0: tensor<?xi32>, vector<32xi32>
-    vector.print %3 : vector<32xi32>
-    bufferization.dealloc_tensor %dv : tensor<?xi32>
-    return
-  }
-
-  func.func @dump_mat(%arg0: tensor<?x?xf64, #DCSR>) {
-    %d0 = arith.constant 0.0 : f64
-    %c0 = arith.constant 0 : index
-    %dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64>
-    %1 = vector.transfer_read %dm[%c0, %c0], %d0: tensor<?x?xf64>, vector<4x8xf64>
-    vector.print %1 : vector<4x8xf64>
-    bufferization.dealloc_tensor %dm : tensor<?x?xf64>
-    return
-  }
-
-  func.func @dump_mat_4x4(%A: tensor<4x4xf64, #DCSR>) {
-    %c0 = arith.constant 0 : index
-    %du = arith.constant 0.0 : f64
-
-    %c = sparse_tensor.convert %A : tensor<4x4xf64, #DCSR> to tensor<4x4xf64>
-    %v = vector.transfer_read %c[%c0, %c0], %du: tensor<4x4xf64>, vector<4x4xf64>
-    vector.print %v : vector<4x4xf64>
-
-    %1 = sparse_tensor.values %A : tensor<4x4xf64, #DCSR> to memref<?xf64>
-    %2 = vector.transfer_read %1[%c0], %du: memref<?xf64>, vector<16xf64>
-    vector.print %2 : vector<16xf64>
-
-    bufferization.dealloc_tensor %c : tensor<4x4xf64>
-    return
-  }
-
-  func.func @dump_mat_4x4_i8(%A: tensor<4x4xi8, #DCSR>) {
-    %c0 = arith.constant 0 : index
-    %du = arith.constant 0 : i8
-
-    %c = sparse_tensor.convert %A : tensor<4x4xi8, #DCSR> to tensor<4x4xi8>
-    %v = vector.transfer_read %c[%c0, %c0], %du: tensor<4x4xi8>, vector<4x4xi8>
-    vector.print %v : vector<4x4xi8>
-
-    %1 = sparse_tensor.values %A : tensor<4x4xi8, #DCSR> to memref<?xi8>
-    %2 = vector.transfer_read %1[%c0], %du: memref<?xi8>, vector<16xi8>
-    vector.print %2 : vector<16xi8>
-
-    bufferization.dealloc_tensor %c : tensor<4x4xi8>
-    return
-  }
-
   // Driver method to call and verify kernels.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
 
     // Setup sparse vectors.
@@ -525,45 +449,149 @@ module {
     //
     // Verify the results.
     //
-    // CHECK:      ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 4, 0, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
-    // CHECK-NEXT: ( 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 11, 0, 12, 13, 0, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 15, 0, 16, 0, 0, 17, 0, 0, 0, 0, 0, 0, 18, 19, 0, 20 )
-    // CHECK-NEXT: ( 1, 11, 2, 13, 14, 3, 15, 4, 16, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
-    // CHECK-NEXT: ( 0, 6, 3, 28, 0, 6, 56, 72, 9, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 28, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 56, 72, 0, 9 )
-    // CHECK-NEXT: ( 1, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 4, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 3, 11, 17, 20, 21, 28, 29, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 17, 0, 0, 20, 21, 0, 0, 0, 0, 0, 0, 28, 29, 0, 31 )
-    // CHECK-NEXT: ( ( 7, 0, 0, 0, 0, 0, 0, -5 ), ( -4, 0, 0, 0, 0, 0, -3, 0 ), ( 0, -2, 0, 0, 0, 0, 0, 7 ), ( 0, 0, 0, 0, 0, 0, 0, 0 ) )
-    // CHECK-NEXT: ( ( 2, 0, 4, 1 ), ( 0, 2.5, 0, 0 ), ( 1, 5, 2, 4 ), ( 5, 4, 0, 0 ) )
-    // CHECK-NEXT:   ( 2, 4, 1, 2.5, 1, 5, 2, 4, 5, 4, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 2, 0, 4, 1 ), ( 0, 2.5, 0, 0 ), ( 1, 5, 2, 4 ), ( 5, 4, 0, 0 ) )
-    // CHECK-NEXT:   ( 2, 4, 1, 2.5, 1, 5, 2, 4, 5, 4, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 2, 0, 4, 1 ), ( 0, 2.5, 0, 0 ), ( -1, -5, 2, 4 ), ( 1, 4, 0, 0 ) )
-    // CHECK-NEXT:   ( 2, 4, 1, 2.5, -1, -5, 2, 4, 1, 4, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 0, 0, 1, -1 ), ( 0, 1, 0, 0 ), ( -1, -2, -2, 2 ), ( 1, 2, 0, 0 ) )
-    // CHECK-NEXT:   ( 0, 1, -1, 1, -1, -2, -2, 2, 1, 2, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 1, 0, 0, 0 ), ( 0, 0, 0, 0 ), ( 0, 0, 0, 0 ), ( 0, 0, 0, 0 ) )
-    // CHECK-NEXT:   ( 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 0, 0, 0, -1 ), ( 0, 0, 0, 0 ), ( -1, -5, -2, 4 ), ( 0, 4, 0, 0 ) )
-    // CHECK-NEXT:   ( -1, -1, -5, -2, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9,
+    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31,
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 10
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 10,
+    // CHECK-NEXT: crd[0] : ( 1, 3, 4, 10, 16, 18, 21, 28, 29, 31,
+    // CHECK-NEXT: values : ( 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 14
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 14,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 3, 4, 10, 11, 16, 17, 18, 20, 21, 28, 29, 31,
+    // CHECK-NEXT: values : ( 1, 11, 2, 13, 14, 3, 15, 4, 16, 5, 6, 7, 8, 9,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9,
+    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31,
+    // CHECK-NEXT: values : ( 0, 6, 3, 28, 0, 6, 56, 72, 9,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 4
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 4,
+    // CHECK-NEXT: crd[0] : ( 0, 11, 17, 20,
+    // CHECK-NEXT: values : ( 1, 3, 4, 5,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9,
+    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31,
+    // CHECK-NEXT: values : ( 0, 3, 11, 17, 20, 21, 28, 29, 31,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 4, 8 )
+    // CHECK-NEXT: lvl = ( 4, 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 3,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2,
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6,
+    // CHECK-NEXT: crd[1] : ( 0, 7, 0, 6, 1, 7,
+    // CHECK-NEXT: values : ( 7, -5, -4, -3, -2, 7,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 10
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 4,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3,
+    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 8, 10,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 3, 1, 0, 1, 2, 3, 0, 1,
+    // CHECK-NEXT: values : ( 2, 4, 1, 2.5, 1, 5, 2, 4, 5, 4,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 10
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 4,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3,
+    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 8, 10,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 3, 1, 0, 1, 2, 3, 0, 1,
+    // CHECK-NEXT: values : ( 2, 4, 1, 2.5, 1, 5, 2, 4, 5, 4,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 10
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 4,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3,
+    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 8, 10,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 3, 1, 0, 1, 2, 3, 0, 1,
+    // CHECK-NEXT: values : ( 2, 4, 1, 2.5, -1, -5, 2, 4, 1, 4,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 10
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 4,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3,
+    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 8, 10,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 3, 1, 0, 1, 2, 3, 0, 1,
+    // CHECK-NEXT: values : ( 0, 1, -1, 1, -1, -2, -2, 2, 1, 2,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 4
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 3,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 3,
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 4,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0,
+    // CHECK-NEXT: values : ( 1, 0, 0, 0,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 3,
+    // CHECK-NEXT: crd[0] : ( 0, 2, 3,
+    // CHECK-NEXT: pos[1] : ( 0, 1, 5, 6,
+    // CHECK-NEXT: crd[1] : ( 3, 0, 1, 2, 3, 1,
+    // CHECK-NEXT: values : ( -1, -1, -5, -2, 4, 4,
     //
-    call @dump_vec(%sv1) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_vec(%sv2) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_vec_i32(%0) : (tensor<?xi32, #SparseVector>) -> ()
-    call @dump_vec(%1) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_vec(%2) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_vec_i32(%3) : (tensor<?xi32, #SparseVector>) -> ()
-    call @dump_mat(%5) : (tensor<?x?xf64, #DCSR>) -> ()
-    call @dump_mat_4x4(%6) : (tensor<4x4xf64, #DCSR>) -> ()
-    call @dump_mat_4x4(%7) : (tensor<4x4xf64, #DCSR>) -> ()
-    call @dump_mat_4x4(%8) : (tensor<4x4xf64, #DCSR>) -> ()
-    call @dump_mat_4x4(%9) : (tensor<4x4xf64, #DCSR>) -> ()
-    call @dump_mat_4x4_i8(%10) : (tensor<4x4xi8, #DCSR>) -> ()
-    call @dump_mat_4x4(%11) : (tensor<4x4xf64, #DCSR>) -> ()
+    sparse_tensor.print %sv1 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %sv2 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %0   : tensor<?xi32, #SparseVector>
+    sparse_tensor.print %1   : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %2   : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %3   : tensor<?xi32, #SparseVector>
+    sparse_tensor.print %5   : tensor<?x?xf64, #DCSR>
+    sparse_tensor.print %6   : tensor<4x4xf64, #DCSR>
+    sparse_tensor.print %7   : tensor<4x4xf64, #DCSR>
+    sparse_tensor.print %8   : tensor<4x4xf64, #DCSR>
+    sparse_tensor.print %9   : tensor<4x4xf64, #DCSR>
+    sparse_tensor.print %10  : tensor<4x4xi8, #DCSR>
+    sparse_tensor.print %11  : tensor<4x4xf64, #DCSR>
 
     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block3d.mlir
old mode 100755
new mode 100644
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_55.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_55.mlir
old mode 100755
new mode 100644
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir
old mode 100755
new mode 100644
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir
old mode 100755
new mode 100644
