[Mlir-commits] [mlir] [mlir][sparse] migrate integration tests to sparse_tensor.print (PR #83357)

Aart Bik llvmlistbot at llvm.org
Wed Feb 28 16:02:54 PST 2024


https://github.com/aartbik created https://github.com/llvm/llvm-project/pull/83357

This is the first step (of many) in cleaning up our tests to use the new and exciting sparse_tensor.print operation instead of lengthy extraction + print ops.
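Roughly, the change looks like this (adapted from the block.mlir hunk below): a test that used to extract each storage array and print it through the vector dialect, e.g.

    %pos  = sparse_tensor.positions %A {level = 1 : index } : tensor<?x?xf64, #BSR> to memref<?xindex>
    %vecp = vector.transfer_read %pos[%c0], %c0 : memref<?xindex>, vector<3xindex>
    vector.print %vecp : vector<3xindex>

now verifies the whole tensor with a single op whose textual output is matched by the new CHECK lines:

    sparse_tensor.print %A : tensor<?x?xf64, #BSR>

As part of the same cleanup, the test entry points are renamed from @entry to @main, with the runner flag updated from -e entry to -e main accordingly.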

From ebf64cc7f945a62336bbbe80632861fa4188bd38 Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik at google.com>
Date: Wed, 28 Feb 2024 15:53:18 -0800
Subject: [PATCH] [mlir][sparse] migrate integration tests to
 sparse_tensor.print

This is the first step (of many) in cleaning up our tests to use
the new and exciting sparse_tensor.print operation instead
of lengthy extraction + print ops.
---
 .../Dialect/SparseTensor/CPU/block.mlir       | 45 +++++-----
 .../SparseTensor/CPU/block_majors.mlir        | 85 ++++++++-----------
 .../SparseTensor/CPU/dense_output.mlir        | 18 ++--
 .../SparseTensor/CPU/dense_output_bf16.mlir   | 24 ++----
 .../SparseTensor/CPU/dense_output_f16.mlir    | 24 ++----
 .../SparseTensor/CPU/sparse_re_im.mlir        | 46 +++++-----
 6 files changed, 103 insertions(+), 139 deletions(-)

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
index 6468c4b45d2479..1184d407541b6f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -82,38 +82,39 @@ module {
     return %0 : tensor<?x?xf64, #BSR>
   }
 
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0   : index
     %f0 = arith.constant 0.0 : f64
 
     %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
     %A = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #BSR>
 
-    // CHECK:      ( 0, 2, 3 )
-    // CHECK-NEXT: ( 0, 2, 1 )
-    // CHECK-NEXT: ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
-    %pos = sparse_tensor.positions %A {level = 1 : index } : tensor<?x?xf64, #BSR> to memref<?xindex>
-    %vecp = vector.transfer_read %pos[%c0], %c0 : memref<?xindex>, vector<3xindex>
-    vector.print %vecp : vector<3xindex>
-    %crd = sparse_tensor.coordinates %A {level = 1 : index } : tensor<?x?xf64, #BSR> to memref<?xindex>
-    %vecc = vector.transfer_read %crd[%c0], %c0 : memref<?xindex>, vector<3xindex>
-    vector.print %vecc : vector<3xindex>
-    %val = sparse_tensor.values %A : tensor<?x?xf64, #BSR> to memref<?xf64>
-    %vecv = vector.transfer_read %val[%c0], %f0 : memref<?xf64>, vector<12xf64>
-    vector.print %vecv : vector<12xf64>
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1,
+    // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
+    // CHECK-NEXT: ----
+    sparse_tensor.print %A : tensor<?x?xf64, #BSR>
 
-    // CHECK-NEXT: ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1,
+    // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
+    // CHECK-NEXT: ----
     %t1 = sparse_tensor.reinterpret_map %A : tensor<?x?xf64, #BSR>
                                           to tensor<?x?x2x2xf64, #DSDD>
-    %vdsdd = sparse_tensor.values %t1 : tensor<?x?x2x2xf64, #DSDD> to memref<?xf64>
-    %vecdsdd = vector.transfer_read %vdsdd[%c0], %f0 : memref<?xf64>, vector<12xf64>
-    vector.print %vecdsdd : vector<12xf64>
+    sparse_tensor.print %t1 : tensor<?x?x2x2xf64, #DSDD>
 
-    // CHECK-NEXT: ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0 )
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1,
+    // CHECK-NEXT: values : ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0,
+    // CHECK-NEXT: ----
     %As = call @scale(%A) : (tensor<?x?xf64, #BSR>) -> (tensor<?x?xf64, #BSR>)
-    %vals = sparse_tensor.values %As : tensor<?x?xf64, #BSR> to memref<?xf64>
-    %vecs = vector.transfer_read %vals[%c0], %f0 : memref<?xf64>, vector<12xf64>
-    vector.print %vecs : vector<12xf64>
+    sparse_tensor.print %As : tensor<?x?xf64, #BSR>
 
     // Release the resources.
     bufferization.dealloc_tensor %A: tensor<?x?xf64, #BSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
index cb06f099dd3703..f8e83b5019679f 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
@@ -102,9 +102,15 @@
 //
 module {
 
-  // CHECK:      ( 0, 1, 2 )
-  // CHECK-NEXT: ( 0, 2 )
-  // CHECK-NEXT: ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
+
+  //
+  // CHECK: ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 24
+  // CHECK-NEXT: pos[1] : ( 0, 1, 2,
+  // CHECK-NEXT: crd[1] : ( 0, 2,
+  // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
+  // CHECK-NEXT: ----
+  //
   func.func @foo1() {
     // Build.
     %c0 = arith.constant 0   : index
@@ -115,23 +121,20 @@ module {
     > : tensor<6x16xf64>
     %s1 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_row_rowmajor>
     // Test.
-    %pos1 = sparse_tensor.positions %s1 {level = 1 : index } : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xindex>
-    %vecp1 = vector.transfer_read %pos1[%c0], %c0 : memref<?xindex>, vector<3xindex>
-    vector.print %vecp1 : vector<3xindex>
-    %crd1 = sparse_tensor.coordinates %s1 {level = 1 : index } : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xindex>
-    %vecc1 = vector.transfer_read %crd1[%c0], %c0 : memref<?xindex>, vector<2xindex>
-    vector.print %vecc1 : vector<2xindex>
-    %val1 = sparse_tensor.values %s1 : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xf64>
-    %vecv1 = vector.transfer_read %val1[%c0], %f0 : memref<?xf64>, vector<24xf64>
-    vector.print %vecv1 : vector<24xf64>
+    sparse_tensor.print %s1 : tensor<?x?xf64, #BSR_row_rowmajor>
     // Release.
     bufferization.dealloc_tensor %s1: tensor<?x?xf64, #BSR_row_rowmajor>
     return
   }
 
-  // CHECK-NEXT: ( 0, 1, 2 )
-  // CHECK-NEXT: ( 0, 2 )
-  // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
+  //
+  // CHECK-NEXT: ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 24
+  // CHECK-NEXT: pos[1] : ( 0, 1, 2,
+  // CHECK-NEXT: crd[1] : ( 0, 2,
+  // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
+  // CHECK-NEXT: ----
+  //
   func.func @foo2() {
     // Build.
     %c0 = arith.constant 0   : index
@@ -142,23 +145,20 @@ module {
     > : tensor<6x16xf64>
     %s2 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_row_colmajor>
     // Test.
-    %pos2 = sparse_tensor.positions %s2 {level = 1 : index } : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xindex>
-    %vecp2 = vector.transfer_read %pos2[%c0], %c0 : memref<?xindex>, vector<3xindex>
-    vector.print %vecp2 : vector<3xindex>
-    %crd2 = sparse_tensor.coordinates %s2 {level = 1 : index } : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xindex>
-    %vecc2 = vector.transfer_read %crd2[%c0], %c0 : memref<?xindex>, vector<2xindex>
-    vector.print %vecc2 : vector<2xindex>
-    %val2 = sparse_tensor.values %s2 : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xf64>
-    %vecv2 = vector.transfer_read %val2[%c0], %f0 : memref<?xf64>, vector<24xf64>
-    vector.print %vecv2 : vector<24xf64>
+    sparse_tensor.print %s2 : tensor<?x?xf64, #BSR_row_colmajor>
     // Release.
     bufferization.dealloc_tensor %s2: tensor<?x?xf64, #BSR_row_colmajor>
     return
   }
 
-  // CHECK-NEXT: ( 0, 1, 1, 2, 2 )
-  // CHECK-NEXT: ( 0, 1 )
-  // CHECK-NEXT: ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
+  //
+  // CHECK-NEXT: ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 24
+  // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
+  // CHECK-NEXT: crd[1] : ( 0, 1,
+  // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
+  // CHECK-NEXT: ----
+  //
   func.func @foo3() {
     // Build.
     %c0 = arith.constant 0   : index
@@ -169,23 +169,20 @@ module {
     > : tensor<6x16xf64>
     %s3 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_col_rowmajor>
     // Test.
-    %pos3 = sparse_tensor.positions %s3 {level = 1 : index } : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xindex>
-    %vecp3 = vector.transfer_read %pos3[%c0], %c0 : memref<?xindex>, vector<5xindex>
-    vector.print %vecp3 : vector<5xindex>
-    %crd3 = sparse_tensor.coordinates %s3 {level = 1 : index } : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xindex>
-    %vecc3 = vector.transfer_read %crd3[%c0], %c0 : memref<?xindex>, vector<2xindex>
-    vector.print %vecc3 : vector<2xindex>
-    %val3 = sparse_tensor.values %s3 : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xf64>
-    %vecv3 = vector.transfer_read %val3[%c0], %f0 : memref<?xf64>, vector<24xf64>
-    vector.print %vecv3 : vector<24xf64>
+    sparse_tensor.print %s3 : tensor<?x?xf64, #BSR_col_rowmajor>
     // Release.
     bufferization.dealloc_tensor %s3: tensor<?x?xf64, #BSR_col_rowmajor>
     return
   }
 
-  // CHECK-NEXT: ( 0, 1, 1, 2, 2 )
-  // CHECK-NEXT: ( 0, 1 )
-  // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
+  //
+  // CHECK-NEXT: ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 24
+  // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
+  // CHECK-NEXT: crd[1] : ( 0, 1,
+  // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
+  // CHECK-NEXT: ----
+  //
   func.func @foo4() {
     // Build.
     %c0 = arith.constant 0   : index
@@ -196,15 +193,7 @@ module {
     > : tensor<6x16xf64>
     %s4 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_col_colmajor>
     // Test.
-    %pos4 = sparse_tensor.positions %s4 {level = 1 : index } : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xindex>
-    %vecp4 = vector.transfer_read %pos4[%c0], %c0 : memref<?xindex>, vector<5xindex>
-    vector.print %vecp4 : vector<5xindex>
-    %crd4 = sparse_tensor.coordinates %s4 {level = 1 : index } : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xindex>
-    %vecc4 = vector.transfer_read %crd4[%c0], %c0 : memref<?xindex>, vector<2xindex>
-    vector.print %vecc4 : vector<2xindex>
-    %val4 = sparse_tensor.values %s4 : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xf64>
-    %vecv4 = vector.transfer_read %val4[%c0], %f0 : memref<?xf64>, vector<24xf64>
-    vector.print %vecv4 : vector<24xf64>
+    sparse_tensor.print %s4 : tensor<?x?xf64, #BSR_col_colmajor>
     // Release.
     bufferization.dealloc_tensor %s4: tensor<?x?xf64, #BSR_col_colmajor>
     return
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
index 5f6524a4b7af9e..c6ee0ce0705021 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -83,12 +83,11 @@ module {
   }
 
   func.func private @getTensorFilename(index) -> (!Filename)
-  func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }
 
   //
   // Main driver that reads matrix from file and calls the kernel.
   //
-  func.func @entry() {
+  func.func @main() {
     %d0 = arith.constant 0.0 : f64
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
@@ -104,14 +103,13 @@ module {
 
     //
     // Print the linearized 5x5 result for verification.
-    // CHECK: 25
-    // CHECK: [2,  0,  0,  2.8,  0,  0,  4,  0,  0,  5,  0,  0,  6,  0,  0,  8.2,  0,  0,  8,  0,  0,  10.4,  0,  0,  10
     //
-    %n = sparse_tensor.number_of_entries %0 : tensor<?x?xf64, #DenseMatrix>
-    vector.print %n : index
-    %m = sparse_tensor.values %0
-      : tensor<?x?xf64, #DenseMatrix> to memref<?xf64>
-    call @printMemref1dF64(%m) : (memref<?xf64>) -> ()
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 25
+    // CHECK-NEXT: values : ( 2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10,
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %0 : tensor<?x?xf64, #DenseMatrix>
 
     // Release the resources.
     bufferization.dealloc_tensor %a : tensor<?x?xf64, #SparseMatrix>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
index 81cd2d81cbbc32..0b34ff581016da 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -67,20 +67,8 @@ module {
     return %0 : tensor<?xbf16, #DenseVector>
   }
 
-  // Dumps a dense vector of type bf16.
-  func.func @dump_vec(%arg0: tensor<?xbf16, #DenseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant -1.0 : bf16
-    %0 = sparse_tensor.values %arg0 : tensor<?xbf16, #DenseVector> to memref<?xbf16>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xbf16>, vector<32xbf16>
-    %f1 = arith.extf %1: vector<32xbf16> to vector<32xf32>
-    vector.print %f1 : vector<32xf32>
-    return
-  }
-
   // Driver method to call and verify the kernel.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
 
     // Setup sparse vectors.
@@ -103,8 +91,12 @@ module {
     //
     // Verify the result.
     //
-    // CHECK: ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
-    call @dump_vec(%0) : (tensor<?xbf16, #DenseVector>) -> ()
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 32
+    // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %0 : tensor<?xbf16, #DenseVector>
 
     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xbf16, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
index b320afdb885842..495682169c2909 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -68,20 +68,8 @@ module {
     return %0 : tensor<?xf16, #DenseVector>
   }
 
-  // Dumps a dense vector of type f16.
-  func.func @dump_vec(%arg0: tensor<?xf16, #DenseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant -1.0 : f16
-    %0 = sparse_tensor.values %arg0 : tensor<?xf16, #DenseVector> to memref<?xf16>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf16>, vector<32xf16>
-    %f1 = arith.extf %1: vector<32xf16> to vector<32xf32>
-    vector.print %f1 : vector<32xf32>
-    return
-  }
-
   // Driver method to call and verify the kernel.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
 
     // Setup sparse vectors.
@@ -104,8 +92,12 @@ module {
     //
     // Verify the result.
     //
-    // CHECK: ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
-    call @dump_vec(%0) : (tensor<?xf16, #DenseVector>) -> ()
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 32
+    // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %0 : tensor<?xf16, #DenseVector>
 
     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xf16, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
index b44ffc30c3b1ee..1860fc1c7027a1 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -72,22 +72,7 @@ module {
     return %0 : tensor<?xf32, #SparseVector>
   }
 
-  func.func @dump(%arg0: tensor<?xf32, #SparseVector>) {
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant -1.0 : f32
-    %n = sparse_tensor.number_of_entries %arg0 : tensor<?xf32, #SparseVector>
-    vector.print %n : index
-    %values = sparse_tensor.values %arg0 : tensor<?xf32, #SparseVector> to memref<?xf32>
-    %0 = vector.transfer_read %values[%c0], %d0: memref<?xf32>, vector<3xf32>
-    vector.print %0 : vector<3xf32>
-    %coordinates = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<?xf32, #SparseVector> to memref<?xindex>
-    %1 = vector.transfer_read %coordinates[%c0], %c0: memref<?xindex>, vector<3xindex>
-    vector.print %1 : vector<3xindex>
-    return
-  }
-
-  // Driver method to call and verify functions cim and cre.
-  func.func @entry() {
+  func.func @main() {
     // Setup sparse vectors.
     %v1 = arith.constant sparse<
        [ [0], [20], [31] ],
@@ -104,20 +89,27 @@ module {
     //
     // Verify the results.
     //
-    // CHECK:      3
-    // CHECK-NEXT: ( 5.13, 3, 5 )
-    // CHECK-NEXT: ( 0, 20, 31 )
-    // CHECK-NEXT: 3
-    // CHECK-NEXT: ( 2, 4, 6 )
-    // CHECK-NEXT: ( 0, 20, 31 )
+    // CHECK:      ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 3
+    // CHECK-NEXT: pos[0] : ( 0, 3,
+    // CHECK-NEXT: crd[0] : ( 0, 20, 31,
+    // CHECK-NEXT: values : ( 5.13, 3, 5,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 3
+    // CHECK-NEXT: pos[0] : ( 0, 3,
+    // CHECK-NEXT: crd[0] : ( 0, 20, 31,
+    // CHECK-NEXT: values : ( 2, 4, 6,
+    // CHECK-NEXT: ----
     //
-    call @dump(%0) : (tensor<?xf32, #SparseVector>) -> ()
-    call @dump(%1) : (tensor<?xf32, #SparseVector>) -> ()
+    sparse_tensor.print %0 : tensor<?xf32, #SparseVector>
+    sparse_tensor.print %1 : tensor<?xf32, #SparseVector>
 
     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xcomplex<f32>, #SparseVector>
-    bufferization.dealloc_tensor %0 : tensor<?xf32, #SparseVector>
-    bufferization.dealloc_tensor %1 : tensor<?xf32, #SparseVector>
+    bufferization.dealloc_tensor %0   : tensor<?xf32, #SparseVector>
+    bufferization.dealloc_tensor %1   : tensor<?xf32, #SparseVector>
     return
   }
 }


