[Mlir-commits] [mlir] [mlir][sparse] migrate tests to sparse_tensor.print (PR #84055)
Aart Bik
llvmlistbot@llvm.org
Tue Mar 5 10:44:37 PST 2024
https://github.com/aartbik updated https://github.com/llvm/llvm-project/pull/84055
From 12d843b50eeda1838347f442beb18ce9f719356a Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik@google.com>
Date: Tue, 5 Mar 2024 10:11:09 -0800
Subject: [PATCH 1/3] [mlir][sparse] migrate tests to sparse_tensor.print
Continuing the efforts started in #83357
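
The migration follows one pattern throughout: each hand-rolled dump
utility (converting to dense, reading out the positions/coordinates/values
buffers, and printing them with vector.print or printMemrefF64) is
replaced by a single sparse_tensor.print operation, the FileCheck
expectations are updated to its output format, and the test entry points
are renamed from @entry to @main. A minimal before/after sketch of the
pattern (the %A, %c0, and #CSR names here are illustrative, not copied
from any single touched test):

  // Before: manually dump the storage buffers of %A.
  %pos = sparse_tensor.positions %A {level = 1 : index}
      : tensor<?x?xf64, #CSR> to memref<?xindex>
  %vecp = vector.transfer_read %pos[%c0], %c0 : memref<?xindex>, vector<4xindex>
  vector.print %vecp : vector<4xindex>

  // After: one op prints nse, dim, lvl, the pos/crd arrays, and the values.
  sparse_tensor.print %A : tensor<?x?xf64, #CSR>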
---
.../SparseTensor/CPU/concatenate_dim_0.mlir | 81 +++++-----
.../SparseTensor/CPU/sparse_block_matmul.mlir | 138 +++++++++---------
.../SparseTensor/CPU/sparse_complex32.mlir | 53 +++----
.../SparseTensor/CPU/sparse_complex64.mlir | 53 +++----
.../Dialect/SparseTensor/CPU/sparse_ds.mlir | 88 +++++------
5 files changed, 181 insertions(+), 232 deletions(-)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
index 515f5b88b480d2..3fa1db1a14cea2 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -51,11 +51,6 @@
module {
func.func private @printMemrefF64(%ptr : tensor<*xf64>)
- func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }
-
- //
- // Tests without permutation.
- //
// Concats all sparse matrices (with different encodings) to a sparse matrix.
func.func @concat_sparse_sparse(%arg0: tensor<2x4xf64, #MAT_C_C>, %arg1: tensor<3x4xf64, #MAT_C_D>, %arg2: tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C> {
@@ -85,30 +80,15 @@ module {
return %0 : tensor<9x4xf64>
}
- func.func @dump_mat_9x4(%A: tensor<9x4xf64, #MAT_C_C>) {
- %c = sparse_tensor.convert %A : tensor<9x4xf64, #MAT_C_C> to tensor<9x4xf64>
- %cu = tensor.cast %c : tensor<9x4xf64> to tensor<*xf64>
- call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()
-
- %n = sparse_tensor.number_of_entries %A : tensor<9x4xf64, #MAT_C_C>
- vector.print %n : index
-
- %1 = sparse_tensor.values %A : tensor<9x4xf64, #MAT_C_C> to memref<?xf64>
- call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
-
- bufferization.dealloc_tensor %c : tensor<9x4xf64>
- return
- }
-
+ // Outputs dense matrix.
func.func @dump_mat_dense_9x4(%A: tensor<9x4xf64>) {
%u = tensor.cast %A : tensor<9x4xf64> to tensor<*xf64>
call @printMemrefF64(%u) : (tensor<*xf64>) -> ()
-
return
}
// Driver method to call and verify kernels.
- func.func @entry() {
+ func.func @main() {
%m24 = arith.constant dense<
[ [ 1.0, 0.0, 3.0, 0.0],
[ 0.0, 2.0, 0.0, 0.0] ]> : tensor<2x4xf64>
@@ -126,22 +106,24 @@ module {
%sm34cd = sparse_tensor.convert %m34 : tensor<3x4xf64> to tensor<3x4xf64, #MAT_C_D>
%sm44dc = sparse_tensor.convert %m44 : tensor<4x4xf64> to tensor<4x4xf64, #MAT_D_C>
- // CHECK: {{\[}}[1, 0, 3, 0],
- // CHECK-NEXT: [0, 2, 0, 0],
- // CHECK-NEXT: [1, 0, 1, 1],
- // CHECK-NEXT: [0, 0.5, 0, 0],
- // CHECK-NEXT: [1, 5, 2, 0],
- // CHECK-NEXT: [0, 0, 1.5, 1],
- // CHECK-NEXT: [0, 3.5, 0, 0],
- // CHECK-NEXT: [1, 5, 2, 0],
- // CHECK-NEXT: [1, 0.5, 0, 0]]
- // CHECK-NEXT: 18
- // CHECK: [1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 18
+ // CHECK-NEXT: dim = ( 9, 4 )
+ // CHECK-NEXT: lvl = ( 9, 4 )
+ // CHECK-NEXT: pos[0] : ( 0, 9,
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8,
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18,
+ // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1,
+ // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5,
+ // CHECK-NEXT: ----
+ //
%0 = call @concat_sparse_sparse(%sm24cc, %sm34cd, %sm44dc)
: (tensor<2x4xf64, #MAT_C_C>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C>
- call @dump_mat_9x4(%0) : (tensor<9x4xf64, #MAT_C_C>) -> ()
+ sparse_tensor.print %0 : tensor<9x4xf64, #MAT_C_C>
- // CHECK: {{\[}}[1, 0, 3, 0],
+ //
+ // CHECK: {{\[}}[1, 0, 3, 0],
// CHECK-NEXT: [0, 2, 0, 0],
// CHECK-NEXT: [1, 0, 1, 1],
// CHECK-NEXT: [0, 0.5, 0, 0],
@@ -150,6 +132,7 @@ module {
// CHECK-NEXT: [0, 3.5, 0, 0],
// CHECK-NEXT: [1, 5, 2, 0],
// CHECK-NEXT: [1, 0.5, 0, 0]]
+ //
%1 = call @concat_sparse_dense(%sm24cc, %sm34cd, %sm44dc)
: (tensor<2x4xf64, #MAT_C_C>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64>
call @dump_mat_dense_9x4(%1) : (tensor<9x4xf64>) -> ()
@@ -167,22 +150,24 @@ module {
// CHECK: [1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5
%2 = call @concat_mix_sparse(%m24, %sm34cd, %sm44dc)
: (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C>
- call @dump_mat_9x4(%2) : (tensor<9x4xf64, #MAT_C_C>) -> ()
-
- // CHECK: {{\[}}[1, 0, 3, 0],
- // CHECK-NEXT: [0, 2, 0, 0],
- // CHECK-NEXT: [1, 0, 1, 1],
- // CHECK-NEXT: [0, 0.5, 0, 0],
- // CHECK-NEXT: [1, 5, 2, 0],
- // CHECK-NEXT: [0, 0, 1.5, 1],
- // CHECK-NEXT: [0, 3.5, 0, 0],
- // CHECK-NEXT: [1, 5, 2, 0],
- // CHECK-NEXT: [1, 0.5, 0, 0]]
+ sparse_tensor.print %2 : tensor<9x4xf64, #MAT_C_C>
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 18
+ // CHECK-NEXT: dim = ( 9, 4 )
+ // CHECK-NEXT: lvl = ( 9, 4 )
+ // CHECK-NEXT: pos[0] : ( 0, 9, )
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, )
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18, )
+ // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1, )
+ // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5, )
+ // CHECK-NEXT: ----
+ //
%3 = call @concat_mix_dense(%m24, %sm34cd, %sm44dc)
: (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64>
call @dump_mat_dense_9x4(%3) : (tensor<9x4xf64>) -> ()
-
// Release resources.
bufferization.dealloc_tensor %sm24cc : tensor<2x4xf64, #MAT_C_C>
bufferization.dealloc_tensor %sm34cd : tensor<3x4xf64, #MAT_C_D>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block_matmul.mlir
index e47ac46597b77a..464de9c8a2c3a6 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block_matmul.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -45,7 +45,6 @@
map = ( i, j ) -> (i : dense, j : compressed)
}>
-
#BSR = #sparse_tensor.encoding<{
map = ( i, j ) ->
( i floordiv 2 : dense,
@@ -65,67 +64,66 @@
module {
-func.func @mul(%arg0: tensor<4x8xf64>,
- %arg1: tensor<4x8xf64, #BSR>) -> tensor<4x4xf64> {
- %out = arith.constant dense<0.0> : tensor<4x4xf64>
- %0 = linalg.generic #trait_mul
- ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64, #BSR>)
- outs(%out: tensor<4x4xf64>) {
- ^bb(%x: f64, %y : f64, %z : f64):
- %1 = arith.mulf %x, %y : f64
- %2 = arith.addf %1, %z : f64
- linalg.yield %2 : f64
- } -> tensor<4x4xf64>
- return %0 : tensor<4x4xf64>
-}
-
-func.func @mul_24(%arg0: tensor<4x8xf64>,
- %arg1: tensor<4x8xf64, #NV_24>) -> tensor<4x4xf64> {
- %out = arith.constant dense<0.0> : tensor<4x4xf64>
- %0 = linalg.generic #trait_mul
- ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64, #NV_24>)
- outs(%out: tensor<4x4xf64>) {
- ^bb(%x: f64, %y : f64, %z : f64):
- %1 = arith.mulf %x, %y : f64
- %2 = arith.addf %1, %z : f64
- linalg.yield %2 : f64
- } -> tensor<4x4xf64>
- return %0 : tensor<4x4xf64>
-}
+ func.func @mul(%arg0: tensor<4x8xf64>,
+ %arg1: tensor<4x8xf64, #BSR>) -> tensor<4x4xf64> {
+ %out = arith.constant dense<0.0> : tensor<4x4xf64>
+ %0 = linalg.generic #trait_mul
+ ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64, #BSR>)
+ outs(%out: tensor<4x4xf64>) {
+ ^bb(%x: f64, %y : f64, %z : f64):
+ %1 = arith.mulf %x, %y : f64
+ %2 = arith.addf %1, %z : f64
+ linalg.yield %2 : f64
+ } -> tensor<4x4xf64>
+ return %0 : tensor<4x4xf64>
+ }
-func.func @mul_csr_bsr(%arg0: tensor<4x8xf64, #CSR>,
- %arg1: tensor<4x8xf64, #BSR>) -> tensor<4x4xf64> {
- %out = arith.constant dense<0.0> : tensor<4x4xf64>
- %0 = linalg.generic #trait_mul
- ins(%arg0, %arg1: tensor<4x8xf64, #CSR>, tensor<4x8xf64, #BSR>)
- outs(%out: tensor<4x4xf64>) {
- ^bb(%x: f64, %y : f64, %z : f64):
- %1 = arith.mulf %x, %y : f64
- %2 = arith.addf %1, %z : f64
- linalg.yield %2 : f64
- } -> tensor<4x4xf64>
- return %0 : tensor<4x4xf64>
-}
+ func.func @mul_24(%arg0: tensor<4x8xf64>,
+ %arg1: tensor<4x8xf64, #NV_24>) -> tensor<4x4xf64> {
+ %out = arith.constant dense<0.0> : tensor<4x4xf64>
+ %0 = linalg.generic #trait_mul
+ ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64, #NV_24>)
+ outs(%out: tensor<4x4xf64>) {
+ ^bb(%x: f64, %y : f64, %z : f64):
+ %1 = arith.mulf %x, %y : f64
+ %2 = arith.addf %1, %z : f64
+ linalg.yield %2 : f64
+ } -> tensor<4x4xf64>
+ return %0 : tensor<4x4xf64>
+ }
-func.func @mul_dense(%arg0: tensor<4x8xf64>,
- %arg1: tensor<4x8xf64>) -> tensor<4x4xf64> {
- %out = arith.constant dense<0.0> : tensor<4x4xf64>
- %0 = linalg.generic #trait_mul
- ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64>)
- outs(%out: tensor<4x4xf64>) {
- ^bb(%x: f64, %y : f64, %z : f64):
- %1 = arith.mulf %x, %y : f64
- %2 = arith.addf %1, %z : f64
- linalg.yield %2 : f64
- } -> tensor<4x4xf64>
- return %0 : tensor<4x4xf64>
-}
+ func.func @mul_csr_bsr(%arg0: tensor<4x8xf64, #CSR>,
+ %arg1: tensor<4x8xf64, #BSR>) -> tensor<4x4xf64> {
+ %out = arith.constant dense<0.0> : tensor<4x4xf64>
+ %0 = linalg.generic #trait_mul
+ ins(%arg0, %arg1: tensor<4x8xf64, #CSR>, tensor<4x8xf64, #BSR>)
+ outs(%out: tensor<4x4xf64>) {
+ ^bb(%x: f64, %y : f64, %z : f64):
+ %1 = arith.mulf %x, %y : f64
+ %2 = arith.addf %1, %z : f64
+ linalg.yield %2 : f64
+ } -> tensor<4x4xf64>
+ return %0 : tensor<4x4xf64>
+ }
+ func.func @mul_dense(%arg0: tensor<4x8xf64>,
+ %arg1: tensor<4x8xf64>) -> tensor<4x4xf64> {
+ %out = arith.constant dense<0.0> : tensor<4x4xf64>
+ %0 = linalg.generic #trait_mul
+ ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64>)
+ outs(%out: tensor<4x4xf64>) {
+ ^bb(%x: f64, %y : f64, %z : f64):
+ %1 = arith.mulf %x, %y : f64
+ %2 = arith.addf %1, %z : f64
+ linalg.yield %2 : f64
+ } -> tensor<4x4xf64>
+ return %0 : tensor<4x4xf64>
+ }
//
- // Output utilities.
+ // Output utility.
//
- func.func @dumpf64(%arg0: tensor<4x4xf64>) {
+ func.func @dump_dense_f64(%arg0: tensor<4x4xf64>) {
%c0 = arith.constant 0 : index
%d0 = arith.constant -1.0 : f64
%0 = vector.transfer_read %arg0[%c0, %c0], %d0: tensor<4x4xf64>, vector<4x4xf64>
@@ -136,36 +134,32 @@ func.func @mul_dense(%arg0: tensor<4x8xf64>,
//
// Main driver.
//
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %c2 = arith.constant 2 : index
-
%td = arith.constant dense<[[ 1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 4.0, 5.0],
[ 6.0, 7.0, 0.0, 0.0, 0.0, 0.0, 10.0, 11.0],
[ 0.0, 0.0, 12.0, 13.0, 16.0, 17.0, 0.0, 0.0],
[ 0.0, 0.0, 18.0, 19.0, 22.0, 23.0, 0.0, 0.0]]> : tensor<4x8xf64>
-
- %2 = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #BSR>
- %3 = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #NV_24>
- %4 = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #CSR>
+ %a = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #BSR>
+ %b = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #NV_24>
+ %c = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #CSR>
%d = call @mul_dense(%td, %td)
: (tensor<4x8xf64>, tensor<4x8xf64>) -> tensor<4x4xf64>
- %s = call @mul(%td, %2)
+ %s = call @mul(%td, %a)
: (tensor<4x8xf64>, tensor<4x8xf64, #BSR>) -> tensor<4x4xf64>
- %s24 = call @mul_24(%td, %3)
+ %s24 = call @mul_24(%td, %b)
: (tensor<4x8xf64>, tensor<4x8xf64, #NV_24>) -> tensor<4x4xf64>
- %scsr = call @mul_csr_bsr(%4, %2)
+ %scsr = call @mul_csr_bsr(%c, %a)
: (tensor<4x8xf64, #CSR>, tensor<4x8xf64, #BSR>) -> tensor<4x4xf64>
// CHECK-COUNT-4: ( ( 46, 115, 0, 0 ), ( 115, 306, 0, 0 ), ( 0, 0, 858, 1206 ), ( 0, 0, 1206, 1698 ) )
- call @dumpf64(%d) : (tensor<4x4xf64>) -> ()
- call @dumpf64(%s) : (tensor<4x4xf64>) -> ()
- call @dumpf64(%s24) : (tensor<4x4xf64>) -> ()
- call @dumpf64(%scsr) : (tensor<4x4xf64>) -> ()
+ call @dump_dense_f64(%d) : (tensor<4x4xf64>) -> ()
+ call @dump_dense_f64(%s) : (tensor<4x4xf64>) -> ()
+ call @dump_dense_f64(%s24) : (tensor<4x4xf64>) -> ()
+ call @dump_dense_f64(%scsr) : (tensor<4x4xf64>) -> ()
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir
index d97b1a93359517..9747da27f9e972 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -77,22 +77,8 @@ module {
return %0 : tensor<?xcomplex<f32>, #SparseVector>
}
- func.func @dump(%arg0: tensor<?xcomplex<f32>, #SparseVector>, %d: index) {
- %c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %mem = sparse_tensor.values %arg0 : tensor<?xcomplex<f32>, #SparseVector> to memref<?xcomplex<f32>>
- scf.for %i = %c0 to %d step %c1 {
- %v = memref.load %mem[%i] : memref<?xcomplex<f32>>
- %real = complex.re %v : complex<f32>
- %imag = complex.im %v : complex<f32>
- vector.print %real : f32
- vector.print %imag : f32
- }
- return
- }
-
// Driver method to call and verify complex kernels.
- func.func @entry() {
+ func.func @main() {
// Setup sparse vectors.
%v1 = arith.constant sparse<
[ [0], [28], [31] ],
@@ -114,23 +100,26 @@ module {
//
// Verify the results.
//
- // CHECK: 511.13
- // CHECK-NEXT: 2
- // CHECK-NEXT: 1
- // CHECK-NEXT: 0
- // CHECK-NEXT: 5
- // CHECK-NEXT: 4
- // CHECK-NEXT: 8
- // CHECK-NEXT: 6
- // CHECK-NEXT: 6
- // CHECK-NEXT: 8
- // CHECK-NEXT: 15
- // CHECK-NEXT: 18
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 4
+ // CHECK-NEXT: dim = ( 32 )
+ // CHECK-NEXT: lvl = ( 32 )
+ // CHECK-NEXT: pos[0] : ( 0, 4,
+ // CHECK-NEXT: crd[0] : ( 0, 1, 28, 31,
+ // CHECK-NEXT: values : ( ( 511.13, 2 ), ( 1, 0 ), ( 5, 4 ), ( 8, 6 ),
+ // CHECK-NEXT: ----
+ //
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 2
+ // CHECK-NEXT: dim = ( 32 )
+ // CHECK-NEXT: lvl = ( 32 )
+ // CHECK-NEXT: pos[0] : ( 0, 2,
+ // CHECK-NEXT: crd[0] : ( 28, 31,
+ // CHECK-NEXT: values : ( ( 6, 8 ), ( 15, 18 ),
+ // CHECK-NEXT: ----
//
- %d1 = arith.constant 4 : index
- %d2 = arith.constant 2 : index
- call @dump(%0, %d1) : (tensor<?xcomplex<f32>, #SparseVector>, index) -> ()
- call @dump(%1, %d2) : (tensor<?xcomplex<f32>, #SparseVector>, index) -> ()
+ sparse_tensor.print %0 : tensor<?xcomplex<f32>, #SparseVector>
+ sparse_tensor.print %1 : tensor<?xcomplex<f32>, #SparseVector>
// Release the resources.
bufferization.dealloc_tensor %sv1 : tensor<?xcomplex<f32>, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir
index 29008473d481ed..840895ffee3252 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -74,22 +74,8 @@ module {
return %0 : tensor<?xcomplex<f64>, #SparseVector>
}
- func.func @dump(%arg0: tensor<?xcomplex<f64>, #SparseVector>, %d: index) {
- %c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %mem = sparse_tensor.values %arg0 : tensor<?xcomplex<f64>, #SparseVector> to memref<?xcomplex<f64>>
- scf.for %i = %c0 to %d step %c1 {
- %v = memref.load %mem[%i] : memref<?xcomplex<f64>>
- %real = complex.re %v : complex<f64>
- %imag = complex.im %v : complex<f64>
- vector.print %real : f64
- vector.print %imag : f64
- }
- return
- }
-
// Driver method to call and verify complex kernels.
- func.func @entry() {
+ func.func @main() {
// Setup sparse vectors.
%v1 = arith.constant sparse<
[ [0], [28], [31] ],
@@ -111,23 +97,26 @@ module {
//
// Verify the results.
//
- // CHECK: 511.13
- // CHECK-NEXT: 2
- // CHECK-NEXT: 1
- // CHECK-NEXT: 0
- // CHECK-NEXT: 5
- // CHECK-NEXT: 4
- // CHECK-NEXT: 8
- // CHECK-NEXT: 6
- // CHECK-NEXT: 6
- // CHECK-NEXT: 8
- // CHECK-NEXT: 15
- // CHECK-NEXT: 18
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 4
+ // CHECK-NEXT: dim = ( 32 )
+ // CHECK-NEXT: lvl = ( 32 )
+ // CHECK-NEXT: pos[0] : ( 0, 4, )
+ // CHECK-NEXT: crd[0] : ( 0, 1, 28, 31, )
+ // CHECK-NEXT: values : ( ( 511.13, 2 ), ( 1, 0 ), ( 5, 4 ), ( 8, 6 ), )
+ // CHECK-NEXT: ----
+ //
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 2
+ // CHECK-NEXT: dim = ( 32 )
+ // CHECK-NEXT: lvl = ( 32 )
+ // CHECK-NEXT: pos[0] : ( 0, 2, )
+ // CHECK-NEXT: crd[0] : ( 28, 31, )
+ // CHECK-NEXT: values : ( ( 6, 8 ), ( 15, 18 ), )
+ // CHECK-NEXT: ----
//
- %d1 = arith.constant 4 : index
- %d2 = arith.constant 2 : index
- call @dump(%0, %d1) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
- call @dump(%1, %d2) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
+ sparse_tensor.print %0 : tensor<?xcomplex<f64>, #SparseVector>
+ sparse_tensor.print %1 : tensor<?xcomplex<f64>, #SparseVector>
// Release the resources.
bufferization.dealloc_tensor %sv1 : tensor<?xcomplex<f64>, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir
index 251944c657cbac..cd20e0abb84b1d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -63,12 +63,10 @@ module {
// [0.0, 5.0, 6.0, 0.0, 7.0, 0.0, 0.0, 8.0],
// [9.0, 0.0, 10.0, 0.0, 11.0, 12.0, 0.0, 0.0]]
//
- func.func @entry() {
- %u0 = arith.constant 0 : i8
- %c0 = arith.constant 0 : index
- %f0 = arith.constant 0.0 : f64
-
+ func.func @main() {
+ %c0 = arith.constant 0 : index
%fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
+
%A1 = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #CSR>
%A2 = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #CSR_hi>
%A3 = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #NV_24>
@@ -77,62 +75,56 @@ module {
//
// CSR:
//
- // CHECK: ( 0, 4, 8, 12 )
- // CHECK-NEXT: ( 2, 3, 5, 7, 1, 2, 4, 7, 0, 2, 4, 5 )
- // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 )
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: dim = ( 3, 8 )
+ // CHECK-NEXT: lvl = ( 3, 8 )
+ // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12,
+ // CHECK-NEXT: crd[1] : ( 2, 3, 5, 7, 1, 2, 4, 7, 0, 2, 4, 5,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ // CHECK-NEXT: ----
//
- %pos1 = sparse_tensor.positions %A1 {level = 1 : index } : tensor<?x?xf64, #CSR> to memref<?xindex>
- %vecp1 = vector.transfer_read %pos1[%c0], %c0 : memref<?xindex>, vector<4xindex>
- vector.print %vecp1 : vector<4xindex>
- %crd1 = sparse_tensor.coordinates %A1 {level = 1 : index } : tensor<?x?xf64, #CSR> to memref<?xindex>
- %vecc1 = vector.transfer_read %crd1[%c0], %c0 : memref<?xindex>, vector<12xindex>
- vector.print %vecc1 : vector<12xindex>
- %val1 = sparse_tensor.values %A1 : tensor<?x?xf64, #CSR> to memref<?xf64>
- %vecv1 = vector.transfer_read %val1[%c0], %f0 : memref<?xf64>, vector<12xf64>
- vector.print %vecv1 : vector<12xf64>
+ sparse_tensor.print %A1 : tensor<?x?xf64, #CSR>
//
// CSR_hi:
//
- // CHECK-NEXT: ( 0, 4, 4, 8, 8, 12 )
- // CHECK-NEXT: ( 2, 3, 5, 7, 1, 2, 4, 7, 0, 2, 4, 5 )
- // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 )
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: dim = ( 3, 8 )
+ // CHECK-NEXT: lvl = ( 3, 8 )
+ // CHECK-NEXT: pos[1] : ( 0, 4, 4, 8, 8, 12, 12,
+ // CHECK-NEXT: crd[1] : ( 2, 3, 5, 7, 1, 2, 4, 7, 0, 2, 4, 5,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ // CHECK-NEXT: ----
//
- %pos2 = sparse_tensor.positions %A2 {level = 1 : index } : tensor<?x?xf64, #CSR_hi> to memref<?xindex>
- %vecp2 = vector.transfer_read %pos2[%c0], %c0 : memref<?xindex>, vector<6xindex>
- vector.print %vecp2 : vector<6xindex>
- %crd2 = sparse_tensor.coordinates %A2 {level = 1 : index } : tensor<?x?xf64, #CSR_hi> to memref<?xindex>
- %vecc2 = vector.transfer_read %crd2[%c0], %c0 : memref<?xindex>, vector<12xindex>
- vector.print %vecc2 : vector<12xindex>
- %val2 = sparse_tensor.values %A2 : tensor<?x?xf64, #CSR_hi> to memref<?xf64>
- %vecv2 = vector.transfer_read %val2[%c0], %f0 : memref<?xf64>, vector<12xf64>
- vector.print %vecv2 : vector<12xf64>
+ sparse_tensor.print %A2 : tensor<?x?xf64, #CSR_hi>
//
- // NV_24
+ // NV_24:
//
- // CHECK-NEXT: ( 2, 3, 1, 3, 1, 2, 0, 3, 0, 2, 0, 1 )
- // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 )
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: dim = ( 3, 8 )
+ // CHECK-NEXT: lvl = ( 3, 2, 4 )
+ // CHECK-NEXT: crd[2] : ( 2, 3, 1, 3, 1, 2, 0, 3, 0, 2, 0, 1,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ // CHECK-NEXT: ----
+ // CHECK-NEXT: ---- Sparse Tensor ----
//
- %crd3 = sparse_tensor.coordinates %A3 {level = 2 : index } : tensor<?x?xf64, #NV_24> to memref<?xi8>
- %vecc3 = vector.transfer_read %crd3[%c0], %u0 : memref<?xi8>, vector<12xi8>
- vector.print %vecc3 : vector<12xi8>
- %val3 = sparse_tensor.values %A3 : tensor<?x?xf64, #NV_24> to memref<?xf64>
- %vecv3 = vector.transfer_read %val3[%c0], %f0 : memref<?xf64>, vector<12xf64>
- vector.print %vecv3 : vector<12xf64>
+ sparse_tensor.print %A3 : tensor<?x?xf64, #NV_24>
//
- // NV_58
+ // NV_58:
//
- // CHECK-NEXT: ( 2, 3, 5, 7, 1, 2, 4, 7, 0, 2, 4, 5 )
- // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 )
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: dim = ( 3, 8 )
+ // CHECK-NEXT: lvl = ( 3, 1, 8 )
+ // CHECK-NEXT: crd[2] : ( 2, 3, 5, 7, 1, 2, 4, 7, 0, 2, 4, 5,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ // CHECK-NEXT: ----
//
- %crd4 = sparse_tensor.coordinates %A4 {level = 2 : index } : tensor<?x?xf64, #NV_58> to memref<?xi8>
- %vecc4 = vector.transfer_read %crd4[%c0], %u0 : memref<?xi8>, vector<12xi8>
- vector.print %vecc4 : vector<12xi8>
- %val4 = sparse_tensor.values %A4 : tensor<?x?xf64, #NV_58> to memref<?xf64>
- %vecv4 = vector.transfer_read %val4[%c0], %f0 : memref<?xf64>, vector<12xf64>
- vector.print %vecv4 : vector<12xf64>
+ sparse_tensor.print %A4 : tensor<?x?xf64, #NV_58>
// Release the resources.
bufferization.dealloc_tensor %A1: tensor<?x?xf64, #CSR>
From 619529f589e05a72454f9006425640d65ec6a1cf Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik@google.com>
Date: Tue, 5 Mar 2024 10:37:00 -0800
Subject: [PATCH 2/3] edit
---
.../SparseTensor/CPU/concatenate_dim_0.mlir | 32 +++++++++----------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
index 3fa1db1a14cea2..87a68682610a43 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
@@ -137,23 +137,8 @@ module {
: (tensor<2x4xf64, #MAT_C_C>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64>
call @dump_mat_dense_9x4(%1) : (tensor<9x4xf64>) -> ()
- // CHECK: {{\[}}[1, 0, 3, 0],
- // CHECK-NEXT: [0, 2, 0, 0],
- // CHECK-NEXT: [1, 0, 1, 1],
- // CHECK-NEXT: [0, 0.5, 0, 0],
- // CHECK-NEXT: [1, 5, 2, 0],
- // CHECK-NEXT: [0, 0, 1.5, 1],
- // CHECK-NEXT: [0, 3.5, 0, 0],
- // CHECK-NEXT: [1, 5, 2, 0],
- // CHECK-NEXT: [1, 0.5, 0, 0]]
- // CHECK-NEXT: 18
- // CHECK: [1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5
- %2 = call @concat_mix_sparse(%m24, %sm34cd, %sm44dc)
- : (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C>
- sparse_tensor.print %2 : tensor<9x4xf64, #MAT_C_C>
-
//
- // CHECK: ---- Sparse Tensor ----
+ // CHECK: ---- Sparse Tensor ----
// CHECK-NEXT: nse = 18
// CHECK-NEXT: dim = ( 9, 4 )
// CHECK-NEXT: lvl = ( 9, 4 )
@@ -163,7 +148,22 @@ module {
// CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1, )
// CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5, )
// CHECK-NEXT: ----
+ %2 = call @concat_mix_sparse(%m24, %sm34cd, %sm44dc)
+ : (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C>
+ sparse_tensor.print %2 : tensor<9x4xf64, #MAT_C_C>
+
+ //
+ // CHECK: {{\[}}[1, 0, 3, 0],
+ // CHECK-NEXT: [0, 2, 0, 0],
+ // CHECK-NEXT: [1, 0, 1, 1],
+ // CHECK-NEXT: [0, 0.5, 0, 0],
+ // CHECK-NEXT: [1, 5, 2, 0],
+ // CHECK-NEXT: [0, 0, 1.5, 1],
+ // CHECK-NEXT: [0, 3.5, 0, 0],
+ // CHECK-NEXT: [1, 5, 2, 0],
+ // CHECK-NEXT: [1, 0.5, 0, 0]]
//
+ %1 = call @concat_sparse_dense(%sm24cc, %sm34cd, %sm44dc)
%3 = call @concat_mix_dense(%m24, %sm34cd, %sm44dc)
: (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64>
call @dump_mat_dense_9x4(%3) : (tensor<9x4xf64>) -> ()
From aac2d1592f9225b41e07207018f7303c05fd4c79 Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik@google.com>
Date: Tue, 5 Mar 2024 10:44:17 -0800
Subject: [PATCH 3/3] edit
---
.../Dialect/SparseTensor/CPU/concatenate_dim_0.mlir | 11 +++++------
.../Dialect/SparseTensor/CPU/sparse_complex64.mlir | 12 ++++++------
.../Dialect/SparseTensor/CPU/sparse_ds.mlir | 2 +-
3 files changed, 12 insertions(+), 13 deletions(-)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
index 87a68682610a43..f1309c518de037 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
@@ -142,11 +142,11 @@ module {
// CHECK-NEXT: nse = 18
// CHECK-NEXT: dim = ( 9, 4 )
// CHECK-NEXT: lvl = ( 9, 4 )
- // CHECK-NEXT: pos[0] : ( 0, 9, )
- // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, )
- // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18, )
- // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1, )
- // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5, )
+ // CHECK-NEXT: pos[0] : ( 0, 9,
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8,
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18,
+ // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1,
+ // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5,
// CHECK-NEXT: ----
%2 = call @concat_mix_sparse(%m24, %sm34cd, %sm44dc)
: (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C>
@@ -163,7 +163,6 @@ module {
// CHECK-NEXT: [1, 5, 2, 0],
// CHECK-NEXT: [1, 0.5, 0, 0]]
//
- %1 = call @concat_sparse_dense(%sm24cc, %sm34cd, %sm44dc)
%3 = call @concat_mix_dense(%m24, %sm34cd, %sm44dc)
: (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64>
call @dump_mat_dense_9x4(%3) : (tensor<9x4xf64>) -> ()
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir
index 840895ffee3252..d4b43eb5767624 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir
@@ -101,18 +101,18 @@ module {
// CHECK-NEXT: nse = 4
// CHECK-NEXT: dim = ( 32 )
// CHECK-NEXT: lvl = ( 32 )
- // CHECK-NEXT: pos[0] : ( 0, 4, )
- // CHECK-NEXT: crd[0] : ( 0, 1, 28, 31, )
- // CHECK-NEXT: values : ( ( 511.13, 2 ), ( 1, 0 ), ( 5, 4 ), ( 8, 6 ), )
+ // CHECK-NEXT: pos[0] : ( 0, 4,
+ // CHECK-NEXT: crd[0] : ( 0, 1, 28, 31,
+ // CHECK-NEXT: values : ( ( 511.13, 2 ), ( 1, 0 ), ( 5, 4 ), ( 8, 6 ),
// CHECK-NEXT: ----
//
// CHECK-NEXT: ---- Sparse Tensor ----
// CHECK-NEXT: nse = 2
// CHECK-NEXT: dim = ( 32 )
// CHECK-NEXT: lvl = ( 32 )
- // CHECK-NEXT: pos[0] : ( 0, 2, )
- // CHECK-NEXT: crd[0] : ( 28, 31, )
- // CHECK-NEXT: values : ( ( 6, 8 ), ( 15, 18 ), )
+ // CHECK-NEXT: pos[0] : ( 0, 2,
+ // CHECK-NEXT: crd[0] : ( 28, 31,
+ // CHECK-NEXT: values : ( ( 6, 8 ), ( 15, 18 ),
// CHECK-NEXT: ----
//
sparse_tensor.print %0 : tensor<?xcomplex<f64>, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir
index cd20e0abb84b1d..37d8a42a299020 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir
@@ -93,7 +93,7 @@ module {
// CHECK-NEXT: nse = 12
// CHECK-NEXT: dim = ( 3, 8 )
// CHECK-NEXT: lvl = ( 3, 8 )
- // CHECK-NEXT: pos[1] : ( 0, 4, 4, 8, 8, 12, 12,
+ // CHECK-NEXT: pos[1] : ( 0, 4, 4, 8, 8, 12,
// CHECK-NEXT: crd[1] : ( 2, 3, 5, 7, 1, 2, 4, 7, 0, 2, 4, 5,
// CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
// CHECK-NEXT: ----