[Mlir-commits] [mlir] [mlir][sparse] add a few more cases to sparse_tensor.print test (PR #83338)
Aart Bik
llvmlistbot at llvm.org
Wed Feb 28 13:17:48 PST 2024
https://github.com/aartbik created https://github.com/llvm/llvm-project/pull/83338
>From 8777aff2abe28b479741278ab1784c1a5bc808fc Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik at google.com>
Date: Wed, 28 Feb 2024 13:10:25 -0800
Subject: [PATCH] [mlir][sparse] add a few more cases to sparse_tensor.print
test
---
.../SparseTensor/CPU/sparse_print.mlir | 58 ++++++++++++++++++-
1 file changed, 56 insertions(+), 2 deletions(-)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir
index 797a04bb9ff862..79728fdb0f8cde 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir
@@ -102,6 +102,24 @@
)
}>
+#BSR0 = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ i floordiv 2 : dense,
+ j floordiv 4 : compressed,
+ i mod 2 : dense,
+ j mod 4 : dense
+ )
+}>
+
+#BSC0 = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ j floordiv 4 : dense,
+ i floordiv 2 : compressed,
+ i mod 2 : dense,
+ j mod 4 : dense
+ )
+}>
+
module {
//
@@ -114,6 +132,21 @@ module {
[ 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 3, 4, 0, 5, 0, 0 ] ]> : tensor<4x8xi32>
+ %XO = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #AllDense>
+ %XT = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #AllDenseT>
+
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 5, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %XO : tensor<4x8xi32, #AllDense>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: values : ( 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %XT : tensor<4x8xi32, #AllDenseT>
+
%a = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #CSR>
%b = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #DCSR>
%c = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #CSC>
@@ -122,9 +155,10 @@ module {
%f = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSRC>
%g = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSC>
%h = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSCC>
+ %i = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSR0>
+ %j = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSC0>
- //
- // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: ---- Sparse Tensor ----
// CHECK-NEXT: nse = 5
// CHECK-NEXT: pos[1] : ( 0, 2, 2, 2, 5,
// CHECK-NEXT: crd[1] : ( 0, 2, 2, 3, 5,
@@ -200,7 +234,25 @@ module {
// CHECK-NEXT: ----
sparse_tensor.print %h : tensor<4x8xi32, #BSCC>
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 1, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 0, 1,
+ // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 5, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %i : tensor<4x8xi32, #BSR0>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 1, 1,
+ // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 5, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %j : tensor<4x8xi32, #BSC0>
+
// Release the resources.
+ bufferization.dealloc_tensor %XO : tensor<4x8xi32, #AllDense>
+ bufferization.dealloc_tensor %XT : tensor<4x8xi32, #AllDenseT>
bufferization.dealloc_tensor %a : tensor<4x8xi32, #CSR>
bufferization.dealloc_tensor %b : tensor<4x8xi32, #DCSR>
bufferization.dealloc_tensor %c : tensor<4x8xi32, #CSC>
@@ -209,6 +261,8 @@ module {
bufferization.dealloc_tensor %f : tensor<4x8xi32, #BSRC>
bufferization.dealloc_tensor %g : tensor<4x8xi32, #BSC>
bufferization.dealloc_tensor %h : tensor<4x8xi32, #BSCC>
+ bufferization.dealloc_tensor %i : tensor<4x8xi32, #BSR0>
+ bufferization.dealloc_tensor %j : tensor<4x8xi32, #BSC0>
return
}
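
For context, both new encodings partition the same 4x8 input into 2x4 blocks and differ only in which outer (block) level is compressed. A rough sketch of the layout behind the new CHECK lines, assuming the usual reading of the level maps (stored blocks laid out contiguously, row-major within each block):

  // Input (4x8), split into 2x4 blocks:
  //   | 1 0 2 0 | 0 0 0 0 |   block (0,0) nonzero, block (0,1) all zero
  //   | 0 0 0 0 | 0 0 0 0 |
  //   | 0 0 0 0 | 0 0 0 0 |   block (1,0) nonzero, block (1,1) nonzero
  //   | 0 0 3 4 | 0 5 0 0 |
  //
  // #BSR0 (dense over block rows, compressed over block columns):
  //   block row 0 stores 1 block, block row 1 stores 2 blocks
  //   -> pos[1] = ( 0, 1, 3 ), crd[1] = ( 0, 0, 1 ), nse = 3 blocks x 8 = 24
  //
  // #BSC0 (dense over block columns, compressed over block rows):
  //   block column 0 stores 2 blocks, block column 1 stores 1 block
  //   -> pos[1] = ( 0, 2, 3 ), crd[1] = ( 0, 1, 1 ), nse = 3 blocks x 8 = 24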