[Mlir-commits] [mlir] 5da8f30 - [mlir][sparse] add test cases for sparse tensor slices on COO tensors

Peiming Liu llvmlistbot at llvm.org
Thu Mar 16 09:44:02 PDT 2023


Author: Peiming Liu
Date: 2023-03-16T16:43:56Z
New Revision: 5da8f30d6382d4692cfe5b8e18851eea2095bacc

URL: https://github.com/llvm/llvm-project/commit/5da8f30d6382d4692cfe5b8e18851eea2095bacc
DIFF: https://github.com/llvm/llvm-project/commit/5da8f30d6382d4692cfe5b8e18851eea2095bacc.diff

LOG: [mlir][sparse] add test cases for sparse tensor slices on COO tensors

Reviewed By: aartbik, bixia

Differential Revision: https://reviews.llvm.org/D146074
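
The (offset, size, stride) triples in the new slice encodings line up
one-to-one with the offsets/sizes/strides operands of tensor.extract_slice.
A minimal sketch, using the same values as the test below (the @take_slice
wrapper is only illustrative and is not part of this commit):

  #COO = #sparse_tensor.encoding<{
    dimLevelType = [ "compressed-nu", "singleton" ]
  }>

  // Static 4x4 slice of an 8x8 COO tensor: offsets (1, 1), sizes (4, 4),
  // strides (1, 2), i.e. every other column starting at (1, 1).
  #COO_SLICE = #sparse_tensor.encoding<{
    dimLevelType = [ "compressed-nu", "singleton" ],
    slice = [ (1, 4, 1), (1, 4, 2) ]
  }>

  func.func @take_slice(%t: tensor<8x8xf64, #COO>) -> tensor<4x4xf64, #COO_SLICE> {
    %s = tensor.extract_slice %t[1, 1][4, 4][1, 2]
        : tensor<8x8xf64, #COO> to tensor<4x4xf64, #COO_SLICE>
    return %s : tensor<4x4xf64, #COO_SLICE>
  }

sparse_tensor.foreach then visits the stored entries of the slice and yields
coordinates relative to the slice, which is what the new CHECK lines verify.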

Added: 
    

Modified: 
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
index a3e94263a40f..d51374b1fe3f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
@@ -9,6 +9,7 @@
 //
 
 // TODO: support slices on lib path
+
 #CSR = #sparse_tensor.encoding<{
   dimLevelType = [ "dense", "compressed" ]
 }>
@@ -23,6 +24,21 @@
   slice = [ (?, ?, ?), (?, ?, ?) ]
 }>
 
+#COO = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed-nu", "singleton" ]
+}>
+
+#COO_SLICE = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed-nu", "singleton" ],
+  slice = [ (1, 4, 1), (1, 4, 2) ]
+}>
+
+#COO_SLICE_DYN = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed-nu", "singleton" ],
+  slice = [ (?, ?, ?), (?, ?, ?) ]
+}>
+
+
 
 module {
   func.func @foreach_print_non_slice(%A: tensor<4x4xf64, #CSR>) {
@@ -55,6 +71,26 @@ module {
     return
   }
 
+  func.func @foreach_print_slice_coo(%A: tensor<4x4xf64, #COO_SLICE>) {
+    sparse_tensor.foreach in %A : tensor<4x4xf64, #COO_SLICE> do {
+    ^bb0(%1: index, %2: index, %v: f64) :
+      vector.print %1: index
+      vector.print %2: index
+      vector.print %v: f64
+    }
+    return
+  }
+
+  func.func @foreach_print_slice_coo_dyn(%A: tensor<?x?xf64, #COO_SLICE_DYN>) {
+    sparse_tensor.foreach in %A : tensor<?x?xf64, #COO_SLICE_DYN> do {
+    ^bb0(%1: index, %2: index, %v: f64) :
+      vector.print %1: index
+      vector.print %2: index
+      vector.print %v: f64
+    }
+    return
+  }
+
   func.func @entry() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
@@ -76,6 +112,10 @@ module {
     %tmp = sparse_tensor.convert %sa : tensor<8x8xf64> to tensor<8x8xf64, #CSR>
     %a = tensor.extract_slice %tmp[1, 1][4, 4][1, 2] : tensor<8x8xf64, #CSR> to
                                                        tensor<4x4xf64, #CSR_SLICE>
+
+    %tmp_coo = sparse_tensor.convert %sa : tensor<8x8xf64> to tensor<8x8xf64, #COO>
+    %a_coo = tensor.extract_slice %tmp_coo[1, 1][4, 4][1, 2] : tensor<8x8xf64, #COO> to
+                                                               tensor<4x4xf64, #COO_SLICE>
     // Foreach on sparse tensor slices directly
     //
     // CHECK: 1
@@ -89,6 +129,18 @@ module {
     // CHECK-NEXT: 2.1
     //
     call @foreach_print_slice(%a) : (tensor<4x4xf64, #CSR_SLICE>) -> ()
+    // Same results for COO
+    // CHECK-NEXT: 1
+    // CHECK-NEXT: 0
+    // CHECK-NEXT: 2.3
+    // CHECK-NEXT: 2
+    // CHECK-NEXT: 3
+    // CHECK-NEXT: 1
+    // CHECK-NEXT: 3
+    // CHECK-NEXT: 2
+    // CHECK-NEXT: 2.1
+    //
+    call @foreach_print_slice_coo(%a_coo) : (tensor<4x4xf64, #COO_SLICE>) -> ()
 
     %dense = tensor.extract_slice %sa[1, 1][4, 4][1, 2] : tensor<8x8xf64> to
                                                           tensor<4x4xf64>
@@ -111,8 +163,12 @@ module {
     // TODO: Investigate why reusing the same %tmp above would cause bufferization
     // errors.
     %tmp1 = sparse_tensor.convert %sa : tensor<8x8xf64> to tensor<8x8xf64, #CSR>
-    %a_dyn = tensor.extract_slice %tmp1[%c1, %c1][%c4, %c4][%c1, %c2] :
-          tensor<8x8xf64, #CSR> to tensor<?x?xf64, #CSR_SLICE_DYN>
+    %a_dyn = tensor.extract_slice %tmp1[%c1, %c1][%c4, %c4][%c1, %c2] : tensor<8x8xf64, #CSR> to
+                                                                        tensor<?x?xf64, #CSR_SLICE_DYN>
+
+    %tmp1_coo = sparse_tensor.convert %sa : tensor<8x8xf64> to tensor<8x8xf64, #COO>
+    %a_dyn_coo = tensor.extract_slice %tmp1_coo[%c1, %c1][%c4, %c4][%c1, %c2] : tensor<8x8xf64, #COO> to
+                                                                                tensor<?x?xf64, #COO_SLICE_DYN>
     //
     // CHECK-NEXT: 1
     // CHECK-NEXT: 0
@@ -125,9 +181,22 @@ module {
     // CHECK-NEXT: 2.1
     //
     call @foreach_print_slice_dyn(%a_dyn) : (tensor<?x?xf64, #CSR_SLICE_DYN>) -> ()
+    // CHECK-NEXT: 1
+    // CHECK-NEXT: 0
+    // CHECK-NEXT: 2.3
+    // CHECK-NEXT: 2
+    // CHECK-NEXT: 3
+    // CHECK-NEXT: 1
+    // CHECK-NEXT: 3
+    // CHECK-NEXT: 2
+    // CHECK-NEXT: 2.1
+    //
+    call @foreach_print_slice_coo_dyn(%a_dyn_coo) : (tensor<?x?xf64, #COO_SLICE_DYN>) -> ()
 
     bufferization.dealloc_tensor %tmp : tensor<8x8xf64, #CSR>
     bufferization.dealloc_tensor %tmp1 : tensor<8x8xf64, #CSR>
+    bufferization.dealloc_tensor %tmp_coo : tensor<8x8xf64, #COO>
+    bufferization.dealloc_tensor %tmp1_coo : tensor<8x8xf64, #COO>
     bufferization.dealloc_tensor %b : tensor<4x4xf64, #CSR>
     return
   }



