[Mlir-commits] [mlir] add43ff - Re-apply "[mlir][SparseTensor] Add a few more tests for sparse vectorization"

Quentin Colombet llvmlistbot at llvm.org
Fri Jan 13 05:08:53 PST 2023


Author: Quentin Colombet
Date: 2023-01-13T13:02:19Z
New Revision: add43ff1805d808dd4a6946218005395991884fe

URL: https://github.com/llvm/llvm-project/commit/add43ff1805d808dd4a6946218005395991884fe
DIFF: https://github.com/llvm/llvm-project/commit/add43ff1805d808dd4a6946218005395991884fe.diff

LOG: Re-apply "[mlir][SparseTensor] Add a few more tests for sparse vectorization"

This reverts commit 93f40c983e0adbb63cbb7c59814090134d691dd1.

Update the tests to also work on Windows.
The order in which the `arith.constant`s appear in the output IR is
slightly different between Windows and Linux.
Use `CHECK-ON-DAG`/`CHECK-OFF-DAG` for the constants so they can match in any order.
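
As an illustration (not part of the committed test, and using a generic
`CHECK` prefix rather than the test's `CHECK-ON`/`CHECK-OFF` prefixes),
plain CHECK lines pin an exact order, while CHECK-DAG lines within a
block match in any order:

  // Requires the two constants in exactly this order:
  // CHECK: %[[C0:.*]] = arith.constant 0 : index
  // CHECK: %[[C1:.*]] = arith.constant 1 : index

  // Accepts either order in the output IR:
  // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
  // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index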

Original message:
These tests cover `muli`, `xor`, `and`, `addf`, `subf`, and `addi`.

The tests themselves are not that interesting; their goal is to provide
code coverage for all the types of reductions currently supported (the
shared pattern is sketched after the revision link below).

NFC

Differential Revision: https://reviews.llvm.org/D141369
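
For reference, every test added below follows the same shape (a sketch
distilled from the diff; the name @sparse_reduction_op is illustrative,
and only the reduction operation varies across the tests):

  #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>

  #trait = {
    indexing_maps = [
      affine_map<(i) -> (i)>,  // a (in)
      affine_map<(i) -> ()>    // x (out)
    ],
    iterator_types = ["reduction"]
  }

  func.func @sparse_reduction_op(%argx: tensor<i32>,
                                 %arga: tensor<?xi32, #SparseVector>) -> tensor<i32> {
    %0 = linalg.generic #trait
       ins(%arga: tensor<?xi32, #SparseVector>)
       outs(%argx: tensor<i32>) {
       ^bb(%a: i32, %x: i32):
         // The running reduction %x is combined with each stored element %a;
         // this op stands in for arith.xori/andi/muli/addi/subf/addf in the real tests.
         %t = arith.addi %x, %a : i32
         linalg.yield %t : i32
    } -> tensor<i32>
    return %0 : tensor<i32>
  }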

Added: 
    

Modified: 
    mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir

Removed: 
    


################################################################################
diff  --git a/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir b/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir
index e292cecb30a4f..57df898e8209d 100644
--- a/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir
+++ b/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir
@@ -434,3 +434,450 @@ func.func @sparse_reduction_subi(%argx: tensor<i32>,
 }
 
 // -----
+
+// From this point forward, we essentially have the same test for all
+// arithmetic operations. This is purely for code coverage.
+
+// Check that we vectorize xor.
+// CHECK-ON-LABEL: func.func @sparse_reduction_xor(
+// CHECK-ON-SAME: %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-ON-DAG:  %[[VAL_2:.*]] = arith.constant 8 : index
+// CHECK-ON-DAG:  %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi32>
+// CHECK-ON-DAG:  %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK-ON-DAG:  %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK-ON:  %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-ON:  %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-ON:  %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-ON:  %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<i32>
+// CHECK-ON:  %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK-ON:  %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK-ON:  %[[VAL_12:.*]] = vector.insertelement %[[VAL_9]], %[[VAL_3]]{{\[}}%[[VAL_4]] : index] : vector<8xi32>
+// CHECK-ON:  %[[VAL_13:.*]] = scf.for %[[VAL_14:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_15:.*]] = %[[VAL_12]]) -> (vector<8xi32>) {
+// CHECK-ON:    %[[VAL_16:.*]] = affine.min #map(%[[VAL_11]], %[[VAL_14]]){{\[}}%[[VAL_2]]]
+// CHECK-ON:    %[[VAL_17:.*]] = vector.create_mask %[[VAL_16]] : vector<8xi1>
+// CHECK-ON:    %[[VAL_18:.*]] = vector.maskedload %[[VAL_7]]{{\[}}%[[VAL_14]]], %[[VAL_17]], %[[VAL_3]] : memref<?xi32>, vector<8xi1>, vector<8xi32> into vector<8xi32>
+// CHECK-ON:    %[[VAL_19:.*]] = arith.xori %[[VAL_15]], %[[VAL_18]] : vector<8xi32>
+// CHECK-ON:    %[[VAL_20:.*]] = arith.select %[[VAL_17]], %[[VAL_19]], %[[VAL_15]] : vector<8xi1>, vector<8xi32>
+// CHECK-ON:    scf.yield %[[VAL_20]] : vector<8xi32>
+// CHECK-ON:  } {"Emitted from" = "linalg.generic"}
+// CHECK-ON:  %[[VAL_21:.*]] = vector.reduction <xor>, %[[VAL_22:.*]] : vector<8xi32> into i32
+// CHECK-ON:  memref.store %[[VAL_21]], %[[VAL_8]][] : memref<i32>
+// CHECK-ON:  %[[VAL_23:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<i32>
+// CHECK-ON:  return %[[VAL_23]] : tensor<i32>
+// CHECK-ON: }
+//
+// CHECK-OFF-LABEL: func.func @sparse_reduction_xor(
+// CHECK-OFF-SAME:  %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-OFF-SAME:  %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-OFF-DAG:   %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK-OFF-DAG:   %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-OFF:   %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-OFF:   %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-OFF:   %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-OFF:   %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<i32>
+// CHECK-OFF:   %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK-OFF:   %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK-OFF:   %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_7]]) -> (i32) {
+// CHECK-OFF:     %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xi32>
+// CHECK-OFF:     %[[VAL_14:.*]] = arith.xori %[[VAL_12]], %[[VAL_13]] : i32
+// CHECK-OFF:     scf.yield %[[VAL_14]] : i32
+// CHECK-OFF:   } {"Emitted from" = "linalg.generic"}
+// CHECK-OFF:   memref.store %[[VAL_15:.*]], %[[VAL_6]][] : memref<i32>
+// CHECK-OFF:   %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref<i32>
+// CHECK-OFF:   return %[[VAL_16]] : tensor<i32>
+// CHECK-OFF: }
+
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+#trait = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>,  // a (in)
+    affine_map<(i) -> ()>    // x (out)
+  ],
+  iterator_types = ["reduction"]
+}
+
+func.func @sparse_reduction_xor(%argx: tensor<i32>,
+                             %arga: tensor<?xi32, #SparseVector>)
+ -> tensor<i32> {
+  %0 = linalg.generic #trait
+     ins(%arga: tensor<?xi32, #SparseVector>)
+      outs(%argx: tensor<i32>) {
+      ^bb(%a: i32, %x: i32):
+        %t = arith.xori %x, %a: i32
+        linalg.yield %t : i32
+  } -> tensor<i32>
+  return %0 : tensor<i32>
+}
+
+// -----
+// Check that we vectorize and.
+// CHECK-ON-LABEL: func.func @sparse_reduction_and(
+// CHECK-ON-SAME:   %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-ON-SAME:   %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-ON-DAG:   %[[VAL_2:.*]] = arith.constant 8 : index
+// CHECK-ON-DAG:   %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi32>
+// CHECK-ON-DAG:   %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK-ON-DAG:   %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK-ON:   %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-ON:   %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-ON:   %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-ON:   %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<i32>
+// CHECK-ON:   %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK-ON:   %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK-ON:   %[[VAL_12:.*]] = vector.broadcast %[[VAL_9]] : i32 to vector<8xi32>
+// CHECK-ON:   %[[VAL_13:.*]] = scf.for %[[VAL_14:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_15:.*]] = %[[VAL_12]]) -> (vector<8xi32>) {
+// CHECK-ON:     %[[VAL_16:.*]] = affine.min #map(%[[VAL_11]], %[[VAL_14]]){{\[}}%[[VAL_2]]]
+// CHECK-ON:     %[[VAL_17:.*]] = vector.create_mask %[[VAL_16]] : vector<8xi1>
+// CHECK-ON:     %[[VAL_18:.*]] = vector.maskedload %[[VAL_7]]{{\[}}%[[VAL_14]]], %[[VAL_17]], %[[VAL_3]] : memref<?xi32>, vector<8xi1>, vector<8xi32> into vector<8xi32>
+// CHECK-ON:     %[[VAL_19:.*]] = arith.andi %[[VAL_15]], %[[VAL_18]] : vector<8xi32>
+// CHECK-ON:     %[[VAL_20:.*]] = arith.select %[[VAL_17]], %[[VAL_19]], %[[VAL_15]] : vector<8xi1>, vector<8xi32>
+// CHECK-ON:     scf.yield %[[VAL_20]] : vector<8xi32>
+// CHECK-ON:   } {"Emitted from" = "linalg.generic"}
+// CHECK-ON:   %[[VAL_21:.*]] = vector.reduction <and>, %[[VAL_22:.*]] : vector<8xi32> into i32
+// CHECK-ON:   memref.store %[[VAL_21]], %[[VAL_8]][] : memref<i32>
+// CHECK-ON:   %[[VAL_23:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<i32>
+// CHECK-ON:   return %[[VAL_23]] : tensor<i32>
+// CHECK-ON: }
+//
+// CHECK-OFF-LABEL: func.func @sparse_reduction_and(
+// CHECK-OFF-SAME:   %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-OFF-SAME:   %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-OFF-DAG:   %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK-OFF-DAG:   %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-OFF:   %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-OFF:   %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-OFF:   %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-OFF:   %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<i32>
+// CHECK-OFF:   %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK-OFF:   %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK-OFF:   %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_7]]) -> (i32) {
+// CHECK-OFF:     %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xi32>
+// CHECK-OFF:     %[[VAL_14:.*]] = arith.andi %[[VAL_12]], %[[VAL_13]] : i32
+// CHECK-OFF:     scf.yield %[[VAL_14]] : i32
+// CHECK-OFF:   } {"Emitted from" = "linalg.generic"}
+// CHECK-OFF:   memref.store %[[VAL_15:.*]], %[[VAL_6]][] : memref<i32>
+// CHECK-OFF:   %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref<i32>
+// CHECK-OFF:   return %[[VAL_16]] : tensor<i32>
+// CHECK-OFF: }
+
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+#trait = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>,  // a (in)
+    affine_map<(i) -> ()>    // x (out)
+  ],
+  iterator_types = ["reduction"]
+}
+
+func.func @sparse_reduction_and(%argx: tensor<i32>,
+                             %arga: tensor<?xi32, #SparseVector>)
+ -> tensor<i32> {
+  %0 = linalg.generic #trait
+     ins(%arga: tensor<?xi32, #SparseVector>)
+      outs(%argx: tensor<i32>) {
+      ^bb(%a: i32, %x: i32):
+        %t = arith.andi %x, %a: i32
+        linalg.yield %t : i32
+  } -> tensor<i32>
+  return %0 : tensor<i32>
+}
+
+// -----
+// Check that we vectorize muli.
+// CHECK-ON-LABEL: func.func @sparse_reduction_muli(
+// CHECK-ON-SAME:   %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-ON-SAME:   %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-ON-DAG:   %[[VAL_2:.*]] = arith.constant 8 : index
+// CHECK-ON-DAG:   %[[VAL_3:.*]] = arith.constant dense<1> : vector<8xi32>
+// CHECK-ON-DAG:   %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK-ON-DAG:   %[[VAL_5:.*]] = arith.constant dense<0> : vector<8xi32>
+// CHECK-ON-DAG:   %[[VAL_6:.*]] = arith.constant 1 : index
+// CHECK-ON:   %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-ON:   %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-ON:   %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-ON:   %[[VAL_10:.*]] = memref.load %[[VAL_9]][] : memref<i32>
+// CHECK-ON:   %[[VAL_11:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK-ON:   %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
+// CHECK-ON:   %[[VAL_13:.*]] = vector.insertelement %[[VAL_10]], %[[VAL_3]]{{\[}}%[[VAL_4]] : index] : vector<8xi32>
+// CHECK-ON:   %[[VAL_14:.*]] = scf.for %[[VAL_15:.*]] = %[[VAL_11]] to %[[VAL_12]] step %[[VAL_2]] iter_args(%[[VAL_16:.*]] = %[[VAL_13]]) -> (vector<8xi32>) {
+// CHECK-ON:     %[[VAL_17:.*]] = affine.min #map(%[[VAL_12]], %[[VAL_15]]){{\[}}%[[VAL_2]]]
+// CHECK-ON:     %[[VAL_18:.*]] = vector.create_mask %[[VAL_17]] : vector<8xi1>
+// CHECK-ON:     %[[VAL_19:.*]] = vector.maskedload %[[VAL_8]]{{\[}}%[[VAL_15]]], %[[VAL_18]], %[[VAL_5]] : memref<?xi32>, vector<8xi1>, vector<8xi32> into vector<8xi32>
+// CHECK-ON:     %[[VAL_20:.*]] = arith.muli %[[VAL_16]], %[[VAL_19]] : vector<8xi32>
+// CHECK-ON:     %[[VAL_21:.*]] = arith.select %[[VAL_18]], %[[VAL_20]], %[[VAL_16]] : vector<8xi1>, vector<8xi32>
+// CHECK-ON:     scf.yield %[[VAL_21]] : vector<8xi32>
+// CHECK-ON:   } {"Emitted from" = "linalg.generic"}
+// CHECK-ON:   %[[VAL_22:.*]] = vector.reduction <mul>, %[[VAL_23:.*]] : vector<8xi32> into i32
+// CHECK-ON:   memref.store %[[VAL_22]], %[[VAL_9]][] : memref<i32>
+// CHECK-ON:   %[[VAL_24:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<i32>
+// CHECK-ON:   return %[[VAL_24]] : tensor<i32>
+// CHECK-ON: }
+//
+// CHECK-OFF-LABEL: func.func @sparse_reduction_muli(
+// CHECK-OFF-SAME:   %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-OFF-SAME:   %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-OFF-DAG:   %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK-OFF-DAG:   %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-OFF:   %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-OFF:   %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-OFF:   %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-OFF:   %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<i32>
+// CHECK-OFF:   %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK-OFF:   %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK-OFF:   %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_7]]) -> (i32) {
+// CHECK-OFF:     %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xi32>
+// CHECK-OFF:     %[[VAL_14:.*]] = arith.muli %[[VAL_12]], %[[VAL_13]] : i32
+// CHECK-OFF:     scf.yield %[[VAL_14]] : i32
+// CHECK-OFF:   } {"Emitted from" = "linalg.generic"}
+// CHECK-OFF:   memref.store %[[VAL_15:.*]], %[[VAL_6]][] : memref<i32>
+// CHECK-OFF:   %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref<i32>
+// CHECK-OFF:   return %[[VAL_16]] : tensor<i32>
+// CHECK-OFF: }
+
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+#trait = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>,  // a (in)
+    affine_map<(i) -> ()>    // x (out)
+  ],
+  iterator_types = ["reduction"]
+}
+
+func.func @sparse_reduction_muli(%argx: tensor<i32>,
+                             %arga: tensor<?xi32, #SparseVector>)
+ -> tensor<i32> {
+  %0 = linalg.generic #trait
+     ins(%arga: tensor<?xi32, #SparseVector>)
+      outs(%argx: tensor<i32>) {
+      ^bb(%a: i32, %x: i32):
+        %t = arith.muli %x, %a: i32
+        linalg.yield %t : i32
+  } -> tensor<i32>
+  return %0 : tensor<i32>
+}
+
+// -----
+// Check that we vectorize addi.
+// CHECK-ON-LABEL: func.func @sparse_reduction_addi(
+// CHECK-ON-SAME:   %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-ON-SAME:   %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-ON-DAG:   %[[VAL_2:.*]] = arith.constant 8 : index
+// CHECK-ON-DAG:   %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi32>
+// CHECK-ON-DAG:   %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK-ON-DAG:   %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK-ON:   %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-ON:   %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-ON:   %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-ON:   %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<i32>
+// CHECK-ON:   %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK-ON:   %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK-ON:   %[[VAL_12:.*]] = vector.insertelement %[[VAL_9]], %[[VAL_3]]{{\[}}%[[VAL_4]] : index] : vector<8xi32>
+// CHECK-ON:   %[[VAL_13:.*]] = scf.for %[[VAL_14:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_15:.*]] = %[[VAL_12]]) -> (vector<8xi32>) {
+// CHECK-ON:     %[[VAL_16:.*]] = affine.min #map(%[[VAL_11]], %[[VAL_14]]){{\[}}%[[VAL_2]]]
+// CHECK-ON:     %[[VAL_17:.*]] = vector.create_mask %[[VAL_16]] : vector<8xi1>
+// CHECK-ON:     %[[VAL_18:.*]] = vector.maskedload %[[VAL_7]]{{\[}}%[[VAL_14]]], %[[VAL_17]], %[[VAL_3]] : memref<?xi32>, vector<8xi1>, vector<8xi32> into vector<8xi32>
+// CHECK-ON:     %[[VAL_19:.*]] = arith.addi %[[VAL_15]], %[[VAL_18]] : vector<8xi32>
+// CHECK-ON:     %[[VAL_20:.*]] = arith.select %[[VAL_17]], %[[VAL_19]], %[[VAL_15]] : vector<8xi1>, vector<8xi32>
+// CHECK-ON:     scf.yield %[[VAL_20]] : vector<8xi32>
+// CHECK-ON:   } {"Emitted from" = "linalg.generic"}
+// CHECK-ON:   %[[VAL_21:.*]] = vector.reduction <add>, %[[VAL_22:.*]] : vector<8xi32> into i32
+// CHECK-ON:   memref.store %[[VAL_21]], %[[VAL_8]][] : memref<i32>
+// CHECK-ON:   %[[VAL_23:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<i32>
+// CHECK-ON:   return %[[VAL_23]] : tensor<i32>
+// CHECK-ON: }
+//
+// CHECK-OFF-LABEL: func.func @sparse_reduction_addi(
+// CHECK-OFF-SAME:   %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-OFF-SAME:   %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-OFF-DAG:   %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK-OFF-DAG:   %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-OFF:   %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-OFF:   %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-OFF:   %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-OFF:   %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<i32>
+// CHECK-OFF:   %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK-OFF:   %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK-OFF:   %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_7]]) -> (i32) {
+// CHECK-OFF:     %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xi32>
+// CHECK-OFF:     %[[VAL_14:.*]] = arith.addi %[[VAL_12]], %[[VAL_13]] : i32
+// CHECK-OFF:     scf.yield %[[VAL_14]] : i32
+// CHECK-OFF:   } {"Emitted from" = "linalg.generic"}
+// CHECK-OFF:   memref.store %[[VAL_15:.*]], %[[VAL_6]][] : memref<i32>
+// CHECK-OFF:   %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref<i32>
+// CHECK-OFF:   return %[[VAL_16]] : tensor<i32>
+// CHECK-OFF: }
+
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+#trait = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>,  // a (in)
+    affine_map<(i) -> ()>    // x (out)
+  ],
+  iterator_types = ["reduction"]
+}
+
+func.func @sparse_reduction_addi(%argx: tensor<i32>,
+                             %arga: tensor<?xi32, #SparseVector>)
+ -> tensor<i32> {
+  %0 = linalg.generic #trait
+     ins(%arga: tensor<?xi32, #SparseVector>)
+      outs(%argx: tensor<i32>) {
+      ^bb(%a: i32, %x: i32):
+        %t = arith.addi %x, %a: i32
+        linalg.yield %t : i32
+  } -> tensor<i32>
+  return %0 : tensor<i32>
+}
+
+// -----
+// Check that we vectorize subf.
+// CHECK-ON-LABEL: func.func @sparse_reduction_subf(
+// CHECK-ON-SAME:   %[[VAL_0:.*]]: tensor<f32>,
+// CHECK-ON-SAME:   %[[VAL_1:.*]]: tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<f32> {
+// CHECK-ON-DAG:   %[[VAL_2:.*]] = arith.constant 8 : index
+// CHECK-ON-DAG:   %[[VAL_3:.*]] = arith.constant dense<0.000000e+00> : vector<8xf32>
+// CHECK-ON-DAG:   %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK-ON-DAG:   %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK-ON:   %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-ON:   %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf32>
+// CHECK-ON:   %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f32>
+// CHECK-ON:   %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<f32>
+// CHECK-ON:   %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK-ON:   %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK-ON:   %[[VAL_12:.*]] = vector.insertelement %[[VAL_9]], %[[VAL_3]]{{\[}}%[[VAL_4]] : index] : vector<8xf32>
+// CHECK-ON:   %[[VAL_13:.*]] = scf.for %[[VAL_14:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_15:.*]] = %[[VAL_12]]) -> (vector<8xf32>) {
+// CHECK-ON:     %[[VAL_16:.*]] = affine.min #map(%[[VAL_11]], %[[VAL_14]]){{\[}}%[[VAL_2]]]
+// CHECK-ON:     %[[VAL_17:.*]] = vector.create_mask %[[VAL_16]] : vector<8xi1>
+// CHECK-ON:     %[[VAL_18:.*]] = vector.maskedload %[[VAL_7]]{{\[}}%[[VAL_14]]], %[[VAL_17]], %[[VAL_3]] : memref<?xf32>, vector<8xi1>, vector<8xf32> into vector<8xf32>
+// CHECK-ON:     %[[VAL_19:.*]] = arith.subf %[[VAL_15]], %[[VAL_18]] : vector<8xf32>
+// CHECK-ON:     %[[VAL_20:.*]] = arith.select %[[VAL_17]], %[[VAL_19]], %[[VAL_15]] : vector<8xi1>, vector<8xf32>
+// CHECK-ON:     scf.yield %[[VAL_20]] : vector<8xf32>
+// CHECK-ON:   } {"Emitted from" = "linalg.generic"}
+// CHECK-ON:   %[[VAL_21:.*]] = vector.reduction <add>, %[[VAL_22:.*]] : vector<8xf32> into f32
+// CHECK-ON:   memref.store %[[VAL_21]], %[[VAL_8]][] : memref<f32>
+// CHECK-ON:   %[[VAL_23:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<f32>
+// CHECK-ON:   return %[[VAL_23]] : tensor<f32>
+// CHECK-ON: }
+//
+// CHECK-OFF-LABEL: func.func @sparse_reduction_subf(
+// CHECK-OFF-SAME:   %[[VAL_0:.*]]: tensor<f32>,
+// CHECK-OFF-SAME:   %[[VAL_1:.*]]: tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<f32> {
+// CHECK-OFF-DAG:   %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK-OFF-DAG:   %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-OFF:   %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-OFF:   %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf32>
+// CHECK-OFF:   %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f32>
+// CHECK-OFF:   %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<f32>
+// CHECK-OFF:   %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK-OFF:   %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK-OFF:   %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_7]]) -> (f32) {
+// CHECK-OFF:     %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xf32>
+// CHECK-OFF:     %[[VAL_14:.*]] = arith.subf %[[VAL_12]], %[[VAL_13]] : f32
+// CHECK-OFF:     scf.yield %[[VAL_14]] : f32
+// CHECK-OFF:   } {"Emitted from" = "linalg.generic"}
+// CHECK-OFF:   memref.store %[[VAL_15:.*]], %[[VAL_6]][] : memref<f32>
+// CHECK-OFF:   %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref<f32>
+// CHECK-OFF:   return %[[VAL_16]] : tensor<f32>
+// CHECK-OFF: }
+
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+#trait = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>,  // a (in)
+    affine_map<(i) -> ()>    // x (out)
+  ],
+  iterator_types = ["reduction"]
+}
+
+func.func @sparse_reduction_subf(%argx: tensor<f32>,
+                             %arga: tensor<?xf32, #SparseVector>)
+ -> tensor<f32> {
+  %0 = linalg.generic #trait
+     ins(%arga: tensor<?xf32, #SparseVector>)
+      outs(%argx: tensor<f32>) {
+      ^bb(%a: f32, %x: f32):
+        %t = arith.subf %x, %a: f32
+        linalg.yield %t : f32
+  } -> tensor<f32>
+  return %0 : tensor<f32>
+}
+
+// -----
+// Check that we vectorize addf.
+// CHECK-ON-LABEL: func.func @sparse_reduction_addf(
+// CHECK-ON-SAME:   %[[VAL_0:.*]]: tensor<f32>,
+// CHECK-ON-SAME:   %[[VAL_1:.*]]: tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<f32> {
+// CHECK-ON-DAG:   %[[VAL_2:.*]] = arith.constant 8 : index
+// CHECK-ON-DAG:   %[[VAL_3:.*]] = arith.constant dense<0.000000e+00> : vector<8xf32>
+// CHECK-ON-DAG:   %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK-ON-DAG:   %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK-ON:   %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-ON:   %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf32>
+// CHECK-ON:   %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f32>
+// CHECK-ON:   %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<f32>
+// CHECK-ON:   %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK-ON:   %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK-ON:   %[[VAL_12:.*]] = vector.insertelement %[[VAL_9]], %[[VAL_3]]{{\[}}%[[VAL_4]] : index] : vector<8xf32>
+// CHECK-ON:   %[[VAL_13:.*]] = scf.for %[[VAL_14:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_15:.*]] = %[[VAL_12]]) -> (vector<8xf32>) {
+// CHECK-ON:     %[[VAL_16:.*]] = affine.min #map(%[[VAL_11]], %[[VAL_14]]){{\[}}%[[VAL_2]]]
+// CHECK-ON:     %[[VAL_17:.*]] = vector.create_mask %[[VAL_16]] : vector<8xi1>
+// CHECK-ON:     %[[VAL_18:.*]] = vector.maskedload %[[VAL_7]]{{\[}}%[[VAL_14]]], %[[VAL_17]], %[[VAL_3]] : memref<?xf32>, vector<8xi1>, vector<8xf32> into vector<8xf32>
+// CHECK-ON:     %[[VAL_19:.*]] = arith.addf %[[VAL_15]], %[[VAL_18]] : vector<8xf32>
+// CHECK-ON:     %[[VAL_20:.*]] = arith.select %[[VAL_17]], %[[VAL_19]], %[[VAL_15]] : vector<8xi1>, vector<8xf32>
+// CHECK-ON:     scf.yield %[[VAL_20]] : vector<8xf32>
+// CHECK-ON:   } {"Emitted from" = "linalg.generic"}
+// CHECK-ON:   %[[VAL_21:.*]] = vector.reduction <add>, %[[VAL_22:.*]] : vector<8xf32> into f32
+// CHECK-ON:   memref.store %[[VAL_21]], %[[VAL_8]][] : memref<f32>
+// CHECK-ON:   %[[VAL_23:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<f32>
+// CHECK-ON:   return %[[VAL_23]] : tensor<f32>
+// CHECK-ON: }
+//
+// CHECK-OFF-LABEL: func.func @sparse_reduction_addf(
+// CHECK-OFF-SAME:    %[[VAL_0:.*]]: tensor<f32>,
+// CHECK-OFF-SAME:    %[[VAL_1:.*]]: tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<f32> {
+// CHECK-OFF-DAG:   %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK-OFF-DAG:   %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-OFF:   %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-OFF:   %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf32>
+// CHECK-OFF:   %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f32>
+// CHECK-OFF:   %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<f32>
+// CHECK-OFF:   %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK-OFF:   %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK-OFF:   %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_7]]) -> (f32) {
+// CHECK-OFF:     %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xf32>
+// CHECK-OFF:     %[[VAL_14:.*]] = arith.addf %[[VAL_12]], %[[VAL_13]] : f32
+// CHECK-OFF:     scf.yield %[[VAL_14]] : f32
+// CHECK-OFF:   } {"Emitted from" = "linalg.generic"}
+// CHECK-OFF:   memref.store %[[VAL_15:.*]], %[[VAL_6]][] : memref<f32>
+// CHECK-OFF:   %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref<f32>
+// CHECK-OFF:   return %[[VAL_16]] : tensor<f32>
+// CHECK-OFF: }
+
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+#trait = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>,  // a (in)
+    affine_map<(i) -> ()>    // x (out)
+  ],
+  iterator_types = ["reduction"]
+}
+
+func.func @sparse_reduction_addf(%argx: tensor<f32>,
+                             %arga: tensor<?xf32, #SparseVector>)
+ -> tensor<f32> {
+  %0 = linalg.generic #trait
+     ins(%arga: tensor<?xf32, #SparseVector>)
+      outs(%argx: tensor<f32>) {
+      ^bb(%a: f32, %x: f32):
+        %t = arith.addf %x, %a: f32
+        linalg.yield %t : f32
+  } -> tensor<f32>
+  return %0 : tensor<f32>
+}


        

