[Mlir-commits] [mlir] 2f0ee17 - [mlir][sparse] test for SIMD reduction chaining in consecutive vector loops

Aart Bik llvmlistbot at llvm.org
Fri Nov 5 10:14:38 PDT 2021


Author: Aart Bik
Date: 2021-11-05T10:14:17-07:00
New Revision: 2f0ee1701790bc5d2d996e70aaedcc1dc383b1ec

URL: https://github.com/llvm/llvm-project/commit/2f0ee1701790bc5d2d996e70aaedcc1dc383b1ec
DIFF: https://github.com/llvm/llvm-project/commit/2f0ee1701790bc5d2d996e70aaedcc1dc383b1ec.diff

LOG: [mlir][sparse] test for SIMD reduction chaining in consecutive vector loops
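
In other words, after the while-loop that handles the overlapping nonzeros,
the scalar reduction value is inserted once into lane 0 of a vector
accumulator, the first vectorized for-loop updates that vector, the second
vectorized for-loop starts from the first loop's vector result, and only
then is a single horizontal reduction back to scalar emitted. Roughly, the
chained pattern the CHECK lines below verify looks as follows (value names
are illustrative, loop bodies elided with ...):

  %v0  = vector.insertelement %x, %zero[%c0 : i32] : vector<8xf64>
  %v1  = scf.for %i = %lo_a to %hi_a step %c8
           iter_args(%acc = %v0) -> (vector<8xf64>) { ... }
  %v2  = scf.for %j = %lo_b to %hi_b step %c8
           iter_args(%acc = %v1) -> (vector<8xf64>) { ... }
  %sum = vector.reduction "add", %v2 : vector<8xf64> into f64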

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D113197

Added: 
    mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
new file mode 100644
index 0000000000000..ab694417a38c3
--- /dev/null
+++ b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
+
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=8" -canonicalize | \
+// RUN:   FileCheck %s
+
+#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["dense","compressed"]}>
+
+#trait = {
+  indexing_maps = [
+    affine_map<(i,j) -> (i,j)>,  // a (in)
+    affine_map<(i,j) -> (i,j)>,  // b (in)
+    affine_map<(i,j) -> ()>      // x (out)
+  ],
+  iterator_types = ["reduction", "reduction"]
+}
+
+// Verifies that the SIMD reductions in the two for-loops after the
+// while-loop are chained before being horizontally reduced back to a scalar.
+//
+// CHECK-LABEL:   func @sparse_matrix_sum(
+// CHECK-SAME:      %[[VAL_0:.*]]: tensor<f64> {linalg.inplaceable = true},
+// CHECK-SAME:      %[[VAL_1:.*]]: tensor<64x32xf64, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME:      %[[VAL_2:.*]]: tensor<64x32xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<f64> {
+// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant dense<0.000000e+00> : vector<8xf64>
+// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 8 : index
+// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 0 : i32
+// CHECK-DAG:       %[[VAL_6:.*]] = arith.constant 0 : index
+// CHECK-DAG:       %[[VAL_7:.*]] = arith.constant 64 : index
+// CHECK-DAG:       %[[VAL_8:.*]] = arith.constant 1 : index
+// CHECK:           %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_8]] : tensor<64x32xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK:           %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_8]] : tensor<64x32xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK:           %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<64x32xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK:           %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_2]], %[[VAL_8]] : tensor<64x32xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK:           %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_2]], %[[VAL_8]] : tensor<64x32xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK:           %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<64x32xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK:           %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_0]] : memref<f64>
+// CHECK:           %[[VAL_16:.*]] = tensor.extract %[[VAL_0]][] : tensor<f64>
+// CHECK:           %[[VAL_17:.*]] = scf.for %[[VAL_18:.*]] = %[[VAL_6]] to %[[VAL_7]] step %[[VAL_8]] iter_args(%[[VAL_19:.*]] = %[[VAL_16]]) -> (f64) {
+// CHECK:             %[[VAL_20:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xindex>
+// CHECK:             %[[VAL_21:.*]] = arith.addi %[[VAL_18]], %[[VAL_8]] : index
+// CHECK:             %[[VAL_22:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_21]]] : memref<?xindex>
+// CHECK:             %[[VAL_23:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_18]]] : memref<?xindex>
+// CHECK:             %[[VAL_24:.*]] = arith.addi %[[VAL_18]], %[[VAL_8]] : index
+// CHECK:             %[[VAL_25:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_24]]] : memref<?xindex>
+// CHECK:             %[[VAL_26:.*]]:3 = scf.while (%[[VAL_27:.*]] = %[[VAL_20]], %[[VAL_28:.*]] = %[[VAL_23]], %[[VAL_29:.*]] = %[[VAL_19]]) : (index, index, f64) -> (index, index, f64) {
+// CHECK:               %[[VAL_30:.*]] = arith.cmpi ult, %[[VAL_27]], %[[VAL_22]] : index
+// CHECK:               %[[VAL_31:.*]] = arith.cmpi ult, %[[VAL_28]], %[[VAL_25]] : index
+// CHECK:               %[[VAL_32:.*]] = arith.andi %[[VAL_30]], %[[VAL_31]] : i1
+// CHECK:               scf.condition(%[[VAL_32]]) %[[VAL_27]], %[[VAL_28]], %[[VAL_29]] : index, index, f64
+// CHECK:             } do {
+// CHECK:             ^bb0(%[[VAL_33:.*]]: index, %[[VAL_34:.*]]: index, %[[VAL_35:.*]]: f64):
+// CHECK:               %[[VAL_36:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_33]]] : memref<?xindex>
+// CHECK:               %[[VAL_37:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_34]]] : memref<?xindex>
+// CHECK:               %[[VAL_38:.*]] = arith.cmpi ult, %[[VAL_37]], %[[VAL_36]] : index
+// CHECK:               %[[VAL_39:.*]] = select %[[VAL_38]], %[[VAL_37]], %[[VAL_36]] : index
+// CHECK:               %[[VAL_40:.*]] = arith.cmpi eq, %[[VAL_36]], %[[VAL_39]] : index
+// CHECK:               %[[VAL_41:.*]] = arith.cmpi eq, %[[VAL_37]], %[[VAL_39]] : index
+// CHECK:               %[[VAL_42:.*]] = arith.andi %[[VAL_40]], %[[VAL_41]] : i1
+// CHECK:               %[[VAL_43:.*]] = scf.if %[[VAL_42]] -> (f64) {
+// CHECK:                 %[[VAL_44:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_33]]] : memref<?xf64>
+// CHECK:                 %[[VAL_45:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_34]]] : memref<?xf64>
+// CHECK:                 %[[VAL_46:.*]] = arith.addf %[[VAL_44]], %[[VAL_45]] : f64
+// CHECK:                 %[[VAL_47:.*]] = arith.addf %[[VAL_35]], %[[VAL_46]] : f64
+// CHECK:                 scf.yield %[[VAL_47]] : f64
+// CHECK:               } else {
+// CHECK:                 %[[VAL_48:.*]] = arith.cmpi eq, %[[VAL_36]], %[[VAL_39]] : index
+// CHECK:                 %[[VAL_49:.*]] = scf.if %[[VAL_48]] -> (f64) {
+// CHECK:                   %[[VAL_50:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_33]]] : memref<?xf64>
+// CHECK:                   %[[VAL_51:.*]] = arith.addf %[[VAL_35]], %[[VAL_50]] : f64
+// CHECK:                   scf.yield %[[VAL_51]] : f64
+// CHECK:                 } else {
+// CHECK:                   %[[VAL_52:.*]] = arith.cmpi eq, %[[VAL_37]], %[[VAL_39]] : index
+// CHECK:                   %[[VAL_53:.*]] = scf.if %[[VAL_52]] -> (f64) {
+// CHECK:                     %[[VAL_54:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_34]]] : memref<?xf64>
+// CHECK:                     %[[VAL_55:.*]] = arith.addf %[[VAL_35]], %[[VAL_54]] : f64
+// CHECK:                     scf.yield %[[VAL_55]] : f64
+// CHECK:                   } else {
+// CHECK:                     scf.yield %[[VAL_35]] : f64
+// CHECK:                   }
+// CHECK:                   scf.yield %[[VAL_56:.*]] : f64
+// CHECK:                 }
+// CHECK:                 scf.yield %[[VAL_57:.*]] : f64
+// CHECK:               }
+// CHECK:               %[[VAL_58:.*]] = arith.cmpi eq, %[[VAL_36]], %[[VAL_39]] : index
+// CHECK:               %[[VAL_59:.*]] = arith.addi %[[VAL_33]], %[[VAL_8]] : index
+// CHECK:               %[[VAL_60:.*]] = select %[[VAL_58]], %[[VAL_59]], %[[VAL_33]] : index
+// CHECK:               %[[VAL_61:.*]] = arith.cmpi eq, %[[VAL_37]], %[[VAL_39]] : index
+// CHECK:               %[[VAL_62:.*]] = arith.addi %[[VAL_34]], %[[VAL_8]] : index
+// CHECK:               %[[VAL_63:.*]] = select %[[VAL_61]], %[[VAL_62]], %[[VAL_34]] : index
+// CHECK:               scf.yield %[[VAL_60]], %[[VAL_63]], %[[VAL_64:.*]] : index, index, f64
+// CHECK:             }
+// CHECK:             %[[VAL_65:.*]] = vector.insertelement %[[VAL_66:.*]]#2, %[[VAL_3]]{{\[}}%[[VAL_5]] : i32] : vector<8xf64>
+// CHECK:             %[[VAL_67:.*]] = scf.for %[[VAL_68:.*]] = %[[VAL_66]]#0 to %[[VAL_22]] step %[[VAL_4]] iter_args(%[[VAL_69:.*]] = %[[VAL_65]]) -> (vector<8xf64>) {
+// CHECK:               %[[VAL_70:.*]] = affine.min #map(%[[VAL_22]], %[[VAL_68]])
+// CHECK:               %[[VAL_71:.*]] = vector.create_mask %[[VAL_70]] : vector<8xi1>
+// CHECK:               %[[VAL_72:.*]] = vector.maskedload %[[VAL_11]]{{\[}}%[[VAL_68]]], %[[VAL_71]], %[[VAL_3]] : memref<?xf64>, vector<8xi1>, vector<8xf64> into vector<8xf64>
+// CHECK:               %[[VAL_73:.*]] = arith.addf %[[VAL_69]], %[[VAL_72]] : vector<8xf64>
+// CHECK:               %[[VAL_74:.*]] = select %[[VAL_71]], %[[VAL_73]], %[[VAL_69]] : vector<8xi1>, vector<8xf64>
+// CHECK:               scf.yield %[[VAL_74]] : vector<8xf64>
+// CHECK:             }
+// CHECK:             %[[VAL_75:.*]] = scf.for %[[VAL_76:.*]] = %[[VAL_66]]#1 to %[[VAL_25]] step %[[VAL_4]] iter_args(%[[VAL_77:.*]] = %[[VAL_78:.*]]) -> (vector<8xf64>) {
+// CHECK:               %[[VAL_79:.*]] = affine.min #map(%[[VAL_25]], %[[VAL_76]])
+// CHECK:               %[[VAL_80:.*]] = vector.create_mask %[[VAL_79]] : vector<8xi1>
+// CHECK:               %[[VAL_81:.*]] = vector.maskedload %[[VAL_14]]{{\[}}%[[VAL_76]]], %[[VAL_80]], %[[VAL_3]] : memref<?xf64>, vector<8xi1>, vector<8xf64> into vector<8xf64>
+// CHECK:               %[[VAL_82:.*]] = arith.addf %[[VAL_77]], %[[VAL_81]] : vector<8xf64>
+// CHECK:               %[[VAL_83:.*]] = select %[[VAL_80]], %[[VAL_82]], %[[VAL_77]] : vector<8xi1>, vector<8xf64>
+// CHECK:               scf.yield %[[VAL_83]] : vector<8xf64>
+// CHECK:             }
+// CHECK:             %[[VAL_84:.*]] = vector.reduction "add", %[[VAL_85:.*]] : vector<8xf64> into f64
+// CHECK:             scf.yield %[[VAL_84]] : f64
+// CHECK:           }
+// CHECK:           memref.store %[[VAL_86:.*]], %[[VAL_15]][] : memref<f64>
+// CHECK:           %[[VAL_87:.*]] = memref.tensor_load %[[VAL_15]] : memref<f64>
+// CHECK:           return %[[VAL_87]] : tensor<f64>
+// CHECK:         }
+func @sparse_matrix_sum(%argx: tensor<f64> {linalg.inplaceable = true},
+                         %arga: tensor<64x32xf64, #SparseMatrix>,
+                         %argb: tensor<64x32xf64, #SparseMatrix>) -> tensor<f64> {
+  %0 = linalg.generic #trait
+     ins(%arga, %argb: tensor<64x32xf64, #SparseMatrix>,
+                       tensor<64x32xf64, #SparseMatrix>)
+      outs(%argx: tensor<f64>) {
+      ^bb(%a: f64, %b: f64, %x: f64):
+        %m = arith.addf %a, %b : f64
+        %t = arith.addf %x, %m : f64
+        linalg.yield %t : f64
+  } -> tensor<f64>
+  return %0 : tensor<f64>
+}

More information about the Mlir-commits mailing list