[Mlir-commits] [mlir] 46ea07b - [mlir][transform] Extend `MaskedVectorizeOp` to work for regular vectorization too

Andrzej Warzynski llvmlistbot at llvm.org
Fri Sep 1 08:20:24 PDT 2023


Author: Andrzej Warzynski
Date: 2023-09-01T16:19:55+01:00
New Revision: 46ea07bb140ee93f68f10b83b0b6f04aa2908129

URL: https://github.com/llvm/llvm-project/commit/46ea07bb140ee93f68f10b83b0b6f04aa2908129
DIFF: https://github.com/llvm/llvm-project/commit/46ea07bb140ee93f68f10b83b0b6f04aa2908129.diff

LOG: [mlir][transform] Extend `MaskedVectorizeOp` to work for regular vectorization too

This patch extends MaskedVectorizeOp so that it can be used for
"regular" (as opposed to "masked") vectorization as well. While VectorizeOp
can already be used for "regular" vectorization, that Op also applies
various patterns on top of vectorization. That means that, at the moment,
testing the vectorizer with VectorizeOp effectively tests "vectorization +
patterns", i.e. two things at a time.

With these updates, you can trigger "regular" vectorization with
MaskedVectorizeOp by simply omitting the vector sizes:

  transform.structured.masked_vectorize %target : !transform.any_op

Following this change, we should probably also rename this Op.
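
For reference, a minimal transform sequence exercising the new sizeless form
(taken from the updated vectorization.mlir test below) is:

  transform.sequence failures(propagate) {
  ^bb1(%arg1: !transform.any_op):
    %0 = transform.structured.match ops{["linalg.dot"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.masked_vectorize %0 : !transform.any_op
  }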

Differential Revision: https://reviews.llvm.org/D157774

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
    mlir/test/Dialect/Linalg/vectorization.mlir
    mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index 3777fe2c87bba6..71db26999a4944 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -2079,8 +2079,20 @@ def MaskedVectorizeOp : Op<Transform_Dialect, "structured.masked_vectorize",
     [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
      TransformOpInterface, ReportTrackingListenerFailuresOpTrait]> {
   let description = [{
-    Vectorize the target ops, which must be Linalg ops, with masked vectors
-    of the specified size.
+    Vectorize the target ops, which must be Linalg ops. 
+
+    Use the optional vector sizes to specify exactly what configuration the
+    vectorizer should use. It will then use masked vectors of the specified
+    size to enforce this configuration ("masked vectorization"). If no vector
+    sizes are specified, the vectorizer will infer the shapes to use from the
+    target Linalg ops ("regular vectorization"). More specifically:
+
+    ```mlir
+    # Masked vectorization - vector sizes are specified explicitly
+    transform.structured.masked_vectorize %target vector_sizes [1, 4] : !transform.any_op
+    # Regular vectorization - vector sizes are inferred from the target Op
+    transform.structured.masked_vectorize %target : !transform.any_op
+    ```
 
     The vector sizes can be either static or dynamic (SSA values). In case of
     SSA values, the handle must be mapped to exactly one payload op with
@@ -2090,14 +2102,14 @@ def MaskedVectorizeOp : Op<Transform_Dialect, "structured.masked_vectorize",
     counterpart iteration space sizes.
 
     Typically this operator should be applied to linalg operations that have
-    already be tiled to the appropriate sizes.
+    already been tiled to the appropriate sizes.
 
     #### Return modes:
 
-    This operation produces a definite failure if the dynamic vector sizes (SSA
-    values) do not satisfy the constraints mentioned above. It produces a
-    silenceable failure if at least one target op is not a Linalg op or fails to
-    vectorize.
+    This operation produces a silenceable failure if at least one target op is
+    not a Linalg op or fails to vectorize. It produces a definite failure if
+    the dynamic vector sizes (SSA values) do not satisfy the constraints
+    mentioned above. 
   }];
 
   let arguments = (ins TransformHandleTypeInterface:$target,
@@ -2110,11 +2122,12 @@ def MaskedVectorizeOp : Op<Transform_Dialect, "structured.masked_vectorize",
 
   let results = (outs);
   let assemblyFormat = [{
-      $target
+      $target oilist(
       `vector_sizes` custom<DynamicIndexList>($vector_sizes,
                                               $static_vector_sizes,
                                               type($vector_sizes),
                                               $scalable_sizes)
+      )
       attr-dict
       `:` type($target)
   }];
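
(Aside: the hunk above does not show the dynamic-size form. Per the updated
description, vector sizes may also be SSA values; a hypothetical invocation
mixing one dynamic and one static size, assuming the usual DynamicIndexList
assembly syntax, would look like:

  transform.structured.masked_vectorize %target vector_sizes [%sz : !transform.any_op, 4] : !transform.any_op

where %sz is a handle that must map to exactly one payload op with a single
index-typed result, as the description requires.)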

diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index 465558721f969e..d14246b18fd133 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -13,8 +13,7 @@ func.func @contraction_dot(%A: memref<1584xf32>, %B: memref<1584xf32>, %C: memre
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.dot"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1  { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+  transform.structured.masked_vectorize %0  : !transform.any_op
 }
 
 // -----

diff --git a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
index 7c75e0ff3044d9..84e36c8912c650 100644
--- a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
+++ b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
@@ -35,13 +35,12 @@ transform.sequence failures(propagate) {
 }
 
 // -----
-
-#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+#map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 func.func @vectorize_nd_tensor_extract_constant_idx(%arg0: tensor<3x3xf32>, %arg2: tensor<1x1x3xf32>) -> tensor<1x1x3xf32> {
   %c0 = arith.constant 1 : index
   %c1 = arith.constant 2 : index
   %2 = linalg.generic {
-    indexing_maps = [#map1],
+    indexing_maps = [#map],
     iterator_types = ["parallel", "parallel", "parallel"]
   } outs(%arg2 : tensor<1x1x3xf32>) {
   ^bb0(%arg4: f32):
@@ -51,23 +50,22 @@ func.func @vectorize_nd_tensor_extract_constant_idx(%arg0: tensor<3x3xf32>, %arg
   return %2 : tensor<1x1x3xf32>
 }
 
+// CHECK: #[[$MAP:.*]] = affine_map<(d0, d1) -> (0, 0, 0)>
 // CHECK-LABEL:   func.func @vectorize_nd_tensor_extract_constant_idx(
 // CHECK-SAME:      %[[ARG_0:.*]]: tensor<3x3xf32>,
 // CHECK-SAME:      %[[ARG_1:.*]]: tensor<1x1x3xf32>) -> tensor<1x1x3xf32> {
-// CHECK:           %[[C0:.*]] = arith.constant 0 : index
-// CHECK:           %[[C1:.*]] = arith.constant 1 : index
-// CHECK:           %[[C2:.*]] = arith.constant 2 : index
-// CHECK:           %[[EXTRACT:.*]] = tensor.extract %[[ARG_0]]{{\[}}%[[C1]], %[[C2]]] : tensor<3x3xf32>
-// CHECK:           %[[BCAST:.*]] = vector.broadcast %[[EXTRACT]] : f32 to vector<1x1x3xf32>
-// CHECK:           %[[VAL_7:.*]] = vector.transfer_write %[[BCAST]], %[[ARG_1]][%[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true, true, true]} : vector<1x1x3xf32>, tensor<1x1x3xf32>
-// CHECK:           return %[[VAL_7]] : tensor<1x1x3xf32>
-// CHECK:         }
+// CHECK-DAG:       %[[C1:.*]] = arith.constant 1 : index
+// CHECK-DAG:       %[[C2:.*]] = arith.constant 2 : index
+// CHECK-DAG:       arith.constant 0.000000e+00 : f32
+// CHECK-DAG:       %[[C0_f32:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK:           %[[READ:.*]] = vector.transfer_read  %[[ARG_0]][%[[C1]], %[[C2]]], %[[C0_f32]] {in_bounds = [true, true, true], permutation_map = #[[$MAP]]} : tensor<3x3xf32>, vector<1x1x3xf32>
+// CHECK:           %[[C0_4:.*]] = arith.constant 0 : index
+// CHECK:           vector.transfer_write %[[READ]], %[[ARG_1]][%[[C0_4]], %[[C0_4]], %[[C0_4]]]  : vector<1x1x3xf32>, tensor<1x1x3xf32>
 
 transform.sequence failures(propagate) {
  ^bb1(%arg1: !transform.any_op):
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-   %2 = transform.structured.vectorize %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+  transform.structured.masked_vectorize %0 { vectorize_nd_extract }  : !transform.any_op
  }
 
 // -----
