[Mlir-commits] [mlir] [mlir][vector] Add mask to transfer_read/transfer_write when vectorizing (PR #76936)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Thu Jan 4 02:35:33 PST 2024


llvmbot wrote:



@llvm/pr-subscribers-mlir

Author: Hsiangkai Wang (Hsiangkai)

Changes

When vectorizing rank-1 loops, we can add a mask to transfer_read and transfer_write when the loop trip count is not divisible by the vector size.
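
For illustration, a rough sketch of the IR this produces for a 1-D loop whose trip count may not be a multiple of the vector size; the names are illustrative and mirror the test checks in the diff below, not output copied from the pass:

```mlir
// Vectorizing with vector size 128: the remaining trip count is
// recomputed each iteration and turned into a per-lane mask.
func.func @masked_read_sketch(%A : memref<?x?xf32>, %M : index) {
  %c0 = arith.constant 0 : index
  affine.for %i = 0 to %M step 128 {
    // Remaining iterations: %M - %i.
    %rem = affine.apply affine_map<(d0)[s0] -> (-d0 + s0)>(%i)[%M]
    // First %rem lanes enabled, the rest disabled.
    %mask = vector.create_mask %rem : vector<128xi1>
    %cst = arith.constant 0.0 : f32
    // Masked read: disabled lanes take the padding value %cst.
    %v = vector.transfer_read %A[%c0, %i], %cst, %mask
        : memref<?x?xf32>, vector<128xf32>
  }
  return
}
```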

---
Full diff: https://github.com/llvm/llvm-project/pull/76936.diff


2 Files Affected:

- (modified) mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp (+54-5) 
- (modified) mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir (+11-3) 


``````````diff
diff --git a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
index 6b7a157925fae1..ec0e65bc5df892 100644
--- a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
@@ -1200,8 +1200,32 @@ static Operation *vectorizeAffineLoad(AffineLoadOp loadOp,
   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
   LLVM_DEBUG(permutationMap.print(dbgs()));
 
-  auto transfer = state.builder.create<vector::TransferReadOp>(
-      loadOp.getLoc(), vectorType, loadOp.getMemRef(), indices, permutationMap);
+  VectorType inferredMaskType =
+      inferTransferOpMaskType(vectorType, permutationMap);
+  auto inferredShape = inferredMaskType.getShape();
+  Value mask = Value();
+  int vectorRank = state.strategy->vectorSizes.size();
+  if (vectorRank == 1 && inferredShape.size() == 1 &&
+      inferredShape[0] == state.strategy->vectorSizes[0]) {
+    Operation *loopOp = state.builder.getInsertionBlock()->getParentOp();
+    auto affineFor = cast<AffineForOp>(loopOp);
+    if (affineFor.getStep() == state.strategy->vectorSizes[0])
+      mask = createMask(affineFor, state);
+  }
+
+  Operation *transfer;
+  Location loc = loadOp.getLoc();
+  if (mask) {
+    Value padding = state.builder.create<arith::ConstantOp>(
+        loc, elementType, state.builder.getZeroAttr(elementType));
+    SmallVector<bool> inBounds(vectorRank, false);
+    transfer = state.builder.create<vector::TransferReadOp>(
+        loc, vectorType, loadOp.getMemRef(), indices, permutationMap, padding,
+        mask, state.builder.getBoolArrayAttr(inBounds));
+  } else {
+    transfer = state.builder.create<vector::TransferReadOp>(
+        loc, vectorType, loadOp.getMemRef(), indices, permutationMap);
+  }
 
   // Register replacement for future uses in the scope.
   state.registerOpVectorReplacement(loadOp, transfer);
@@ -1243,9 +1267,34 @@ static Operation *vectorizeAffineStore(AffineStoreOp storeOp,
   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
   LLVM_DEBUG(permutationMap.print(dbgs()));
 
-  auto transfer = state.builder.create<vector::TransferWriteOp>(
-      storeOp.getLoc(), vectorValue, storeOp.getMemRef(), indices,
-      permutationMap);
+  Type elementType = memRefType.getElementType();
+  auto vectorType = VectorType::get(state.strategy->vectorSizes, elementType);
+  VectorType inferredMaskType =
+      inferTransferOpMaskType(vectorType, permutationMap);
+  auto inferredShape = inferredMaskType.getShape();
+  Value mask = Value();
+  int vectorRank = state.strategy->vectorSizes.size();
+  if (vectorRank == 1 && inferredShape.size() == 1 &&
+      inferredShape[0] == state.strategy->vectorSizes[0]) {
+    Operation *loopOp = state.builder.getInsertionBlock()->getParentOp();
+    auto affineFor = cast<AffineForOp>(loopOp);
+    if (affineFor.getStep() == state.strategy->vectorSizes[0])
+      mask = createMask(affineFor, state);
+  }
+
+  Operation *transfer;
+  Location loc = storeOp.getLoc();
+  if (mask) {
+    SmallVector<bool> inBounds(vectorRank, false);
+    transfer = state.builder.create<vector::TransferWriteOp>(
+        loc, vectorValue, storeOp.getMemRef(), indices,
+        AffineMapAttr::get(permutationMap), mask,
+        state.builder.getBoolArrayAttr(inBounds));
+  } else {
+    transfer = state.builder.create<vector::TransferWriteOp>(
+        loc, vectorValue, storeOp.getMemRef(), indices, permutationMap);
+  }
+
   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << transfer);
 
   // Register replacement for future uses in the scope.
diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
index 9244604128cb72..6ed450cd85f646 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
@@ -31,6 +31,8 @@ func.func @vec1d_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 
 // -----
 
+// CHECK-DAG: #[[$map_id1:map[0-9a-zA-Z_]*]] = affine_map<(d0)[s0] -> (-d0 + s0)>
+
 // CHECK-LABEL: func @vec1d_2
 func.func @vec1d_2(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
@@ -47,8 +49,10 @@ func.func @vec1d_2(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
    %P = memref.dim %B, %c2 : memref<?x?x?xf32>
 
 // CHECK:for [[IV3:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128
+// CHECK-NEXT: %[[S1:.*]] = affine.apply #[[$map_id1]]([[IV3]])[[[ARG_M]]]
+// CHECK-NEXT: %[[S2:.*]] = vector.create_mask %[[S1]] : vector<128xi1>
 // CHECK-NEXT: %[[CST:.*]] = arith.constant 0.0{{.*}}: f32
-// CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %[[CST]] : memref<?x?xf32>, vector<128xf32>
+// CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %[[CST]], %[[S2]] : memref<?x?xf32>, vector<128xf32>
    affine.for %i3 = 0 to %M { // vectorized
      %a3 = affine.load %A[%c0, %i3] : memref<?x?xf32>
    }
@@ -88,6 +92,8 @@ func.func @vec1d_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 
 // -----
 
+// CHECK-DAG: #[[$map_id1:map[0-9a-zA-Z_]*]] = affine_map<(d0)[s0] -> (-d0 + s0)>
+
 // CHECK-LABEL: func @vector_add_2d
 func.func @vector_add_2d(%M : index, %N : index) -> f32 {
   %A = memref.alloc (%M, %N) : memref<?x?xf32, 0>
@@ -115,8 +121,10 @@ func.func @vector_add_2d(%M : index, %N : index) -> f32 {
     affine.for %i5 = 0 to %N {
       // CHECK: %[[SPLAT2:.*]] = arith.constant dense<2.000000e+00> : vector<128xf32>
       // CHECK: %[[SPLAT1:.*]] = arith.constant dense<1.000000e+00> : vector<128xf32>
-      // CHECK: %[[A5:.*]] = vector.transfer_read %{{.*}}[{{.*}}], %{{[a-zA-Z0-9_]*}} : memref<?x?xf32>, vector<128xf32>
-      // CHECK: %[[B5:.*]] = vector.transfer_read %{{.*}}[{{.*}}], %{{[a-zA-Z0-9_]*}} : memref<?x?xf32>, vector<128xf32>
+      // CHECK: %[[S1:.*]] = affine.apply #[[$map_id1]]
+      // CHECK: %[[S2:.*]] = vector.create_mask %[[S1]] : vector<128xi1>
+      // CHECK: %[[A5:.*]] = vector.transfer_read %{{.*}}[{{.*}}], %{{[a-zA-Z0-9_]*}}, %[[S2]] : memref<?x?xf32>, vector<128xf32>
+      // CHECK: %[[B5:.*]] = vector.transfer_read %{{.*}}[{{.*}}], %{{[a-zA-Z0-9_]*}}, %[[S2]] : memref<?x?xf32>, vector<128xf32>
       // CHECK: %[[S5:.*]] = arith.addf %[[A5]], %[[B5]] : vector<128xf32>
       // CHECK: %[[S6:.*]] = arith.addf %[[S5]], %[[SPLAT1]] : vector<128xf32>
       // CHECK: %[[S7:.*]] = arith.addf %[[S5]], %[[SPLAT2]] : vector<128xf32>

``````````
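
The store path in the diff builds the analogous masked vector.transfer_write. A sketch of the IR it would emit (not taken from the tests; names are illustrative):

```mlir
func.func @masked_write_sketch(%A : memref<?x?xf32>, %M : index,
                               %val : vector<128xf32>) {
  %c0 = arith.constant 0 : index
  affine.for %i = 0 to %M step 128 {
    %rem = affine.apply affine_map<(d0)[s0] -> (-d0 + s0)>(%i)[%M]
    %mask = vector.create_mask %rem : vector<128xi1>
    // Only the lanes enabled by %mask are written back to memory.
    vector.transfer_write %val, %A[%c0, %i], %mask
        : vector<128xf32>, memref<?x?xf32>
  }
  return
}
```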



https://github.com/llvm/llvm-project/pull/76936

