[Mlir-commits] [mlir] [MLIR][Vector] Fix transferOps optimization inside maskOp (PR #90835)
Hugo Trachino
llvmlistbot at llvm.org
Thu May 9 03:44:10 PDT 2024
https://github.com/nujaa updated https://github.com/llvm/llvm-project/pull/90835
From 57863a4ee08bc7ec64e7ffcb8e4fe1f0978f57ba Mon Sep 17 00:00:00 2001
From: Hugo <hugo.trachino at huawei.com>
Date: Wed, 1 May 2024 18:49:16 +0800
Subject: [PATCH 1/3] [MLIR][Vector] Fix transferOps optimization inside maskOp
---
.../Tensor/Transforms/FoldTensorSubsetOps.cpp | 6 ++++
.../Vector/Transforms/LowerVectorTransfer.cpp | 9 ++++++
...nsor-subset-ops-into-vector-transfers.mlir | 15 +++++++++
.../vector-transfer-permutation-lowering.mlir | 31 +++++++++++++++++++
4 files changed, 61 insertions(+)
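
A note on the approach in this first patch: a vector.mask region may only
contain the single maskable op it masks (plus its terminator), so a pattern
that creates extra ops next to a masked transfer_read/transfer_write produces
invalid IR. The diffs below therefore either hop the insertion point outside
the mask before creating the index-resolution ops (FoldTensorSubsetOps.cpp) or
bail out of the rewrite entirely (LowerVectorTransfer.cpp). The bail-out guard
boils down to the following sketch (assuming the matched transfer op `op` and
the usual MLIR pattern boilerplate):

  // Refuse to rewrite a transfer op whose parent is a vector.mask region;
  // creating additional ops inside that region is not legal.
  if (isa<vector::MaskOp>(op->getParentOp()))
    return rewriter.notifyMatchFailure(
        op, "cannot expand a transfer op inside a vector.mask region");
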
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
index 3b8d3708bb731..ac63f93c1d756 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
@@ -100,11 +100,17 @@ LogicalResult TransferReadOfExtractSliceOpFolder::matchAndRewrite(
SmallVector<Value> indices(readOp.getIndices().begin(),
readOp.getIndices().end());
SmallVector<Value> sourceIndices;
+ // In case transfer_read is located inside a MaskOp we want to avoid creating
+ // more ops inside it.
+ if (isa<vector::MaskOp>(readOp->getParentOp()))
+ rewriter.setInsertionPoint(readOp->getParentOp());
affine::resolveIndicesIntoOpWithOffsetsAndStrides(
rewriter, readOp.getLoc(), extractSliceOp.getMixedOffsets(),
extractSliceOp.getMixedStrides(), extractSliceOp.getDroppedDims(),
indices, sourceIndices);
+ // Reset the insertion point.
+ rewriter.setInsertionPoint(readOp);
rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
readOp, readOp.getVectorType(), extractSliceOp.getSource(), sourceIndices,
AffineMapAttr::get(expandDimsToRank(
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
index b30b43d70bf0f..51a9d52cbe388 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
@@ -98,6 +98,9 @@ struct TransferReadPermutationLowering
// TODO: support 0-d corner case.
if (op.getTransferRank() == 0)
return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
+ if (isa<vector::MaskOp>(op->getParentOp()))
+ return rewriter.notifyMatchFailure(
+ op, "Cannot expand transfer read inside a Mask Op");
SmallVector<unsigned> permutation;
AffineMap map = op.getPermutationMap();
@@ -173,6 +176,9 @@ struct TransferWritePermutationLowering
// TODO: support 0-d corner case.
if (op.getTransferRank() == 0)
return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
+ if (isa<vector::MaskOp>(op->getParentOp()))
+ return rewriter.notifyMatchFailure(
+ op, "Cannot expand transfer write inside a Mask Op");
SmallVector<unsigned> permutation;
AffineMap map = op.getPermutationMap();
@@ -239,6 +245,9 @@ struct TransferWriteNonPermutationLowering
// TODO: support 0-d corner case.
if (op.getTransferRank() == 0)
return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
+ if (isa<vector::MaskOp>(op->getParentOp()))
+ return rewriter.notifyMatchFailure(
+ op, "Cannot expand transfer write inside a Mask Op");
SmallVector<unsigned> permutation;
AffineMap map = op.getPermutationMap();
diff --git a/mlir/test/Dialect/Tensor/fold-tensor-subset-ops-into-vector-transfers.mlir b/mlir/test/Dialect/Tensor/fold-tensor-subset-ops-into-vector-transfers.mlir
index 6213db3956f9a..214b41461b98f 100644
--- a/mlir/test/Dialect/Tensor/fold-tensor-subset-ops-into-vector-transfers.mlir
+++ b/mlir/test/Dialect/Tensor/fold-tensor-subset-ops-into-vector-transfers.mlir
@@ -111,3 +111,18 @@ func.func @insert_slice_of_transfer_write_rank_extending(%t1 : tensor<?x?x12xf32
%1 = tensor.insert_slice %0 into %t1[4, 3, %s] [1, 5, 6] [1, 1, 1] : tensor<5x6xf32> into tensor<?x?x12xf32>
return %1 : tensor<?x?x12xf32>
}
+
+// CHECK-LABEL: func @masked_transfer_read_of_extract_slice
+// CHECK-SAME: %[[t:.*]]: tensor<?x?xf32>, %[[s1:.*]]: index, %[[s2:.*]]: index
+// CHECK-DAG: %[[m:.*]] = vector.create_mask{{.*}} : vector<5x6xi1>
+// CHECK-DAG: %[[a:.*]] = affine.apply {{.*}}[[s1]]
+// CHECK: vector.mask %[[m]] { vector.transfer_read %[[t]]{{.*}}: tensor<?x?xf32>, vector<5x6xf32> } : vector<5x6xi1> -> vector<5x6xf32>
+func.func @masked_transfer_read_of_extract_slice(%t : tensor<?x?xf32>, %s1 : index, %s2 : index) -> vector<5x6xf32> {
+ %c3 = arith.constant 3 : index
+ %c4 = arith.constant 4 : index
+ %cst = arith.constant 0.0 : f32
+ %0 = tensor.extract_slice %t[5, %s1] [10, %s2] [1, 1] : tensor<?x?xf32> to tensor<10x?xf32>
+ %mask = vector.create_mask %c3, %c4 : vector<5x6xi1>
+ %1 = vector.mask %mask {vector.transfer_read %0[%c3, %c4], %cst {in_bounds = [true, true]} : tensor<10x?xf32>, vector<5x6xf32>} : vector<5x6xi1> -> vector<5x6xf32>
+ return %1 : vector<5x6xf32>
+}
diff --git a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
index 31bd19c0be8e8..ec2cd478923cc 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
@@ -59,6 +59,37 @@ func.func @permutation_with_mask_transfer_write_scalable(%arg0: vector<4x[8]xi16
return
}
+
+
+#map = affine_map<(d0)[s0] -> (-d0 + s0, 4)>
+#map1 = affine_map<(d0, d1) -> (d0, 0, d1)>
+// CHECK-LABEL: func @masked_permutation_transfer_read
+// CHECK-SAME: %[[ARG_0:.*]]: tensor<?x1xf32>,
+// CHECK-SAME: %[[ARG_1:.*]]: vector<4x1xi1>
+// CHECK: vector.transfer_read %[[ARG_0]]{{.*}}: tensor<?x1xf32>, vector<4x4x1xf32> } : vector<4x1xi1> -> vector<4x4x1xf32>
+func.func @masked_permutation_transfer_read(%arg0: tensor<?x1xf32>, %mask : vector<4x1xi1>) {
+ %cst = arith.constant 0.000000e+00 : f32
+ %c0 = arith.constant 0 : index
+ %3 = vector.mask %mask { vector.transfer_read %arg0[%c0, %c0], %cst {permutation_map = #map1} : tensor<?x1xf32>, vector<4x4x1xf32> } : vector<4x1xi1> -> vector<4x4x1xf32>
+ call @dostuff(%3) : (vector<4x4x1xf32>) -> ()
+ return
+}
+func.func private @dostuff(vector<4x4x1xf32>)
+
+
+// CHECK-LABEL: func @masked_permutation_transfer_write
+// CHECK-SAME: %[[ARG_0:.*]]: tensor<?x?xf32>,
+// CHECK-SAME: %[[ARG_1:.*]]: vector<16xf32>,
+// CHECK-SAME: %[[IDX:.*]]: index,
+// CHECK-SAME: %[[MASK:.*]]: vector<16xi1>
+// CHECK: %[[RES:.*]] = vector.mask %[[MASK]] { vector.transfer_write %[[ARG_1]], %[[ARG_0]][%[[IDX]], %[[IDX]]] {{.*}} vector<16xf32>, tensor<?x?xf32> } : vector<16xi1> -> tensor<?x?xf32>
+// CHECK: return %[[RES]]
+func.func @masked_permutation_transfer_write(%t: tensor<?x?xf32>, %val: vector<16xf32>, %idx: index, %m0: vector<16xi1>) -> tensor<?x?xf32> {
+ %r = vector.mask %m0 { vector.transfer_write %val, %t[%idx, %idx] {permutation_map = affine_map<(d0, d1) -> (d0)>} : vector<16xf32>, tensor<?x?xf32> } : vector<16xi1> -> tensor<?x?xf32>
+ return %r : tensor<?x?xf32>
+}
+
+
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
%f = transform.structured.match ops{["func.func"]} in %module_op
From fa53c9c50701c0b97c0e27fe606d1b7e0d317761 Mon Sep 17 00:00:00 2001
From: Hugo <hugo.trachino at huawei.com>
Date: Tue, 7 May 2024 22:29:09 +0800
Subject: [PATCH 2/3] Use MaskableOp interface for FoldTensorSubsetOps and
LowerVectorTransfer
---
.../Dialect/Tensor/Transforms/CMakeLists.txt | 1 +
.../Tensor/Transforms/FoldTensorSubsetOps.cpp | 33 ++--
.../Vector/Transforms/LowerVectorTransfer.cpp | 155 ++++++++++--------
3 files changed, 104 insertions(+), 85 deletions(-)
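
A note on this second revision: instead of the ad-hoc parent-op checks from
the first patch, the patterns now derive from vector::MaskableOpRewritePattern
(the new include and the MLIRVectorUtils dependency below suggest it is
declared in mlir/Dialect/Vector/Utils/VectorUtils.h). Roughly, and this is a
simplified sketch from memory rather than the exact upstream code: the base
class detects whether the matched op is the body of a masking op, moves the
insertion point outside the mask region, invokes the derived hook with the
MaskingOpInterface handle, and finally replaces either the op or its enclosing
mask with the returned value.

  // Simplified sketch of the base class; assumes mlir/IR/PatternMatch.h and
  // mlir/Dialect/Vector/IR/VectorOps.h, with namespace mlir in scope.
  template <typename SourceOp>
  struct MaskableOpRewritePattern : OpRewritePattern<SourceOp> {
    using OpRewritePattern<SourceOp>::OpRewritePattern;

    LogicalResult matchAndRewrite(SourceOp op,
                                  PatternRewriter &rewriter) const final {
      // Is `op` the single op masked by a vector.mask (MaskingOpInterface)?
      auto maskOp =
          dyn_cast_if_present<vector::MaskingOpInterface>(op->getParentOp());
      // New ops must be created outside the mask region.
      if (maskOp)
        rewriter.setInsertionPoint(maskOp);
      FailureOr<Value> newValue =
          matchAndRewriteMaskableOp(op, maskOp, rewriter);
      if (failed(newValue))
        return failure();
      // Replace the masking op when present, otherwise the transfer op itself.
      rewriter.replaceOp(maskOp ? maskOp.getOperation() : op.getOperation(),
                         *newValue);
      return success();
    }

    virtual FailureOr<Value>
    matchAndRewriteMaskableOp(SourceOp op, vector::MaskingOpInterface maskOp,
                              PatternRewriter &rewriter) const = 0;
  };

With that in place, the patterns below only have to return the replacement
Value, or call notifyMatchFailure when the masked form is not supported, which
is exactly the shape of the hunks that follow.
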
diff --git a/mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
index c6ef6ed86e0d9..0aabdaf667b9d 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
@@ -39,5 +39,6 @@ add_mlir_dialect_library(MLIRTensorTransforms
MLIRTilingInterface
MLIRTransforms
MLIRVectorDialect
+ MLIRVectorUtils
MLIRValueBoundsOpInterface
)
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
index ac63f93c1d756..9bde33d3e5556 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
@@ -18,6 +18,7 @@
#include "mlir/Dialect/Tensor/Transforms/Transforms.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
+#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
@@ -48,12 +49,14 @@ static Value getTensorOperand(tensor::InsertSliceOp op) {
namespace {
/// Merge extract_slice operation with load/transferRead operation.
class TransferReadOfExtractSliceOpFolder final
- : public OpRewritePattern<vector::TransferReadOp> {
+ : public vector::MaskableOpRewritePattern<vector::TransferReadOp> {
public:
- using OpRewritePattern<vector::TransferReadOp>::OpRewritePattern;
+ using MaskableOpRewritePattern::MaskableOpRewritePattern;
- LogicalResult matchAndRewrite(vector::TransferReadOp readOp,
- PatternRewriter &rewriter) const override;
+ FailureOr<mlir::Value>
+ matchAndRewriteMaskableOp(vector::TransferReadOp readOp,
+ vector::MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const override;
};
/// Merge insert_slice operation with store/transferWriteOp operation.
@@ -84,8 +87,10 @@ static LogicalResult preconditionsFoldExtractOrInsertWithTransferOp(
return success();
}
-LogicalResult TransferReadOfExtractSliceOpFolder::matchAndRewrite(
- vector::TransferReadOp readOp, PatternRewriter &rewriter) const {
+FailureOr<mlir::Value>
+TransferReadOfExtractSliceOpFolder::matchAndRewriteMaskableOp(
+ vector::TransferReadOp readOp, vector::MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const {
auto extractSliceOp =
getTensorOperand(readOp).getDefiningOp<tensor::ExtractSliceOp>();
if (!extractSliceOp)
@@ -95,31 +100,29 @@ LogicalResult TransferReadOfExtractSliceOpFolder::matchAndRewrite(
preconditionsFoldExtractOrInsertWithTransferOp(rewriter, readOp,
extractSliceOp);
if (failed(preconditionResult))
- return preconditionResult;
+ return rewriter.notifyMatchFailure(readOp, "Failed preconditions");
SmallVector<Value> indices(readOp.getIndices().begin(),
readOp.getIndices().end());
SmallVector<Value> sourceIndices;
// In case transfer_read is located inside a MaskOp we want to avoid creating
// more ops inside it.
- if (isa<vector::MaskOp>(readOp->getParentOp()))
- rewriter.setInsertionPoint(readOp->getParentOp());
affine::resolveIndicesIntoOpWithOffsetsAndStrides(
rewriter, readOp.getLoc(), extractSliceOp.getMixedOffsets(),
extractSliceOp.getMixedStrides(), extractSliceOp.getDroppedDims(),
indices, sourceIndices);
- // Reset the insertion point.
- rewriter.setInsertionPoint(readOp);
- rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
- readOp, readOp.getVectorType(), extractSliceOp.getSource(), sourceIndices,
+ Operation *newOp = rewriter.create<vector::TransferReadOp>(
+ readOp.getLoc(), readOp.getVectorType(), extractSliceOp.getSource(),
+ sourceIndices,
AffineMapAttr::get(expandDimsToRank(
readOp.getPermutationMap(), extractSliceOp.getSourceType().getRank(),
extractSliceOp.getDroppedDims())),
readOp.getPadding(),
/*mask=*/Value(), readOp.getInBoundsAttr());
-
- return success();
+ if (maskOp)
+ newOp = mlir::vector::maskOperation(rewriter, newOp, maskOp.getMask());
+ return newOp->getResults()[0];
}
LogicalResult InsertSliceOfTransferWriteOpFolder::matchAndRewrite(
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
index 51a9d52cbe388..d384165a83d0e 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
@@ -90,17 +90,18 @@ namespace {
/// Note that an alternative is to transform it to linalg.transpose +
/// vector.transfer_read to do the transpose in memory instead.
struct TransferReadPermutationLowering
- : public OpRewritePattern<vector::TransferReadOp> {
- using OpRewritePattern::OpRewritePattern;
+ : public MaskableOpRewritePattern<vector::TransferReadOp> {
+ using MaskableOpRewritePattern::MaskableOpRewritePattern;
- LogicalResult matchAndRewrite(vector::TransferReadOp op,
- PatternRewriter &rewriter) const override {
+ FailureOr<mlir::Value>
+ matchAndRewriteMaskableOp(vector::TransferReadOp op,
+ MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const override {
// TODO: support 0-d corner case.
if (op.getTransferRank() == 0)
return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
- if (isa<vector::MaskOp>(op->getParentOp()))
- return rewriter.notifyMatchFailure(
- op, "Cannot expand transfer read inside a Mask Op");
+ if (maskOp)
+ return rewriter.notifyMatchFailure(op, "Masked case not supported");
SmallVector<unsigned> permutation;
AffineMap map = op.getPermutationMap();
@@ -145,9 +146,9 @@ struct TransferReadPermutationLowering
// Transpose result of transfer_read.
SmallVector<int64_t> transposePerm(permutation.begin(), permutation.end());
- rewriter.replaceOpWithNewOp<vector::TransposeOp>(op, newRead,
- transposePerm);
- return success();
+ return rewriter
+ .create<vector::TransposeOp>(op.getLoc(), newRead, transposePerm)
+ .getResult();
}
};
@@ -168,17 +169,18 @@ struct TransferReadPermutationLowering
/// %v = vector.transfer_write %tmp ...
/// permutation_map: (d0, d1, d2, d3) -> (d2, d3)
struct TransferWritePermutationLowering
- : public OpRewritePattern<vector::TransferWriteOp> {
- using OpRewritePattern::OpRewritePattern;
+ : public MaskableOpRewritePattern<vector::TransferWriteOp> {
+ using MaskableOpRewritePattern::MaskableOpRewritePattern;
- LogicalResult matchAndRewrite(vector::TransferWriteOp op,
- PatternRewriter &rewriter) const override {
+ FailureOr<mlir::Value>
+ matchAndRewriteMaskableOp(vector::TransferWriteOp op,
+ MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const override {
// TODO: support 0-d corner case.
if (op.getTransferRank() == 0)
return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
- if (isa<vector::MaskOp>(op->getParentOp()))
- return rewriter.notifyMatchFailure(
- op, "Cannot expand transfer write inside a Mask Op");
+ if (maskOp)
+ return rewriter.notifyMatchFailure(op, "Masked case not supported");
SmallVector<unsigned> permutation;
AffineMap map = op.getPermutationMap();
@@ -213,11 +215,11 @@ struct TransferWritePermutationLowering
op.getLoc(), op.getVector(), indices);
auto newMap = AffineMap::getMinorIdentityMap(
map.getNumDims(), map.getNumResults(), rewriter.getContext());
- rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
- op, newVec, op.getSource(), op.getIndices(), AffineMapAttr::get(newMap),
- op.getMask(), newInBoundsAttr);
-
- return success();
+ return rewriter
+ .create<vector::TransferWriteOp>(
+ op.getLoc(), newVec, op.getSource(), op.getIndices(),
+ AffineMapAttr::get(newMap), op.getMask(), newInBoundsAttr)
+ .getResult();
}
};
@@ -237,17 +239,18 @@ struct TransferWritePermutationLowering
/// vector<1x8x16xf32>
/// ```
struct TransferWriteNonPermutationLowering
- : public OpRewritePattern<vector::TransferWriteOp> {
- using OpRewritePattern::OpRewritePattern;
+ : public MaskableOpRewritePattern<vector::TransferWriteOp> {
+ using MaskableOpRewritePattern::MaskableOpRewritePattern;
- LogicalResult matchAndRewrite(vector::TransferWriteOp op,
- PatternRewriter &rewriter) const override {
+ FailureOr<mlir::Value>
+ matchAndRewriteMaskableOp(vector::TransferWriteOp op,
+ MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const override {
// TODO: support 0-d corner case.
if (op.getTransferRank() == 0)
return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
- if (isa<vector::MaskOp>(op->getParentOp()))
- return rewriter.notifyMatchFailure(
- op, "Cannot expand transfer write inside a Mask Op");
+ if (maskOp)
+ return rewriter.notifyMatchFailure(op, "Masked case not supported");
SmallVector<unsigned> permutation;
AffineMap map = op.getPermutationMap();
@@ -294,10 +297,11 @@ struct TransferWriteNonPermutationLowering
newInBoundsValues.push_back(op.isDimInBounds(i));
}
ArrayAttr newInBoundsAttr = rewriter.getBoolArrayAttr(newInBoundsValues);
- rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
- op, newVec, op.getSource(), op.getIndices(), AffineMapAttr::get(newMap),
- newMask, newInBoundsAttr);
- return success();
+ return rewriter
+ .create<vector::TransferWriteOp>(
+ op.getLoc(), newVec, op.getSource(), op.getIndices(),
+ AffineMapAttr::get(newMap), newMask, newInBoundsAttr)
+ .getResult();
}
};
@@ -309,14 +313,19 @@ struct TransferWriteNonPermutationLowering
/// %v = vector.transfer_read ...
/// permutation_map: (d0, d1, d2, d3) -> (d1, 0, d3)
/// vector.broadcast %v
-struct TransferOpReduceRank : public OpRewritePattern<vector::TransferReadOp> {
- using OpRewritePattern::OpRewritePattern;
-
- LogicalResult matchAndRewrite(vector::TransferReadOp op,
- PatternRewriter &rewriter) const override {
+struct TransferOpReduceRank
+ : public MaskableOpRewritePattern<vector::TransferReadOp> {
+ using MaskableOpRewritePattern::MaskableOpRewritePattern;
+
+ FailureOr<mlir::Value>
+ matchAndRewriteMaskableOp(vector::TransferReadOp op,
+ MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const override {
// TODO: support 0-d corner case.
if (op.getTransferRank() == 0)
return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
+ if (maskOp)
+ return rewriter.notifyMatchFailure(op, "Masked case not supported");
AffineMap map = op.getPermutationMap();
unsigned numLeadingBroadcast = 0;
@@ -356,9 +365,9 @@ struct TransferOpReduceRank : public OpRewritePattern<vector::TransferReadOp> {
op.getLoc(), originalVecType.getElementType(), op.getSource(),
op.getIndices());
}
- rewriter.replaceOpWithNewOp<vector::BroadcastOp>(op, originalVecType,
- newRead);
- return success();
+ return rewriter
+ .create<vector::BroadcastOp>(op.getLoc(), originalVecType, newRead)
+ .getVector();
}
SmallVector<int64_t> newShape(
@@ -380,9 +389,9 @@ struct TransferOpReduceRank : public OpRewritePattern<vector::TransferReadOp> {
op.getLoc(), newReadType, op.getSource(), op.getIndices(),
AffineMapAttr::get(newMap), op.getPadding(), op.getMask(),
newInBoundsAttr);
- rewriter.replaceOpWithNewOp<vector::BroadcastOp>(op, originalVecType,
- newRead);
- return success();
+ return rewriter
+ .create<vector::BroadcastOp>(op.getLoc(), originalVecType, newRead)
+ .getVector();
}
};
@@ -410,20 +419,23 @@ namespace {
/// result type.
/// - The permutation map doesn't perform permutation (broadcasting is allowed).
struct TransferReadToVectorLoadLowering
- : public OpRewritePattern<vector::TransferReadOp> {
+ : public MaskableOpRewritePattern<vector::TransferReadOp> {
TransferReadToVectorLoadLowering(MLIRContext *context,
std::optional<unsigned> maxRank,
PatternBenefit benefit = 1)
- : OpRewritePattern<vector::TransferReadOp>(context, benefit),
+ : MaskableOpRewritePattern<vector::TransferReadOp>(context, benefit),
maxTransferRank(maxRank) {}
- LogicalResult matchAndRewrite(vector::TransferReadOp read,
- PatternRewriter &rewriter) const override {
+ FailureOr<mlir::Value>
+ matchAndRewriteMaskableOp(vector::TransferReadOp read,
+ MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const override {
if (maxTransferRank && read.getVectorType().getRank() > *maxTransferRank) {
return rewriter.notifyMatchFailure(
read, "vector type is greater than max transfer rank");
}
-
+ if (maskOp)
+ return rewriter.notifyMatchFailure(read, "Masked case not supported");
SmallVector<unsigned> broadcastedDims;
// Permutations are handled by VectorToSCF or
// populateVectorTransferPermutationMapLoweringPatterns.
@@ -466,7 +478,7 @@ struct TransferReadToVectorLoadLowering
return rewriter.notifyMatchFailure(read, "out-of-bounds needs mask");
// Create vector load op.
- Operation *loadOp;
+ Operation *res;
if (read.getMask()) {
if (read.getVectorType().getRank() != 1)
// vector.maskedload operates on 1-D vectors.
@@ -476,24 +488,20 @@ struct TransferReadToVectorLoadLowering
Value fill = rewriter.create<vector::SplatOp>(
read.getLoc(), unbroadcastedVectorType, read.getPadding());
- loadOp = rewriter.create<vector::MaskedLoadOp>(
+ res = rewriter.create<vector::MaskedLoadOp>(
read.getLoc(), unbroadcastedVectorType, read.getSource(),
read.getIndices(), read.getMask(), fill);
} else {
- loadOp = rewriter.create<vector::LoadOp>(
+ res = rewriter.create<vector::LoadOp>(
read.getLoc(), unbroadcastedVectorType, read.getSource(),
read.getIndices());
}
// Insert a broadcasting op if required.
- if (!broadcastedDims.empty()) {
- rewriter.replaceOpWithNewOp<vector::BroadcastOp>(
- read, read.getVectorType(), loadOp->getResult(0));
- } else {
- rewriter.replaceOp(read, loadOp->getResult(0));
- }
-
- return success();
+ if (!broadcastedDims.empty())
+ res = rewriter.create<vector::BroadcastOp>(
+ read.getLoc(), read.getVectorType(), res->getResult(0));
+ return res->getResults()[0];
}
std::optional<unsigned> maxTransferRank;
@@ -562,19 +570,23 @@ struct VectorStoreToMemrefStoreLowering
/// - The permutation map is the minor identity map (neither permutation nor
/// broadcasting is allowed).
struct TransferWriteToVectorStoreLowering
- : public OpRewritePattern<vector::TransferWriteOp> {
+ : public MaskableOpRewritePattern<vector::TransferWriteOp> {
TransferWriteToVectorStoreLowering(MLIRContext *context,
std::optional<unsigned> maxRank,
PatternBenefit benefit = 1)
- : OpRewritePattern<vector::TransferWriteOp>(context, benefit),
+ : MaskableOpRewritePattern<vector::TransferWriteOp>(context, benefit),
maxTransferRank(maxRank) {}
- LogicalResult matchAndRewrite(vector::TransferWriteOp write,
- PatternRewriter &rewriter) const override {
+ FailureOr<mlir::Value>
+ matchAndRewriteMaskableOp(vector::TransferWriteOp write,
+ MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const override {
if (maxTransferRank && write.getVectorType().getRank() > *maxTransferRank) {
return rewriter.notifyMatchFailure(
write, "vector type is greater than max transfer rank");
}
+ if (maskOp)
+ return rewriter.notifyMatchFailure(write, "Masked case not supported");
// Permutations are handled by VectorToSCF or
// populateVectorTransferPermutationMapLoweringPatterns.
@@ -626,14 +638,17 @@ struct TransferWriteToVectorStoreLowering
<< write;
});
- rewriter.replaceOpWithNewOp<vector::MaskedStoreOp>(
- write, write.getSource(), write.getIndices(), write.getMask(),
- write.getVector());
+ return rewriter
+ .create<vector::MaskedStoreOp>(write.getLoc(), write.getSource(),
+ write.getIndices(), write.getMask(),
+ write.getVector())
+ .getBase();
} else {
- rewriter.replaceOpWithNewOp<vector::StoreOp>(
- write, write.getVector(), write.getSource(), write.getIndices());
+ return rewriter
+ .create<vector::StoreOp>(write.getLoc(), write.getVector(),
+ write.getSource(), write.getIndices())
+ .getBase();
}
- return success();
}
std::optional<unsigned> maxTransferRank;
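
One more note on the FoldTensorSubsetOps part of this commit: when the original
transfer_read was masked, the freshly created read is wrapped in a new
vector.mask again via the vector dialect's maskOperation helper. Distilled into
a standalone sketch (remaskIfNeeded is a hypothetical helper name, not part of
the patch):

  // Hypothetical helper mirroring the re-masking step in
  // TransferReadOfExtractSliceOpFolder::matchAndRewriteMaskableOp above.
  static Value remaskIfNeeded(PatternRewriter &rewriter, Operation *newXferOp,
                              vector::MaskingOpInterface maskOp) {
    if (!maskOp)
      return newXferOp->getResult(0);
    // maskOperation wraps `newXferOp` in a vector.mask built from the original
    // mask value and returns that new vector.mask op.
    Operation *masked =
        mlir::vector::maskOperation(rewriter, newXferOp, maskOp.getMask());
    return masked->getResult(0);
  }
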
From dc6d1eef5204636ba517c1c4f9066800c0c01627 Mon Sep 17 00:00:00 2001
From: Hugo <hugo.trachino at huawei.com>
Date: Wed, 8 May 2024 00:21:00 +0800
Subject: [PATCH 3/3] Fixup
---
mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp | 2 --
mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp | 1 +
2 files changed, 1 insertion(+), 2 deletions(-)
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
index 9bde33d3e5556..5396531922aab 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
@@ -105,8 +105,6 @@ TransferReadOfExtractSliceOpFolder::matchAndRewriteMaskableOp(
SmallVector<Value> indices(readOp.getIndices().begin(),
readOp.getIndices().end());
SmallVector<Value> sourceIndices;
- // In case transfer_read is located inside a MaskOp we want to avoid creating
- // more ops inside it.
affine::resolveIndicesIntoOpWithOffsetsAndStrides(
rewriter, readOp.getLoc(), extractSliceOp.getMixedOffsets(),
extractSliceOp.getMixedStrides(), extractSliceOp.getDroppedDims(),
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
index d384165a83d0e..9c5e54c7e2606 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
@@ -434,6 +434,7 @@ struct TransferReadToVectorLoadLowering
return rewriter.notifyMatchFailure(
read, "vector type is greater than max transfer rank");
}
+
if (maskOp)
return rewriter.notifyMatchFailure(read, "Masked case not supported");
SmallVector<unsigned> broadcastedDims;