[Mlir-commits] [mlir] [mlir][vector] Clean up VectorTransferOpInterface (PR #72353)
Matthias Springer
llvmlistbot at llvm.org
Tue Nov 14 22:40:48 PST 2023
https://github.com/matthias-springer created https://github.com/llvm/llvm-project/pull/72353
- Better documentation.
- Rename interface methods: `source` -> `getSource`, `indices` -> `getIndices`, etc. to conform with MLIR naming conventions. A default implementation is not needed.
- Turn many interface methods into helper functions. Most of the previous interface methods were not meant to be overridden, and if some of them had been overridden without the others, the op would have been broken.
>From 33c618d64b7b68a11d44b24b5a0ef5d9f7342356 Mon Sep 17 00:00:00 2001
From: Matthias Springer <springerm at google.com>
Date: Wed, 15 Nov 2023 15:34:42 +0900
Subject: [PATCH] [mlir][vector] Clean up VectorTransferOpInterface
- Better documentation.
- Rename interface methods: `source` -> `getSource`, `indices` -> `getIndices`, etc. to conform with MLIR naming conventions. A default implementation is not needed.
- Turn many interface methods into helper functions. Most of the previous interface methods were not meant to be overridden, and if some of them had been overridden without the others, the op would have been broken.
---
.../mlir/Interfaces/VectorInterfaces.td | 437 +++++++++---------
mlir/lib/Dialect/Vector/IR/VectorOps.cpp | 22 +-
.../VectorTransferSplitRewritePatterns.cpp | 38 +-
3 files changed, 251 insertions(+), 246 deletions(-)
diff --git a/mlir/include/mlir/Interfaces/VectorInterfaces.td b/mlir/include/mlir/Interfaces/VectorInterfaces.td
index 66b1b0b70696e8e..ebf1a03b5e8ad80 100644
--- a/mlir/include/mlir/Interfaces/VectorInterfaces.td
+++ b/mlir/include/mlir/Interfaces/VectorInterfaces.td
@@ -25,8 +25,8 @@ def VectorUnrollOpInterface : OpInterface<"VectorUnrollOpInterface"> {
InterfaceMethod<
/*desc=*/[{
Return the shape ratio of unrolling to the target vector shape
- `targetShape`. Return `std::nullopt` if the op cannot be unrolled to the target
- vector shape.
+ `targetShape`. Return `std::nullopt` if the op cannot be unrolled to the
+ target vector shape.
}],
/*retTy=*/"::std::optional<::llvm::SmallVector<int64_t, 4>>",
/*methodName=*/"getShapeForUnroll",
@@ -34,10 +34,12 @@ def VectorUnrollOpInterface : OpInterface<"VectorUnrollOpInterface"> {
/*methodBody=*/"",
/*defaultImplementation=*/[{
assert($_op->getNumResults() == 1);
- auto vt = ::llvm::dyn_cast<::mlir::VectorType>($_op.getResult().getType());
+ auto vt =
+ ::llvm::dyn_cast<::mlir::VectorType>($_op.getResult().getType());
if (!vt)
return ::std::nullopt;
- ::llvm::SmallVector<int64_t, 4> res(vt.getShape().begin(), vt.getShape().end());
+ ::llvm::SmallVector<int64_t, 4> res(
+ vt.getShape().begin(), vt.getShape().end());
return res;
}]
>,
@@ -46,246 +48,249 @@ def VectorUnrollOpInterface : OpInterface<"VectorUnrollOpInterface"> {
def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {
let description = [{
- Encodes properties of a transfer read or write operation.
+ Encodes properties of a `vector.transfer_read` or `vector.transfer_write`
+ operation. Vector transfer ops have:
+
+ - A shaped value that the op reads from/writes to: a memref or a tensor.
+ - A vector, either as a result or as an operand.
+ - Indices that describe where the transfer from/to the shaped value starts.
+ - An optional mask.
+ - An optional in_bounds array to indicate transfer dimensions that are
+ guaranteed to be in-bounds.
+ - A permutation map to indicate transposes and broadcasts.
+
+ The "vector rank" is the rank of the vector type. E.g.:
+ ```
+ // Transfer with shaped value rank 2 and vector (transfer) rank 1.
+ %0 = vector.transfer_read %arg0[%c3, %c3], %f0
+ {permutation_map = affine_map<(d0, d1) -> (d0)>}
+ : memref<?x?xf32>, vector<128xf32>
+ ```
+
+ The "vector transfer rank" is the number of dimensions that participate in
+ the transfer and matches the number of results in the permutation map. In
+ most cases, the vector rank matches the vector transfer rank; the only
+ exception is when a vector is flattened as part of the transfer (see
+ `getPermutationMap`).
}];
let cppNamespace = "::mlir";
let methods = [
- StaticInterfaceMethod<
- /*desc=*/"Return the `in_bounds` attribute name.",
+ InterfaceMethod<
+ /*desc=*/[{
+ Return the `in_bounds` attribute name.
+ }],
/*retTy=*/"::mlir::StringRef",
- /*methodName=*/"getInBoundsAttrStrName",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/ [{ return "in_bounds"; }]
+ /*methodName=*/"getInBoundsAttrName",
+ /*args=*/(ins)
>,
- StaticInterfaceMethod<
- /*desc=*/"Return the `permutation_map` attribute name.",
+ InterfaceMethod<
+ /*desc=*/[{
+ Return the `permutation_map` attribute name.
+ }],
/*retTy=*/"::mlir::StringRef",
- /*methodName=*/"getPermutationMapAttrStrName",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/ [{ return "permutation_map"; }]
+ /*methodName=*/"getPermutationMapAttrName",
+ /*args=*/(ins)
>,
InterfaceMethod<
- /*desc=*/[{ Return `true` if dimension `dim` is in-bounds. Return `false`
- otherwise. }],
- /*retTy=*/"bool",
- /*methodName=*/"isDimInBounds",
- /*args=*/(ins "unsigned":$dim),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- return $_op.isBroadcastDim(dim)
- || ($_op.getInBounds()
- && cast<::mlir::BoolAttr>(cast<::mlir::ArrayAttr>(*$_op.getInBounds())[dim]).getValue());
- }]
+ /*desc=*/[{
+ Return the optional in_bounds attribute that specifies for each vector
+ dimension whether it is in-bounds or not. (Broadcast dimensions are
+ always in-bounds).
+ }],
+ /*retTy=*/"::std::optional<::mlir::ArrayAttr>",
+ /*methodName=*/"getInBounds",
+ /*args=*/(ins)
>,
InterfaceMethod<
- /*desc=*/"Return the memref or ranked tensor operand.",
+ /*desc=*/[{
+ Return the memref or ranked tensor operand that this operation operates
+ on. In case of a "read" operation, that's the source from which the
+ operation reads. In case of a "write" operation, that's the destination
+ into which the operation writes.
+ TODO: Change name of operand, which is not accurate for xfer_write.
+ }],
/*retTy=*/"::mlir::Value",
- /*methodName=*/"source",
- /*args=*/(ins),
- /*methodBody=*/"return $_op.getSource();"
- /*defaultImplementation=*/
+ /*methodName=*/"getSource",
+ /*args=*/(ins)
>,
InterfaceMethod<
- /*desc=*/"Return the vector operand or result.",
+ /*desc=*/[{
+ Return the vector that this operation operates on. In case of a "read",
+ that's the vector OpResult. In case of a "write", that's the vector
+ operand value that is written by the op.
+ }],
/*retTy=*/"::mlir::Value",
- /*methodName=*/"vector",
- /*args=*/(ins),
- /*methodBody=*/"return $_op.getVector();"
- /*defaultImplementation=*/
+ /*methodName=*/"getVector",
+ /*args=*/(ins)
>,
InterfaceMethod<
- /*desc=*/"Return the indices operands.",
- /*retTy=*/"::mlir::ValueRange",
- /*methodName=*/"indices",
- /*args=*/(ins),
- /*methodBody=*/"return $_op.getIndices();"
- /*defaultImplementation=*/
+ /*desc=*/[{
+ Return the indices that specify the starting offsets into the source
+ operand. The indices are guaranteed to be in-bounds.
+ }],
+ /*retTy=*/"::mlir::OperandRange",
+ /*methodName=*/"getIndices",
+ /*args=*/(ins)
>,
InterfaceMethod<
- /*desc=*/"Return the permutation map.",
+ /*desc=*/[{
+ Return the permutation map that describes the mapping of vector
+ dimensions to source dimensions, as well as broadcast dimensions.
+
+ The permutation result has one result per vector transfer dimension.
+ Each result is either a dim expression, indicating the corresponding
+ dimension in the source operand, or a constant "0" expression,
+ indicating a broadcast dimension.
+
+ Note: Nested dimensions of a flattened vector are not accounted for in
+ the permutation map. E.g.:
+ ```
+ // Vector type has rank 4, but permutation map has only 2 results. That
+ // is because there are only 2 transfer dimensions.
+ %0 = vector.transfer_read %arg1[%c3, %c3], %vf0
+ {permutation_map = affine_map<(d0, d1) -> (d0, d1)>}
+ : memref<?x?xvector<4x3xf32>>, vector<1x1x4x3xf32>
+ ```
+ }],
/*retTy=*/"::mlir::AffineMap",
/*methodName=*/"getPermutationMap",
- /*args=*/(ins),
- /*methodBody=*/"return $_op.getPermutationMap();"
- /*defaultImplementation=*/
- >,
- InterfaceMethod<
- /*desc=*/[{ Returns true if the specified dimension is a broadcast. }],
- /*retTy=*/"bool",
- /*methodName=*/"isBroadcastDim",
- /*args=*/(ins "unsigned":$idx),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- auto expr = $_op.getPermutationMap().getResult(idx);
- return ::llvm::isa<::mlir::AffineConstantExpr>(expr) &&
- ::llvm::dyn_cast<::mlir::AffineConstantExpr>(expr).getValue() == 0;
- }]
- >,
- InterfaceMethod<
- /*desc=*/[{ Returns true if at least one of the dimensions in the
- permutation map is a broadcast.}],
- /*retTy=*/"bool",
- /*methodName=*/"hasBroadcastDim",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- for (unsigned i = 0, rank = getTransferRank(); i < rank; ++i) {
- if ($_op.isBroadcastDim(i))
- return true;
- }
- return false;
- }]
+ /*args=*/(ins)
>,
InterfaceMethod<
- /*desc=*/"Return a vector of all in_bounds values as booleans.",
- /*retTy=*/"::llvm::SmallVector<bool>",
- /*methodName=*/"getInBoundsValues",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- ::llvm::SmallVector<bool> inBounds;
- for (int64_t i = 0, e = $_op.getTransferRank(); i < e; ++i)
- inBounds.push_back($_op.isDimInBounds(i));
- return inBounds;
- }]
- >,
- InterfaceMethod<
- /*desc=*/"Return the ShapedType.",
- /*retTy=*/"::mlir::ShapedType",
- /*methodName=*/"getShapedType",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/
- "return ::llvm::cast<::mlir::ShapedType>($_op.getSource().getType());"
- >,
- InterfaceMethod<
- /*desc=*/"Return the VectorType.",
- /*retTy=*/"::mlir::VectorType",
- /*methodName=*/"getVectorType",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- return ::llvm::dyn_cast<::mlir::VectorType>($_op.getVector().getType());
- }]
- >,
- InterfaceMethod<
- /*desc=*/"Return the mask operand if the op has a mask. Otherwise, "
- "return a empty value.",
+ /*desc=*/[{
+ Return the mask operand if the op has a mask. Otherwise, return an
+ empty value.
+ }],
/*retTy=*/"Value",
/*methodName=*/"getMask",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- return $_op.getMask();
- }]
- >,
- InterfaceMethod<
- /*desc=*/"Return the mask type if the op has a mask. Otherwise, return "
- "an empty VectorType.",
- /*retTy=*/"::mlir::VectorType",
- /*methodName=*/"getMaskType",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- return $_op.getMask() ? $_op.getMask().getType() : ::mlir::VectorType();
- }]
- >,
- InterfaceMethod<
- /*desc=*/[{ Return the number of dimensions that participate in the
- permutation map.}],
- /*retTy=*/"unsigned",
- /*methodName=*/"getTransferRank",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/
- "return $_op.getPermutationMap().getNumResults();"
- >,
- InterfaceMethod<
- /*desc=*/[{ Return the number of leading shaped dimensions that do not
- participate in the permutation map.}],
- /*retTy=*/"unsigned",
- /*methodName=*/"getLeadingShapedRank",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/
- "return $_op.getShapedType().getRank() - $_op.getTransferRank();"
- >,
- InterfaceMethod<
- /*desc=*/[{ Returns true if at least one of the dimensions may be
- out-of-bounds.}],
- /*retTy=*/"bool",
- /*methodName=*/"hasOutOfBoundsDim",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- for (unsigned idx = 0, e = $_op.getTransferRank(); idx < e; ++idx)
- if (!$_op.isDimInBounds(idx))
- return true;
+ /*args=*/(ins)
+ >
+ ];
+
+ let extraSharedClassDeclaration = [{
+ /// Return a vector of all in_bounds values as booleans (one per vector
+ /// transfer dimension).
+ ::llvm::SmallVector<bool> getInBoundsValues() {
+ ::llvm::SmallVector<bool> inBounds;
+ for (int64_t i = 0, e = $_op.getTransferRank(); i < e; ++i)
+ inBounds.push_back($_op.isDimInBounds(i));
+ return inBounds;
+ }
+
+ /// Return the number of leading shaped dimensions (of the "source" operand)
+ /// that do not participate in the permutation map.
+ unsigned getLeadingShapedRank() {
+ return $_op.getShapedType().getRank() - $_op.getTransferRank();
+ }
+
+ /// Return the mask type if the op has a mask. Otherwise, return an empty
+ /// VectorType.
+ ::mlir::VectorType getMaskType() {
+ return $_op.getMask()
+ ? ::llvm::cast<::mlir::VectorType>($_op.getMask().getType())
+ : ::mlir::VectorType();
+ }
+
+ /// Return the shaped type of the "source" operand value.
+ ::mlir::ShapedType getShapedType() {
+ return ::llvm::cast<::mlir::ShapedType>($_op.getSource().getType());
+ }
+
+ /// Return the number of dimensions that participate in the permutation map.
+ unsigned getTransferRank() {
+ return $_op.getPermutationMap().getNumResults();
+ }
+
+ /// Return the type of the vector that this operation operates on.
+ ::mlir::VectorType getVectorType() {
+ return ::llvm::cast<::mlir::VectorType>($_op.getVector().getType());
+ }
+
+ /// Return "true" if at least one of the vector dimensions is a broadcasted
+ /// dimension.
+ bool hasBroadcastDim() {
+ for (unsigned i = 0, e = $_op.getTransferRank(); i < e; ++i) {
+ if ($_op.isBroadcastDim(i))
+ return true;
+ }
+ return false;
+ }
+
+ /// Return "true" if at least one of the vector dimensions may be
+ /// out-of-bounds.
+ bool hasOutOfBoundsDim() {
+ for (unsigned idx = 0, e = $_op.getTransferRank(); idx < e; ++idx)
+ if (!$_op.isDimInBounds(idx))
+ return true;
+ return false;
+ }
+
+ /// Return "true" if the specified vector transfer dimension is a
+ /// broadcasted dimension.
+ bool isBroadcastDim(unsigned dim) {
+ auto expr = $_op.getPermutationMap().getResult(dim);
+ auto constExpr = ::llvm::dyn_cast<::mlir::AffineConstantExpr>(expr);
+ return constExpr && constExpr.getValue() == 0;
+ }
+
+ /// Return "true" if the vector transfer dimension `dim` is in-bounds. Also
+ /// return "true" if the dimension is a broadcast dimension. Return "false"
+ /// otherwise.
+ bool isDimInBounds(unsigned dim) {
+ if ($_op.isBroadcastDim(dim))
+ return true;
+ if (!$_op.getInBounds())
return false;
- }]
- >,
- InterfaceMethod<
- /*desc=*/[{
- Helper function to account for the fact that `permutationMap` results and
- `op.indices` sizes may not match and may not be aligned. The first
- `getLeadingShapedRank()` indices may just be indexed and not
- transferred from/into the vector.
- For example:
- ```
- vector.transfer %0[%i, %j, %k, %c0] :
- memref<?x?x?x?xf32>, vector<2x4xf32>
- ```
- with `permutation_map = (d0, d1, d2, d3) -> (d2, d3)`.
- Provide a zip function to coiterate on 2 running indices: `resultIdx` and
- `indicesIdx` which accounts for this misalignment.
- }],
- /*retTy=*/"void",
- /*methodName=*/"zipResultAndIndexing",
- /*args=*/(ins "::llvm::function_ref<void(int64_t, int64_t)>":$fun),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- for (int64_t resultIdx = 0,
+ auto inBounds = cast<::mlir::ArrayAttr>(*$_op.getInBounds());
+ return cast<::mlir::BoolAttr>(inBounds[dim]).getValue();
+ }
+
+ /// Helper function to account for the fact that `permutationMap` results
+ /// and `op.getIndices` sizes may not match and may not be aligned. The
+ /// first `getLeadingShapedRank()` indices may just be indexed and not
+ /// transferred from/into the vector.
+ /// For example:
+ /// ```
+ /// vector.transfer %0[%i, %j, %k, %c0]
+ /// : memref<?x?x?x?xf32>, vector<2x4xf32>
+ /// ```
+ /// with `permutation_map = (d0, d1, d2, d3) -> (d2, d3)`.
+ /// Provide a zip function to coiterate on 2 running indices: `resultIdx`
+ /// and `indicesIdx` which accounts for this misalignment.
+ void zipResultAndIndexing(
+ ::llvm::function_ref<void(int64_t, int64_t)> fun) {
+ for (int64_t resultIdx = 0,
indicesIdx = $_op.getLeadingShapedRank(),
eResult = $_op.getTransferRank();
- resultIdx < eResult;
- ++resultIdx, ++indicesIdx)
- fun(resultIdx, indicesIdx);
- }]
- >,
- InterfaceMethod<
- /*desc=*/[{
- Return the shape of the hyperrectangular slice within the tensor/memref
- operand that is accessed by the transfer op.
- For example:
- ```
- vector.transfer %w0[%i, %j, %k] {
- permutation_map = affine_map<(d0, d1, d2) -> (d1, d0, 0)>} :
- tensor<?x?x?xf32>, vector<4x2x6xf32>
- ```
- returns a shape [2, 4, 1].
- }],
- /*retTy=*/"SmallVector<int64_t>",
- /*methodName=*/"getTransferChunkAccessed",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- SmallVector<int64_t> dimSizes($_op.getPermutationMap().getNumDims(), 1);
- for (auto vecDims : llvm::zip($_op.getPermutationMap().getResults(),
- $_op.getVectorType().getShape())) {
- AffineExpr dim = std::get<0>(vecDims);
- int64_t size = std::get<1>(vecDims);
- // Skip broadcast.
- if (isa<AffineConstantExpr>(dim))
- continue;
- dimSizes[cast<AffineDimExpr>(dim).getPosition()] = size;
- }
- return dimSizes;
- }]
- >,
- ];
+ resultIdx < eResult;
+ ++resultIdx, ++indicesIdx)
+ fun(resultIdx, indicesIdx);
+ }
+
+ /// Return the shape of the hyperrectangular slice within the tensor/memref
+ /// operand that is accessed by the transfer op.
+ /// For example:
+ /// ```
+ /// vector.transfer %w0[%i, %j, %k] {
+ /// permutation_map = affine_map<(d0, d1, d2) -> (d1, d0, 0)>}
+ /// : tensor<?x?x?xf32>, vector<4x2x6xf32>
+ /// ```
+ /// returns a shape [2, 4, 1].
+ SmallVector<int64_t> getTransferChunkAccessed() {
+ SmallVector<int64_t> dimSizes($_op.getPermutationMap().getNumDims(), 1);
+ for (auto vecDims : llvm::zip($_op.getPermutationMap().getResults(),
+ $_op.getVectorType().getShape())) {
+ AffineExpr dim = std::get<0>(vecDims);
+ int64_t size = std::get<1>(vecDims);
+ // Skip broadcast.
+ if (isa<AffineConstantExpr>(dim))
+ continue;
+ dimSizes[cast<AffineDimExpr>(dim).getPosition()] = size;
+ }
+ return dimSizes;
+ }
+ }];
}
#endif // MLIR_INTERFACES_VECTORINTERFACES
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index c4778394375efdc..95f49fa32bc0ae2 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -191,9 +191,9 @@ bool mlir::vector::isDisjointTransferIndices(
if (transferA.getVectorType() != transferB.getVectorType())
return false;
unsigned rankOffset = transferA.getLeadingShapedRank();
- for (unsigned i = 0, e = transferA.indices().size(); i < e; i++) {
- Value indexA = transferA.indices()[i];
- Value indexB = transferB.indices()[i];
+ for (unsigned i = 0, e = transferA.getIndices().size(); i < e; i++) {
+ Value indexA = transferA.getIndices()[i];
+ Value indexB = transferB.getIndices()[i];
std::optional<int64_t> cstIndexA = getConstantIntValue(indexA);
std::optional<int64_t> cstIndexB = getConstantIntValue(indexB);
@@ -251,7 +251,7 @@ bool mlir::vector::isDisjointTransferIndices(
bool mlir::vector::isDisjointTransferSet(VectorTransferOpInterface transferA,
VectorTransferOpInterface transferB,
bool testDynamicValueUsingBounds) {
- if (transferA.source() != transferB.source())
+ if (transferA.getSource() != transferB.getSource())
return false;
return isDisjointTransferIndices(transferA, transferB,
testDynamicValueUsingBounds);
@@ -3740,10 +3740,10 @@ static void printTransferAttrs(OpAsmPrinter &p, VectorTransferOpInterface op) {
SmallVector<StringRef, 3> elidedAttrs;
elidedAttrs.push_back(TransferReadOp::getOperandSegmentSizeAttr());
if (op.getPermutationMap().isMinorIdentity())
- elidedAttrs.push_back(op.getPermutationMapAttrStrName());
+ elidedAttrs.push_back(op.getPermutationMapAttrName());
// Elide in_bounds attribute if all dims are out-of-bounds.
if (llvm::none_of(op.getInBoundsValues(), [](bool b) { return b; }))
- elidedAttrs.push_back(op.getInBoundsAttrStrName());
+ elidedAttrs.push_back(op.getInBoundsAttrName());
p.printOptionalAttrDict(op->getAttrs(), elidedAttrs);
}
@@ -3802,7 +3802,7 @@ ParseResult TransferReadOp::parse(OpAsmParser &parser, OperationState &result) {
VectorType vectorType = llvm::dyn_cast<VectorType>(types[1]);
if (!vectorType)
return parser.emitError(typesLoc, "requires vector type");
- auto permMapAttrName = TransferReadOp::getPermutationMapAttrStrName();
+ auto permMapAttrName = TransferReadOp::getPermutationMapAttrName(result.name);
Attribute permMapAttr = result.attributes.get(permMapAttrName);
AffineMap permMap;
if (!permMapAttr) {
@@ -3888,7 +3888,7 @@ Type TransferReadOp::getExpectedMaskType() {
template <typename TransferOp>
static bool isInBounds(TransferOp op, int64_t resultIdx, int64_t indicesIdx) {
// TODO: support more aggressive createOrFold on:
- // `op.indices()[indicesIdx] + vectorType < dim(op.source(), indicesIdx)`
+ // op.getIndices()[indicesIdx] + vectorType < dim(op.getSource(), indicesIdx)
if (op.getShapedType().isDynamicDim(indicesIdx))
return false;
Value index = op.getIndices()[indicesIdx];
@@ -3932,8 +3932,7 @@ static LogicalResult foldTransferInBoundsAttribute(TransferOp op) {
return failure();
// OpBuilder is only used as a helper to build an I64ArrayAttr.
OpBuilder b(op.getContext());
- op->setAttr(TransferOp::getInBoundsAttrStrName(),
- b.getBoolArrayAttr(newInBounds));
+ op.setInBoundsAttr(b.getBoolArrayAttr(newInBounds));
return success();
}
@@ -4169,7 +4168,8 @@ ParseResult TransferWriteOp::parse(OpAsmParser &parser,
ShapedType shapedType = llvm::dyn_cast<ShapedType>(types[1]);
if (!shapedType || !llvm::isa<MemRefType, RankedTensorType>(shapedType))
return parser.emitError(typesLoc, "requires memref or ranked tensor type");
- auto permMapAttrName = TransferWriteOp::getPermutationMapAttrStrName();
+ auto permMapAttrName =
+ TransferWriteOp::getPermutationMapAttrName(result.name);
auto permMapAttr = result.attributes.get(permMapAttrName);
AffineMap permMap;
if (!permMapAttr) {
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
index 7a21863c4d9e985..ea33453e7215e31 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
@@ -56,9 +56,9 @@ static Value createInBoundsCond(RewriterBase &b,
int64_t vectorSize = xferOp.getVectorType().getDimSize(resultIdx);
OpFoldResult sum = affine::makeComposedFoldedAffineApply(
b, loc, b.getAffineDimExpr(0) + b.getAffineConstantExpr(vectorSize),
- {xferOp.indices()[indicesIdx]});
+ {xferOp.getIndices()[indicesIdx]});
OpFoldResult dimSz =
- memref::getMixedSize(b, loc, xferOp.source(), indicesIdx);
+ memref::getMixedSize(b, loc, xferOp.getSource(), indicesIdx);
auto maybeCstSum = getConstantIntValue(sum);
auto maybeCstDimSz = getConstantIntValue(dimSz);
if (maybeCstSum && maybeCstDimSz && *maybeCstSum <= *maybeCstDimSz)
@@ -106,9 +106,9 @@ static Value createInBoundsCond(RewriterBase &b,
///
/// Preconditions:
/// 1. `xferOp.getPermutationMap()` must be a minor identity map
-/// 2. the rank of the `xferOp.memref()` and the rank of the `xferOp.vector()`
-/// must be equal. This will be relaxed in the future but requires
-/// rank-reducing subviews.
+/// 2. the rank of the `xferOp.memref()` and the rank of the
+/// `xferOp.getVector()` must be equal. This will be relaxed in the future
+/// but requires rank-reducing subviews.
static LogicalResult
splitFullAndPartialTransferPrecondition(VectorTransferOpInterface xferOp) {
// TODO: support 0-d corner case.
@@ -185,7 +185,7 @@ static Value castToCompatibleMemRefType(OpBuilder &b, Value memref,
}
/// Operates under a scoped context to build the intersection between the
-/// view `xferOp.source()` @ `xferOp.indices()` and the view `alloc`.
+/// view `xferOp.getSource()` @ `xferOp.getIndices()` and the view `alloc`.
// TODO: view intersection/union/differences should be a proper std op.
static std::pair<Value, Value>
createSubViewIntersection(RewriterBase &b, VectorTransferOpInterface xferOp,
@@ -196,16 +196,16 @@ createSubViewIntersection(RewriterBase &b, VectorTransferOpInterface xferOp,
assert(memrefRank == cast<MemRefType>(alloc.getType()).getRank() &&
"Expected memref rank to match the alloc rank");
ValueRange leadingIndices =
- xferOp.indices().take_front(xferOp.getLeadingShapedRank());
+ xferOp.getIndices().take_front(xferOp.getLeadingShapedRank());
SmallVector<OpFoldResult, 4> sizes;
sizes.append(leadingIndices.begin(), leadingIndices.end());
auto isaWrite = isa<vector::TransferWriteOp>(xferOp);
xferOp.zipResultAndIndexing([&](int64_t resultIdx, int64_t indicesIdx) {
using MapList = ArrayRef<ArrayRef<AffineExpr>>;
- Value dimMemRef =
- b.create<memref::DimOp>(xferOp.getLoc(), xferOp.source(), indicesIdx);
+ Value dimMemRef = b.create<memref::DimOp>(xferOp.getLoc(),
+ xferOp.getSource(), indicesIdx);
Value dimAlloc = b.create<memref::DimOp>(loc, alloc, resultIdx);
- Value index = xferOp.indices()[indicesIdx];
+ Value index = xferOp.getIndices()[indicesIdx];
AffineExpr i, j, k;
bindDims(xferOp.getContext(), i, j, k);
SmallVector<AffineMap, 4> maps =
@@ -217,13 +217,13 @@ createSubViewIntersection(RewriterBase &b, VectorTransferOpInterface xferOp,
});
SmallVector<OpFoldResult> srcIndices = llvm::to_vector<4>(llvm::map_range(
- xferOp.indices(), [](Value idx) -> OpFoldResult { return idx; }));
+ xferOp.getIndices(), [](Value idx) -> OpFoldResult { return idx; }));
SmallVector<OpFoldResult> destIndices(memrefRank, b.getIndexAttr(0));
SmallVector<OpFoldResult> strides(memrefRank, b.getIndexAttr(1));
auto copySrc = b.create<memref::SubViewOp>(
- loc, isaWrite ? alloc : xferOp.source(), srcIndices, sizes, strides);
+ loc, isaWrite ? alloc : xferOp.getSource(), srcIndices, sizes, strides);
auto copyDest = b.create<memref::SubViewOp>(
- loc, isaWrite ? xferOp.source() : alloc, destIndices, sizes, strides);
+ loc, isaWrite ? xferOp.getSource() : alloc, destIndices, sizes, strides);
return std::make_pair(copySrc, copyDest);
}
@@ -318,7 +318,7 @@ static scf::IfOp createFullPartialVectorTransferRead(
},
[&](OpBuilder &b, Location loc) {
Operation *newXfer = b.clone(*xferOp.getOperation());
- Value vector = cast<VectorTransferOpInterface>(newXfer).vector();
+ Value vector = cast<VectorTransferOpInterface>(newXfer).getVector();
b.create<memref::StoreOp>(
loc, vector,
b.create<vector::TypeCastOp>(
@@ -513,9 +513,9 @@ static Operation *getAutomaticAllocationScope(Operation *op) {
///
/// Preconditions:
/// 1. `xferOp.getPermutationMap()` must be a minor identity map
-/// 2. the rank of the `xferOp.source()` and the rank of the `xferOp.vector()`
-/// must be equal. This will be relaxed in the future but requires
-/// rank-reducing subviews.
+/// 2. the rank of the `xferOp.getSource()` and the rank of the
+/// `xferOp.getVector()` must be equal. This will be relaxed in the future
+/// but requires rank-reducing subviews.
LogicalResult mlir::vector::splitFullAndPartialTransfer(
RewriterBase &b, VectorTransferOpInterface xferOp,
VectorTransformsOptions options, scf::IfOp *ifOp) {
@@ -526,7 +526,7 @@ LogicalResult mlir::vector::splitFullAndPartialTransfer(
auto inBoundsAttr = b.getBoolArrayAttr(bools);
if (options.vectorTransferSplit == VectorTransferSplit::ForceInBounds) {
b.updateRootInPlace(xferOp, [&]() {
- xferOp->setAttr(xferOp.getInBoundsAttrStrName(), inBoundsAttr);
+ xferOp->setAttr(xferOp.getInBoundsAttrName(), inBoundsAttr);
});
return success();
}
@@ -599,7 +599,7 @@ LogicalResult mlir::vector::splitFullAndPartialTransfer(
xferReadOp.setOperand(i, fullPartialIfOp.getResult(i));
b.updateRootInPlace(xferOp, [&]() {
- xferOp->setAttr(xferOp.getInBoundsAttrStrName(), inBoundsAttr);
+ xferOp->setAttr(xferOp.getInBoundsAttrName(), inBoundsAttr);
});
return success();
More information about the Mlir-commits
mailing list