[Mlir-commits] [mlir] [mlir][vector] Simplify createReadOrMaskedRead (PR #163736)
Andrzej Warzyński
llvmlistbot at llvm.org
Thu Nov 6 08:07:29 PST 2025
https://github.com/banach-space updated https://github.com/llvm/llvm-project/pull/163736
>From 2845025142d371e0b8c09d6648a69f794ab0cf31 Mon Sep 17 00:00:00 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Thu, 16 Oct 2025 10:22:45 +0000
Subject: [PATCH 1/2] [mlir][vector] Simplify `createReadOrMaskedRead`
Simplify `createReadOrMaskedRead` to only require _one_ argument to
specify the vector type to read (passed as `VectorType`) instead of
passing vector-sizes and scalable-flags independently (i.e. _two_
arguments).
---
.../mlir/Dialect/Vector/Utils/VectorUtils.h | 10 +++---
.../Linalg/Transforms/Vectorization.cpp | 26 +++++++-------
mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp | 36 +++++++++----------
3 files changed, 36 insertions(+), 36 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
index a57aadcdcc5b0..2e6fab30e5120 100644
--- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
+++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
@@ -219,18 +219,16 @@ bool isLinearizableVector(VectorType type);
/// Creates a TransferReadOp from `source`.
///
-/// The shape of the vector to read is specified via `inputVectorSizes`. If the
-/// shape of the output vector differs from the shape of the value being read,
-/// masking is used to avoid out-of-bounds accesses. Set
+/// If the shape of vector to read differs from the shape of the value being
+/// read, masking is used to avoid out-of-bounds accesses. Set
/// `useInBoundsInsteadOfMasking` to `true` to use the "in_bounds" attribute
/// instead of explicit masks.
///
/// Note: all read offsets are set to 0.
Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source,
- ArrayRef<int64_t> inputVectorSizes,
+ VectorType &vecToReadTy,
std::optional<Value> padValue = std::nullopt,
- bool useInBoundsInsteadOfMasking = false,
- ArrayRef<bool> inputScalableVecDims = {});
+ bool useInBoundsInsteadOfMasking = false);
/// Returns success if `inputVectorSizes` is a valid masking configuraion for
/// given `shape`, i.e., it meets:
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 19d2d854a3838..dcf84c46949f3 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1890,9 +1890,8 @@ vectorizeAsTensorPackOp(RewriterBase &rewriter, linalg::PackOp packOp,
// Create masked TransferReadOp.
auto maskedRead = vector::createReadOrMaskedRead(
- rewriter, loc, packOp.getSource(), readVecType.getShape(), padValue,
- useInBoundsInsteadOfMasking,
- /*inputScalableVecSizes=*/{});
+ rewriter, loc, packOp.getSource(), readVecType, padValue,
+ useInBoundsInsteadOfMasking);
// Create ShapeCastOp.
auto shapeCastOp = vector::ShapeCastOp::create(
@@ -1977,9 +1976,12 @@ vectorizeAsTensorUnpackOp(RewriterBase &rewriter, linalg::UnPackOp unpackOp,
}
// -- Generate the read operation --
+ VectorType readVecType =
+ VectorType::get(readVectorSizes, unpackTensorType.getElementType(),
+ readScalableVectorFlags);
Value readResult = vector::createReadOrMaskedRead(
- rewriter, loc, unpackOp.getSource(), readVectorSizes, std::nullopt,
- useInBoundsInsteadOfMasking, readScalableVectorFlags);
+ rewriter, loc, unpackOp.getSource(), readVecType, std::nullopt,
+ useInBoundsInsteadOfMasking);
// -- Generate the transpose operation --
PackingMetadata packMetadata;
@@ -2025,9 +2027,10 @@ vectorizeAsTensorPadOp(RewriterBase &rewriter, tensor::PadOp padOp,
.reifyResultShapes(rewriter, reifiedReturnShapes);
(void)status; // prevent unused variable warning on non-assert builds
assert(succeeded(status) && "failed to reify result shapes");
+ auto readType = VectorType::get(inputVectorSizes, padValue.getType());
auto maskedRead = vector::createReadOrMaskedRead(
- rewriter, loc, padOp.getSource(), inputVectorSizes, padValue,
- /*useInBoundsInsteadOfMasking=*/false, /*inputScalableVecSizes=*/{});
+ rewriter, loc, padOp.getSource(), readType, padValue,
+ /*useInBoundsInsteadOfMasking=*/false);
// Create Xfer write Op
Value dest = tensor::EmptyOp::create(rewriter, loc, reifiedReturnShapes[0],
@@ -2222,9 +2225,9 @@ vectorizeAsLinalgContraction(RewriterBase &rewriter, VectorizationState &state,
state.getCanonicalVecType(elemType, readMap.compose(indexingMap));
Value read = mlir::vector::createReadOrMaskedRead(
- rewriter, loc, opOperand.get(), readType.getShape(),
+ rewriter, loc, opOperand.get(), readType,
/*padding=*/arith::getZeroConstant(rewriter, loc, elemType),
- /*useInBoundsInsteadOfMasking=*/false, readType.getScalableDims());
+ /*useInBoundsInsteadOfMasking=*/false);
vecOperands.push_back(read);
}
@@ -3165,9 +3168,8 @@ vectorizeAsInsertSliceOp(RewriterBase &rewriter, tensor::InsertSliceOp sliceOp,
SmallVector<Value> readIndices(
vecType.getRank(), arith::ConstantIndexOp::create(rewriter, loc, 0));
Value read = mlir::vector::createReadOrMaskedRead(
- rewriter, loc, source, vecType.getShape(), padValue,
- /*useInBoundsInsteadOfMasking=*/inputVectorSizes.empty(),
- /*inputScalableVecSizes=*/{});
+ rewriter, loc, source, vecType, padValue,
+ /*useInBoundsInsteadOfMasking=*/inputVectorSizes.empty());
// Create write
auto writeIndices =
diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
index c809c50206793..a86980d1095c0 100644
--- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
+++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
@@ -317,51 +317,51 @@ bool vector::isLinearizableVector(VectorType type) {
}
Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc,
- Value source,
- ArrayRef<int64_t> inputVectorSizes,
+ Value source, VectorType &vecToReadTy,
std::optional<Value> padValue,
- bool useInBoundsInsteadOfMasking,
- ArrayRef<bool> inputScalableVecDims) {
- assert(!llvm::is_contained(inputVectorSizes, ShapedType::kDynamic) &&
+ bool useInBoundsInsteadOfMasking) {
+ assert(!llvm::is_contained(vecToReadTy.getScalableDims(),
+ ShapedType::kDynamic) &&
"invalid input vector sizes");
auto sourceShapedType = cast<ShapedType>(source.getType());
auto sourceShape = sourceShapedType.getShape();
- assert(sourceShape.size() == inputVectorSizes.size() &&
+
+ int64_t vecToReadRank = vecToReadTy.getRank();
+ auto vecToReadShape = vecToReadTy.getShape();
+
+ assert(sourceShape.size() == static_cast<size_t>(vecToReadRank) &&
"expected same ranks.");
- auto vectorType =
- VectorType::get(inputVectorSizes, sourceShapedType.getElementType(),
- inputScalableVecDims);
assert((!padValue.has_value() ||
padValue.value().getType() == sourceShapedType.getElementType()) &&
"expected same pad element type to match source element type");
- int64_t readRank = inputVectorSizes.size();
+
auto zero = arith::ConstantIndexOp::create(builder, loc, 0);
- SmallVector<bool> inBoundsVal(readRank, true);
+ SmallVector<bool> inBoundsVal(vecToReadRank, true);
if (useInBoundsInsteadOfMasking) {
// Update the inBounds attribute.
// FIXME: This computation is too weak - it ignores the read indices.
- for (unsigned i = 0; i < readRank; i++)
- inBoundsVal[i] = (sourceShape[i] == inputVectorSizes[i]) &&
+ for (unsigned i = 0; i < vecToReadRank; i++)
+ inBoundsVal[i] = (sourceShape[i] == vecToReadShape[i]) &&
ShapedType::isStatic(sourceShape[i]);
}
auto transferReadOp = vector::TransferReadOp::create(
builder, loc,
- /*vectorType=*/vectorType,
+ /*vectorType=*/vecToReadTy,
/*source=*/source,
- /*indices=*/SmallVector<Value>(readRank, zero),
+ /*indices=*/SmallVector<Value>(vecToReadRank, zero),
/*padding=*/padValue,
/*inBounds=*/inBoundsVal);
- if (llvm::equal(inputVectorSizes, sourceShape) || useInBoundsInsteadOfMasking)
+ if (llvm::equal(vecToReadTy.getShape(), sourceShape) ||
+ useInBoundsInsteadOfMasking)
return transferReadOp;
SmallVector<OpFoldResult> mixedSourceDims =
isa<MemRefType>(source.getType())
? memref::getMixedSizes(builder, loc, source)
: tensor::getMixedSizes(builder, loc, source);
- auto maskType = VectorType::get(inputVectorSizes, builder.getI1Type(),
- inputScalableVecDims);
+ auto maskType = vecToReadTy.clone(builder.getI1Type());
Value mask =
vector::CreateMaskOp::create(builder, loc, maskType, mixedSourceDims);
return mlir::vector::maskOperation(builder, transferReadOp, mask)
>From a24d857ab6073647503641eb2eb39a805bfe9ae1 Mon Sep 17 00:00:00 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Fri, 17 Oct 2025 07:46:58 +0000
Subject: [PATCH 2/2] VectorType vecToReadTy -> VectorType *vecToReadTy
---
mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h | 2 +-
mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp | 5 +++--
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
index 2e6fab30e5120..491e7510113f7 100644
--- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
+++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
@@ -226,7 +226,7 @@ bool isLinearizableVector(VectorType type);
///
/// Note: all read offsets are set to 0.
Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source,
- VectorType &vecToReadTy,
+ const VectorType &vecToReadTy,
std::optional<Value> padValue = std::nullopt,
bool useInBoundsInsteadOfMasking = false);
diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
index a86980d1095c0..82136bad85719 100644
--- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
+++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
@@ -317,7 +317,8 @@ bool vector::isLinearizableVector(VectorType type) {
}
Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc,
- Value source, VectorType &vecToReadTy,
+ Value source,
+ const VectorType &vecToReadTy,
std::optional<Value> padValue,
bool useInBoundsInsteadOfMasking) {
assert(!llvm::is_contained(vecToReadTy.getScalableDims(),
@@ -361,7 +362,7 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc,
? memref::getMixedSizes(builder, loc, source)
: tensor::getMixedSizes(builder, loc, source);
- auto maskType = vecToReadTy.clone(builder.getI1Type());
+ auto maskType = vecToReadTy.cloneWith(/*shape=*/{}, builder.getI1Type());
Value mask =
vector::CreateMaskOp::create(builder, loc, maskType, mixedSourceDims);
return mlir::vector::maskOperation(builder, transferReadOp, mask)
More information about the Mlir-commits
mailing list