[Mlir-commits] [mlir] 8feedd5 - [mlir][linalg] Fix the semantic use of a flag (#90081)
llvmlistbot at llvm.org
llvmlistbot at llvm.org
Thu Apr 25 17:09:03 PDT 2024
Author: Prashant Kumar
Date: 2024-04-26T05:39:00+05:30
New Revision: 8feedd5e067a5ea13e36c39dc634da5c34284ddd
URL: https://github.com/llvm/llvm-project/commit/8feedd5e067a5ea13e36c39dc634da5c34284ddd
DIFF: https://github.com/llvm/llvm-project/commit/8feedd5e067a5ea13e36c39dc634da5c34284ddd.diff
LOG: [mlir][linalg] Fix the semantic use of a flag (#90081)
`useInBoundsInsteadOfMasking` was doing the opposite of what its name implies, i.e., when set to
true, it was updating the mask instead of updating the inBounds attribute.
Added:
Modified:
mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
index 8a57c6094c41c0..030be328e97fd0 100644
--- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
+++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
@@ -194,7 +194,7 @@ bool isLinearizableVector(VectorType type);
/// for each dimension of the passed in tensor.
Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source,
ArrayRef<int64_t> readShape, Value padValue,
- bool useInBoundsInsteadOfMasking = true);
+ bool useInBoundsInsteadOfMasking);
/// Returns success if `inputVectorSizes` is a valid masking configuraion for
/// given `shape`, i.e., it meets:
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index e836f0dc63b4f9..ef9a30be9a0153 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1499,11 +1499,11 @@ vectorizeAsTensorPackOp(RewriterBase &rewriter, tensor::PackOp packOp,
// If the input vector sizes are not provided, then the vector sizes are
// determined by the result tensor shape. In case the vector sizes aren't
// provided, we update the inBounds attribute instead of masking.
- bool useInBoundsInsteadOfMasking = true;
+ bool useInBoundsInsteadOfMasking = false;
if (inputVectorSizes.empty()) {
ArrayRef<int64_t> resultTensorShape = packOp.getDestType().getShape();
inputVectorSizes = resultTensorShape.take_front(packOp.getSourceRank());
- useInBoundsInsteadOfMasking = false;
+ useInBoundsInsteadOfMasking = true;
}
// Create masked TransferReadOp.
@@ -1612,7 +1612,8 @@ vectorizeAsTensorUnpackOp(RewriterBase &rewriter, tensor::UnPackOp unpackOp,
// to shape of source, then a mask is necessary.
Value readResult = vector::createReadOrMaskedRead(
rewriter, loc, unpackOp.getSource(),
- ArrayRef<int64_t>(readMaskShape.begin(), readMaskShape.end()), padValue);
+ ArrayRef<int64_t>(readMaskShape.begin(), readMaskShape.end()), padValue,
+ /*useInBoundsInsteadOfMasking=*/false);
PackingMetadata packMetadata;
SmallVector<int64_t> lastDimToInsertPosPerm =
@@ -1669,7 +1670,8 @@ vectorizeAsTensorPadOp(RewriterBase &rewriter, tensor::PadOp padOp,
(void)status; // prevent unused variable warning on non-assert builds
assert(succeeded(status) && "failed to reify result shapes");
auto maskedRead = vector::createReadOrMaskedRead(
- rewriter, loc, padOp.getSource(), inputVectorSizes, padValue);
+ rewriter, loc, padOp.getSource(), inputVectorSizes, padValue,
+ /*useInBoundsInsteadOfMasking=*/false);
Operation *write = createWriteOrMaskedWrite(
rewriter, loc, maskedRead, reifiedReturnShapes[0], inputVectorSizes);
newResults.push_back(write->getResult(0));
diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
index fcaf1ec944b479..6727f3f461722b 100644
--- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
+++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
@@ -345,7 +345,7 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc,
int64_t readRank = readShape.size();
auto zero = builder.create<arith::ConstantIndexOp>(loc, 0);
SmallVector<bool> inBoundsVal(readRank, true);
- if (!useInBoundsInsteadOfMasking) {
+ if (useInBoundsInsteadOfMasking) {
// Update the inBounds attribute.
for (unsigned i = 0; i < readRank; i++)
inBoundsVal[i] = (sourceShape[i] == readShape[i]) &&
@@ -359,7 +359,7 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc,
/*padding=*/padValue,
/*inBounds=*/inBoundsVal);
- if (llvm::equal(readShape, sourceShape) || !useInBoundsInsteadOfMasking)
+ if (llvm::equal(readShape, sourceShape) || useInBoundsInsteadOfMasking)
return transferReadOp;
SmallVector<OpFoldResult> mixedSourceDims =
tensor::getMixedSizes(builder, loc, source);
More information about the Mlir-commits
mailing list