[Mlir-commits] [mlir] Make createReadOrMaskedRead and isValidMaskedInputVector vector utilities (PR #89119)
Lubomir Litchev
llvmlistbot at llvm.org
Fri Apr 19 10:31:08 PDT 2024
https://github.com/LLITCHEV updated https://github.com/llvm/llvm-project/pull/89119
From ba6d1ecf953172b41a1d3f8a35a30b7df97a67e7 Mon Sep 17 00:00:00 2001
From: Lubo Litchev <lubol at google.com>
Date: Wed, 17 Apr 2024 18:40:54 +0000
Subject: [PATCH 1/6] Make createReadOrMaskedRead a utility
Made createReadOrMaskedRead a utility function so it is accessible
outside of its compilation unit. Needed by the new IREE TopK
implementation.
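For illustration, a minimal sketch of how an out-of-tree caller such as
IREE might invoke the exposed utility; the wrapper function, builder, and
values are hypothetical stand-ins, not part of this patch:

  static Value readExample(OpBuilder &builder, Location loc, Value src) {
    // Read a vector<4x8xf32> from `src`; the utility masks the read when
    // the type of `src` does not match the requested read shape.
    Value pad = builder.create<arith::ConstantOp>(
        loc, builder.getF32FloatAttr(0.0f));
    return mlir::linalg::createReadOrMaskedRead(
        builder, loc, /*source=*/src, /*readShape=*/{4, 8}, pad);
  }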
---
.../Dialect/Linalg/Transforms/Transforms.h | 6 +++
.../Dialect/Linalg/Transforms/Transforms.cpp | 29 ++++++++++++++
.../Linalg/Transforms/Vectorization.cpp | 40 -------------------
3 files changed, 35 insertions(+), 40 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index feb3b3f03cf538..f4c56b671e9d7e 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -1616,6 +1616,12 @@ void populateSplitReductionPattern(
const ControlSplitReductionFn &controlSplitReductionFn,
bool useAlloc = false);
+/// Create a TransferReadOp from `source` with static shape `readShape`. If the
+/// vector type for the read is not the same as the type of `source`, then a
+/// mask is created on the read.
+Value createReadOrMaskedRead(OpBuilder &builder, Location loc,
+ Value source, ArrayRef<int64_t> readShape,
+ Value padValue);
} // namespace linalg
} // namespace mlir
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index a17bc8e4cd318f..b32ebfc380fcfb 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -1593,3 +1593,32 @@ void linalg::populateDecomposeConvolutionPatterns(RewritePatternSet &patterns,
DownscaleSizeOneWindowed2DConvolution<PoolingNchwMaxOp, PoolingNcwMaxOp>>(
patterns.getContext(), benefit);
}
+
+Value mlir::linalg::createReadOrMaskedRead(OpBuilder &builder, Location loc,
+ Value source, ArrayRef<int64_t> readShape,
+ Value padValue) {
+ assert(llvm::none_of(readShape,
+ [](int64_t s) { return s == ShapedType::kDynamic; }));
+ auto sourceShape = dyn_cast<ShapedType>(source.getType()).getShape();
+ assert(sourceShape.size() == readShape.size());
+ auto maskType = VectorType::get(readShape, builder.getI1Type());
+ auto vectorType = VectorType::get(readShape, padValue.getType());
+ int64_t readRank = readShape.size();
+ auto zero = builder.create<arith::ConstantIndexOp>(loc, 0);
+ auto transferReadOp = builder.create<vector::TransferReadOp>(
+ loc,
+ /*vectorType=*/vectorType,
+ /*source=*/source,
+ /*indices=*/SmallVector<Value>(readRank, zero),
+ /*padding=*/padValue,
+ /*inBounds=*/SmallVector<bool>(readRank, true));
+ if (llvm::equal(readShape, sourceShape)) {
+ return transferReadOp;
+ }
+ SmallVector<OpFoldResult> mixedSourceDims =
+ tensor::getMixedSizes(builder, loc, source);
+ Value mask =
+ builder.create<vector::CreateMaskOp>(loc, maskType, mixedSourceDims);
+ return mlir::vector::maskOperation(builder, transferReadOp, mask)
+ ->getResult(0);
+}
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index df61381432921b..e2ca5e14377286 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1410,46 +1410,6 @@ static SmallVector<int64_t> getTiledPackShape(tensor::PackOp packOp,
return applyPermutation(destShape, tensor::getPackInverseDestPerm(packOp));
}
-/// Create a TransferReadOp from `source` with static shape `readShape`. If the
-/// vector type for the read is not the same as the type of `source`, then a
-/// mask is created on the read. If `doMasking` parameter is set to false we
-/// update the `inBounds` attribute instead of masking.
-static Value createReadOrMaskedRead(OpBuilder &builder, Location loc,
- Value source, ArrayRef<int64_t> readShape,
- Value padValue, bool doMasking = true) {
- assert(llvm::none_of(readShape,
- [](int64_t s) { return s == ShapedType::kDynamic; }));
- auto sourceShape = dyn_cast<ShapedType>(source.getType()).getShape();
- assert(sourceShape.size() == readShape.size());
- auto maskType = VectorType::get(readShape, builder.getI1Type());
- auto vectorType = VectorType::get(readShape, padValue.getType());
- int64_t readRank = readShape.size();
- auto zero = builder.create<arith::ConstantIndexOp>(loc, 0);
- SmallVector<bool> inBoundsVal(readRank, true);
- if (!doMasking) {
- // Update the inBounds attribute.
- for (unsigned i = 0; i < readRank; i++)
- inBoundsVal[i] = sourceShape[i] == readShape[i];
- }
- auto transferReadOp = builder.create<vector::TransferReadOp>(
- loc,
- /*vectorType=*/vectorType,
- /*source=*/source,
- /*indices=*/SmallVector<Value>(readRank, zero),
- /*padding=*/padValue,
- /*inBounds=*/inBoundsVal);
-
- if (llvm::equal(readShape, sourceShape) || !doMasking) {
- return transferReadOp;
- }
- SmallVector<OpFoldResult> mixedSourceDims =
- tensor::getMixedSizes(builder, loc, source);
- Value mask =
- builder.create<vector::CreateMaskOp>(loc, maskType, mixedSourceDims);
- return mlir::vector::maskOperation(builder, transferReadOp, mask)
- ->getResult(0);
-}
-
/// Given an input, the mixed destSizes, and the vector sizes for vectorization,
/// create an empty destination tensor and create a TransferWriteOp from the
/// input to the empty tensor. If the destination shape is not the same as the
From 10823adeb62c0c42b63d9e66706ead84bb0fb534 Mon Sep 17 00:00:00 2001
From: Lubo Litchev <lubol at google.com>
Date: Wed, 17 Apr 2024 18:48:13 +0000
Subject: [PATCH 2/6] Formatting fixes.
---
mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h | 5 ++---
mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp | 5 +++--
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index f4c56b671e9d7e..a8175c98776775 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -1619,9 +1619,8 @@ void populateSplitReductionPattern(
/// Create a TransferReadOp from `source` with static shape `readShape`. If the
/// vector type for the read is not the same as the type of `source`, then a
/// mask is created on the read.
-Value createReadOrMaskedRead(OpBuilder &builder, Location loc,
- Value source, ArrayRef<int64_t> readShape,
- Value padValue);
+Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source,
+ ArrayRef<int64_t> readShape, Value padValue);
} // namespace linalg
} // namespace mlir
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index b32ebfc380fcfb..ebc7933f7fd35b 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -1595,8 +1595,9 @@ void linalg::populateDecomposeConvolutionPatterns(RewritePatternSet &patterns,
}
Value mlir::linalg::createReadOrMaskedRead(OpBuilder &builder, Location loc,
- Value source, ArrayRef<int64_t> readShape,
- Value padValue) {
+ Value source,
+ ArrayRef<int64_t> readShape,
+ Value padValue) {
assert(llvm::none_of(readShape,
[](int64_t s) { return s == ShapedType::kDynamic; }));
auto sourceShape = dyn_cast<ShapedType>(source.getType()).getShape();
From 0898983570b7458ad8ed22065d37a5d9592954fd Mon Sep 17 00:00:00 2001
From: Lubo Litchev <lubol at google.com>
Date: Wed, 17 Apr 2024 21:21:10 +0000
Subject: [PATCH 3/6] Merge of the latest changes.
---
.../mlir/Dialect/Linalg/Transforms/Transforms.h | 3 ++-
mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp | 13 ++++++++++---
2 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index a8175c98776775..db6b23c5894941 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -1620,7 +1620,8 @@ void populateSplitReductionPattern(
/// vector type for the read is not the same as the type of `source`, then a
/// mask is created on the read.
Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source,
- ArrayRef<int64_t> readShape, Value padValue);
+ ArrayRef<int64_t> readShape, Value padValue,
+ bool doMasking = true);
} // namespace linalg
} // namespace mlir
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index ebc7933f7fd35b..b4d70c464e1268 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -1597,7 +1597,7 @@ void linalg::populateDecomposeConvolutionPatterns(RewritePatternSet &patterns,
Value mlir::linalg::createReadOrMaskedRead(OpBuilder &builder, Location loc,
Value source,
ArrayRef<int64_t> readShape,
- Value padValue) {
+ Value padValue, bool doMasking) {
assert(llvm::none_of(readShape,
[](int64_t s) { return s == ShapedType::kDynamic; }));
auto sourceShape = dyn_cast<ShapedType>(source.getType()).getShape();
@@ -1606,14 +1606,21 @@ Value mlir::linalg::createReadOrMaskedRead(OpBuilder &builder, Location loc,
auto vectorType = VectorType::get(readShape, padValue.getType());
int64_t readRank = readShape.size();
auto zero = builder.create<arith::ConstantIndexOp>(loc, 0);
+ SmallVector<bool> inBoundsVal(readRank, true);
+ if (!doMasking) {
+ // Update the inBounds attribute.
+ for (unsigned i = 0; i < readRank; i++)
+ inBoundsVal[i] = sourceShape[i] == readShape[i];
+ }
auto transferReadOp = builder.create<vector::TransferReadOp>(
loc,
/*vectorType=*/vectorType,
/*source=*/source,
/*indices=*/SmallVector<Value>(readRank, zero),
/*padding=*/padValue,
- /*inBounds=*/SmallVector<bool>(readRank, true));
- if (llvm::equal(readShape, sourceShape)) {
+ /*inBounds=*/inBoundsVal);
+
+ if (llvm::equal(readShape, sourceShape) || !doMasking) {
return transferReadOp;
}
SmallVector<OpFoldResult> mixedSourceDims =
From d31a4cf82132ef3a9df95d343e031f63b305e077 Mon Sep 17 00:00:00 2001
From: Lubo Litchev <lubol at google.com>
Date: Fri, 19 Apr 2024 14:46:46 +0000
Subject: [PATCH 4/6] Addressed CR requests.
Moved code around and addressed other CR feedback.
---
.../Dialect/Linalg/Transforms/Transforms.h | 6 --
.../mlir/Dialect/Vector/Utils/VectorUtils.h | 17 +++++
.../Dialect/Linalg/Transforms/Transforms.cpp | 37 ----------
.../Linalg/Transforms/Vectorization.cpp | 50 +++-----------
mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp | 68 +++++++++++++++++++
5 files changed, 93 insertions(+), 85 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index db6b23c5894941..feb3b3f03cf538 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -1616,12 +1616,6 @@ void populateSplitReductionPattern(
const ControlSplitReductionFn &controlSplitReductionFn,
bool useAlloc = false);
-/// Create a TransferReadOp from `source` with static shape `readShape`. If the
-/// vector type for the read is not the same as the type of `source`, then a
-/// mask is created on the read.
-Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source,
- ArrayRef<int64_t> readShape, Value padValue,
- bool doMasking = true);
} // namespace linalg
} // namespace mlir
diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
index f88fbdf9e62765..f7bea0c25813cb 100644
--- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
+++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
@@ -180,6 +180,23 @@ struct MaskableOpRewritePattern : OpRewritePattern<SourceOp> {
/// are not linearizable.
bool isLinearizableVector(VectorType type);
+/// Create a TransferReadOp from `source` with static shape `readShape`. If the
+/// vector type for the read is not the same as the type of `source`, then a
+/// mask is created on the read.
+/// enableMasking if false, the inBoundsVal values are set properly, based on
+/// the rank dimensions of the source and destination tensors.
+Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source,
+ ArrayRef<int64_t> readShape, Value padValue,
+ bool enableMasking = true);
+
+/// Returns success if `inputVectorSizes` is a valid masking configuration for the
+/// given `shape`, i.e., it meets:
+/// 1. The numbers of elements in both arrays are equal.
+/// 2. `inputVectorSizes` does not have dynamic dimensions.
+/// 3. All the values in `inputVectorSizes` are greater than or equal to
+/// static sizes in `shape`.
+LogicalResult isValidMaskedInputVector(ArrayRef<int64_t> shape,
+ ArrayRef<int64_t> inputVectorSizes);
} // namespace vector
/// Constructs a permutation map of invariant memref indices to vector
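A hedged illustration of the three conditions with made-up values: for
shape = {?, 8}, the vector sizes {4, 8} pass, {4} fails condition 1 (rank
mismatch), {4, ?} fails condition 2, and {4, 4} fails condition 3 because
the static size 8 exceeds 4:

  static LogicalResult checkExample() {
    SmallVector<int64_t> shape = {ShapedType::kDynamic, 8};
    // Succeeds: ranks match, sizes are static, and 8 >= 8 for the static dim.
    return vector::isValidMaskedInputVector(shape, /*inputVectorSizes=*/{4, 8});
  }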
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index b4d70c464e1268..a17bc8e4cd318f 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -1593,40 +1593,3 @@ void linalg::populateDecomposeConvolutionPatterns(RewritePatternSet &patterns,
DownscaleSizeOneWindowed2DConvolution<PoolingNchwMaxOp, PoolingNcwMaxOp>>(
patterns.getContext(), benefit);
}
-
-Value mlir::linalg::createReadOrMaskedRead(OpBuilder &builder, Location loc,
- Value source,
- ArrayRef<int64_t> readShape,
- Value padValue, bool doMasking) {
- assert(llvm::none_of(readShape,
- [](int64_t s) { return s == ShapedType::kDynamic; }));
- auto sourceShape = dyn_cast<ShapedType>(source.getType()).getShape();
- assert(sourceShape.size() == readShape.size());
- auto maskType = VectorType::get(readShape, builder.getI1Type());
- auto vectorType = VectorType::get(readShape, padValue.getType());
- int64_t readRank = readShape.size();
- auto zero = builder.create<arith::ConstantIndexOp>(loc, 0);
- SmallVector<bool> inBoundsVal(readRank, true);
- if (!doMasking) {
- // Update the inBounds attribute.
- for (unsigned i = 0; i < readRank; i++)
- inBoundsVal[i] = sourceShape[i] == readShape[i];
- }
- auto transferReadOp = builder.create<vector::TransferReadOp>(
- loc,
- /*vectorType=*/vectorType,
- /*source=*/source,
- /*indices=*/SmallVector<Value>(readRank, zero),
- /*padding=*/padValue,
- /*inBounds=*/inBoundsVal);
-
- if (llvm::equal(readShape, sourceShape) || !doMasking) {
- return transferReadOp;
- }
- SmallVector<OpFoldResult> mixedSourceDims =
- tensor::getMixedSizes(builder, loc, source);
- Value mask =
- builder.create<vector::CreateMaskOp>(loc, maskType, mixedSourceDims);
- return mlir::vector::maskOperation(builder, transferReadOp, mask)
- ->getResult(0);
-}
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index e2ca5e14377286..4b5e19cb4a4c30 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1667,8 +1667,8 @@ vectorizeAsTensorPadOp(RewriterBase &rewriter, tensor::PadOp padOp,
.reifyResultShapes(rewriter, reifiedReturnShapes);
(void)status; // prevent unused variable warning on non-assert builds
assert(succeeded(status) && "failed to reify result shapes");
- auto maskedRead = createReadOrMaskedRead(rewriter, loc, padOp.getSource(),
- inputVectorSizes, padValue);
+ auto maskedRead = vector::createReadOrMaskedRead(
+ rewriter, loc, padOp.getSource(), inputVectorSizes, padValue);
Operation *write = createWriteOrMaskedWrite(
rewriter, loc, maskedRead, reifiedReturnShapes[0], inputVectorSizes);
newResults.push_back(write->getResult(0));
@@ -1741,41 +1741,6 @@ vectorizeDynamicLinalgOpPrecondition(linalg::LinalgOp op,
return success();
}
-/// Returns success if `inputVectorSizes` is a valid masking configuraion for
-/// given `shape`, i.e., it meets:
-/// 1. The numbers of elements in both array are equal.
-/// 2. `inputVectorSizes` does not have dynamic dimensions.
-/// 3. All the values in `inputVectorSizes` are greater than or equal to
-/// static sizes in `shape`.
-static LogicalResult
-isValidMaskedInputVector(ArrayRef<int64_t> shape,
- ArrayRef<int64_t> inputVectorSizes) {
- LDBG("Iteration space static sizes:");
- LLVM_DEBUG(llvm::interleaveComma(shape, llvm::dbgs()));
- LLVM_DEBUG(llvm::dbgs() << "\n");
-
- if (inputVectorSizes.size() != shape.size()) {
- LDBG("Input vector sizes don't match the number of loops");
- return failure();
- }
- if (ShapedType::isDynamicShape(inputVectorSizes)) {
- LDBG("Input vector sizes can't have dynamic dimensions");
- return failure();
- }
- if (!llvm::all_of(llvm::zip(shape, inputVectorSizes),
- [](std::tuple<int64_t, int64_t> sizePair) {
- int64_t staticSize = std::get<0>(sizePair);
- int64_t inputSize = std::get<1>(sizePair);
- return ShapedType::isDynamic(staticSize) ||
- staticSize <= inputSize;
- })) {
- LDBG("Input vector sizes must be greater than or equal to iteration space "
- "static sizes");
- return failure();
- }
- return success();
-}
-
/// Need to check if the inner-tiles are static/constant.
static LogicalResult
vectorizeUnPackOpPrecondition(tensor::UnPackOp unpackOp,
@@ -1789,7 +1754,7 @@ vectorizeUnPackOpPrecondition(tensor::UnPackOp unpackOp,
}
llvm::ArrayRef<int64_t> resultShape = unpackOp.getDestType().getShape();
if (!inputVectorSizes.empty() &&
- failed(isValidMaskedInputVector(resultShape, inputVectorSizes)))
+ failed(vector::isValidMaskedInputVector(resultShape, inputVectorSizes)))
return failure();
return success();
@@ -1803,8 +1768,8 @@ static LogicalResult vectorizeLinalgOpPrecondition(
return failure();
// Check API contract for input vector sizes.
if (!inputVectorSizes.empty() &&
- failed(isValidMaskedInputVector(linalgOp.getStaticLoopRanges(),
- inputVectorSizes)))
+ failed(vector::isValidMaskedInputVector(linalgOp.getStaticLoopRanges(),
+ inputVectorSizes)))
return failure();
if (linalgOp.hasDynamicShape() && failed(vectorizeDynamicLinalgOpPrecondition(
@@ -1880,7 +1845,7 @@ vectorizePackOpPrecondition(tensor::PackOp packOp,
}
if (!satisfyEmptyCond &&
- failed(isValidMaskedInputVector(
+ failed(vector::isValidMaskedInputVector(
resultTensorShape.take_front(packOp.getSourceRank()),
inputVectorSizes)))
return failure();
@@ -1905,7 +1870,8 @@ vectorizePadOpPrecondition(tensor::PadOp padOp,
}
ArrayRef<int64_t> resultTensorShape = padOp.getResultType().getShape();
- if (failed(isValidMaskedInputVector(resultTensorShape, inputVectorSizes)))
+ if (failed(vector::isValidMaskedInputVector(resultTensorShape,
+ inputVectorSizes)))
return failure();
if (llvm::any_of(padOp.getLow(), [](Value v) {
diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
index ebc6f5cbcaa9ed..3057ac5d2f97b6 100644
--- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
+++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
@@ -322,3 +322,71 @@ bool vector::isLinearizableVector(VectorType type) {
auto numScalableDims = llvm::count(type.getScalableDims(), true);
return (type.getRank() > 1) && (numScalableDims <= 1);
}
+
+Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc,
+ Value source, ArrayRef<int64_t> readShape,
+ Value padValue, bool enableMasking) {
+ assert(llvm::none_of(readShape,
+ [](int64_t s) { return s == ShapedType::kDynamic; }) &&
+ "expected static shape");
+ auto sourceShape = cast<ShapedType>(source.getType()).getShape();
+ assert(sourceShape.size() == readShape.size() && "expected same ranks.");
+ auto maskType = VectorType::get(readShape, builder.getI1Type());
+ auto vectorType = VectorType::get(readShape, padValue.getType());
+ assert(padValue.getType() == sourceShape.getElementType() &&
+ "expected same pad element type to match source element type")
+ int64_t readRank = readShape.size();
+ auto zero = builder.create<arith::ConstantIndexOp>(loc, 0);
+ SmallVector<bool> inBoundsVal(readRank, true);
+ if (!enableMasking) {
+ // Update the inBounds attribute.
+ for (unsigned i = 0; i < readRank; i++)
+ inBoundsVal[i] = (sourceShape[i] == readShape[i]) &&
+ !ShapedType::isDynamic(sourceShape[] i);
+ }
+ auto transferReadOp = builder.create<vector::TransferReadOp>(
+ loc,
+ /*vectorType=*/vectorType,
+ /*source=*/source,
+ /*indices=*/SmallVector<Value>(readRank, zero),
+ /*padding=*/padValue,
+ /*inBounds=*/inBoundsVal);
+
+ if (llvm::equal(readShape, sourceShape) || !doMasking)
+ return transferReadOp;
+ SmallVector<OpFoldResult> mixedSourceDims =
+ tensor::getMixedSizes(builder, loc, source);
+ Value mask =
+ builder.create<vector::CreateMaskOp>(loc, maskType, mixedSourceDims);
+ return mlir::vector::maskOperation(builder, transferReadOp, mask)
+ ->getResult(0);
+}
+
+LogicalResult
+vector::isValidMaskedInputVector(ArrayRef<int64_t> shape,
+ ArrayRef<int64_t> inputVectorSizes) {
+ LDBG("Iteration space static sizes:");
+ LLVM_DEBUG(llvm::interleaveComma(shape, llvm::dbgs()));
+ LLVM_DEBUG(llvm::dbgs() << "\n");
+
+ if (inputVectorSizes.size() != shape.size()) {
+ LDBG("Input vector sizes don't match the number of loops");
+ return failure();
+ }
+ if (ShapedType::isDynamicShape(inputVectorSizes)) {
+ LDBG("Input vector sizes can't have dynamic dimensions");
+ return failure();
+ }
+ if (!llvm::all_of(llvm::zip(shape, inputVectorSizes),
+ [](std::tuple<int64_t, int64_t> sizePair) {
+ int64_t staticSize = std::get<0>(sizePair);
+ int64_t inputSize = std::get<1>(sizePair);
+ return ShapedType::isDynamic(staticSize) ||
+ staticSize <= inputSize;
+ })) {
+ LDBG("Input vector sizes must be greater than or equal to iteration space "
+ "static sizes");
+ return failure();
+ }
+ return success();
+}
From 1897a650dcf5b428c81278e135729313c7b1c869 Mon Sep 17 00:00:00 2001
From: Lubo Litchev <lubol at google.com>
Date: Fri, 19 Apr 2024 17:17:35 +0000
Subject: [PATCH 5/6] Fixed some syntax errors, comments, and other CR feedback.
---
.../mlir/Dialect/Vector/Utils/VectorUtils.h | 12 +++++++++---
.../Linalg/Transforms/Vectorization.cpp | 6 +++---
mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp | 18 ++++++++++++------
3 files changed, 24 insertions(+), 12 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
index f7bea0c25813cb..0fb8a2591e0bf0 100644
--- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
+++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
@@ -182,9 +182,15 @@ bool isLinearizableVector(VectorType type);
/// Create a TransferReadOp from `source` with static shape `readShape`. If the
/// vector type for the read is not the same as the type of `source`, then a
-/// mask is created on the read.
-/// enableMasking if false, the inBoundsVal values are set properly, based on
-/// the rank dimensions of the source and destination tensors.
+/// mask is created on the read, provided masking is enabled and the read
+/// shape does not match the source shape.
+///
+/// If `enableMasking` is false, no mask is created; instead, the `inBounds`
+/// values are set per dimension, based on whether the source and read shapes
+/// match along that dimension.
+///
+/// Note that the internal `vector::TransferReadOp` always reads at index zero
+/// for each dimension of the passed-in tensor.
Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source,
ArrayRef<int64_t> readShape, Value padValue,
bool enableMasking = true);
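For reference, a hedged sketch of the two modes as of this revision (the
wrapper and value names are illustrative): with src of type tensor<6x8xf32>
and readShape = {8, 8}, the default mode wraps the read in a vector.mask
built from the tensor.dim sizes, while enableMasking = false instead emits
in_bounds = [false, true] and no mask:

  static void readBothModes(OpBuilder &b, Location loc, Value src, Value pad) {
    // Masked: shapes differ (6x8 vs. 8x8), so a mask is created.
    Value masked = vector::createReadOrMaskedRead(b, loc, src, {8, 8}, pad);
    // Unmasked: the in_bounds attribute records the per-dimension match instead.
    Value bounded = vector::createReadOrMaskedRead(
        b, loc, src, {8, 8}, pad, /*enableMasking=*/false);
    (void)masked;
    (void)bounded;
  }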
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 4b5e19cb4a4c30..30df53c2658dfa 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1516,8 +1516,8 @@ vectorizeAsTensorPackOp(RewriterBase &rewriter, tensor::PackOp packOp,
invertPermutationVector(outerDimsPerm));
for (auto [idx, size] : enumerate(innerTiles))
inputShape[innerDimsPos[idx]] *= size;
- auto maskedRead = createReadOrMaskedRead(rewriter, loc, packOp.getSource(),
- inputShape, padValue, doMasking);
+ auto maskedRead = vector::createReadOrMaskedRead(
+ rewriter, loc, packOp.getSource(), inputShape, padValue, doMasking);
// Create ShapeCastOp.
SmallVector<int64_t> destShape(inputVectorSizes);
@@ -1609,7 +1609,7 @@ vectorizeAsTensorUnpackOp(RewriterBase &rewriter, tensor::UnPackOp unpackOp,
// Read result, mask if necessary. If transferReadOp shape is not equal
// to shape of source, then a mask is necessary.
- Value readResult = createReadOrMaskedRead(
+ Value readResult = vector::createReadOrMaskedRead(
rewriter, loc, unpackOp.getSource(),
ArrayRef<int64_t>(readMaskShape.begin(), readMaskShape.end()), padValue);
diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
index 3057ac5d2f97b6..7d60e2a7f3e75e 100644
--- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
+++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
@@ -30,6 +30,11 @@
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
+#define DEBUG_TYPE "vector-utils"
+
+#define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
+#define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n")
+
using namespace mlir;
/// Helper function that creates a memref::DimOp or tensor::DimOp depending on
@@ -329,20 +334,21 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc,
assert(llvm::none_of(readShape,
[](int64_t s) { return s == ShapedType::kDynamic; }) &&
"expected static shape");
- auto sourceShape = cast<ShapedType>(source.getType()).getShape();
+ auto sourceShapedType = cast<ShapedType>(source.getType());
+ auto sourceShape = sourceShapedType.getShape();
assert(sourceShape.size() == readShape.size() && "expected same ranks.");
auto maskType = VectorType::get(readShape, builder.getI1Type());
auto vectorType = VectorType::get(readShape, padValue.getType());
- assert(padValue.getType() == sourceShape.getElementType() &&
- "expected same pad element type to match source element type")
- int64_t readRank = readShape.size();
+ assert(padValue.getType() == sourceShapedType.getElementType() &&
+ "expected same pad element type to match source element type");
+ int64_t readRank = readShape.size();
auto zero = builder.create<arith::ConstantIndexOp>(loc, 0);
SmallVector<bool> inBoundsVal(readRank, true);
if (!enableMasking) {
// Update the inBounds attribute.
for (unsigned i = 0; i < readRank; i++)
inBoundsVal[i] = (sourceShape[i] == readShape[i]) &&
- !ShapedType::isDynamic(sourceShape[] i);
+ !ShapedType::isDynamic(sourceShape[i]);
}
auto transferReadOp = builder.create<vector::TransferReadOp>(
loc,
@@ -352,7 +358,7 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc,
/*padding=*/padValue,
/*inBounds=*/inBoundsVal);
- if (llvm::equal(readShape, sourceShape) || !doMasking)
+ if (llvm::equal(readShape, sourceShape) || !enableMasking)
return transferReadOp;
SmallVector<OpFoldResult> mixedSourceDims =
tensor::getMixedSizes(builder, loc, source);
From 31a0b27ccbed7afc1f8389ca23ae33fb9b8e49fb Mon Sep 17 00:00:00 2001
From: Lubo Litchev <lubol at google.com>
Date: Fri, 19 Apr 2024 17:30:25 +0000
Subject: [PATCH 6/6] Addressed more CR feedback.
---
mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h | 2 +-
mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp | 7 ++++---
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
index 0fb8a2591e0bf0..a56f2a78bdff0d 100644
--- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
+++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
@@ -193,7 +193,7 @@ bool isLinearizableVector(VectorType type);
/// for each dimension of the passed-in tensor.
Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source,
ArrayRef<int64_t> readShape, Value padValue,
- bool enableMasking = true);
+ bool useInBoundsInsteadOfMasking = true);
/// Returns success if `inputVectorSizes` is a valid masking configuration for the
/// given `shape`, i.e., it meets:
diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
index 7d60e2a7f3e75e..fcaf1ec944b479 100644
--- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
+++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
@@ -330,7 +330,8 @@ bool vector::isLinearizableVector(VectorType type) {
Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc,
Value source, ArrayRef<int64_t> readShape,
- Value padValue, bool enableMasking) {
+ Value padValue,
+ bool useInBoundsInsteadOfMasking) {
assert(llvm::none_of(readShape,
[](int64_t s) { return s == ShapedType::kDynamic; }) &&
"expected static shape");
@@ -344,7 +345,7 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc,
int64_t readRank = readShape.size();
auto zero = builder.create<arith::ConstantIndexOp>(loc, 0);
SmallVector<bool> inBoundsVal(readRank, true);
- if (!enableMasking) {
+ if (!useInBoundsInsteadOfMasking) {
// Update the inBounds attribute.
for (unsigned i = 0; i < readRank; i++)
inBoundsVal[i] = (sourceShape[i] == readShape[i]) &&
@@ -358,7 +359,7 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc,
/*padding=*/padValue,
/*inBounds=*/inBoundsVal);
- if (llvm::equal(readShape, sourceShape) || !enableMasking)
+ if (llvm::equal(readShape, sourceShape) || !useInBoundsInsteadOfMasking)
return transferReadOp;
SmallVector<OpFoldResult> mixedSourceDims =
tensor::getMixedSizes(builder, loc, source);
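Taken together, a hedged end-to-end sketch of the two utilities as they
stand at the end of this series (all names are illustrative): validate the
requested vector sizes against the source shape, then emit the possibly
masked read:

  static FailureOr<Value> validateAndRead(RewriterBase &rewriter, Location loc,
                                          Value src, Value pad) {
    auto srcType = cast<ShapedType>(src.getType());
    SmallVector<int64_t> vecSizes = {4, 8};
    if (failed(vector::isValidMaskedInputVector(srcType.getShape(), vecSizes)))
      return failure();
    return vector::createReadOrMaskedRead(rewriter, loc, src, vecSizes, pad);
  }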