[Mlir-commits] [mlir] 85480a4 - [mlir] Directly call ShapedType::isDynamic without lambdas (NFC) (#142994)
llvmlistbot at llvm.org
Thu Jun 5 16:14:31 PDT 2025
Author: Kazu Hirata
Date: 2025-06-05T16:14:27-07:00
New Revision: 85480a4d37b4d3eaf5ea86f642978cc834e1a47e
URL: https://github.com/llvm/llvm-project/commit/85480a4d37b4d3eaf5ea86f642978cc834e1a47e
DIFF: https://github.com/llvm/llvm-project/commit/85480a4d37b4d3eaf5ea86f642978cc834e1a47e.diff
LOG: [mlir] Directly call ShapedType::isDynamic without lambdas (NFC) (#142994)
We do not need lambdas in these places.
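For readers unfamiliar with the pattern, a minimal standalone sketch (not part of this commit) of why the lambdas are redundant: std::count_if and llvm::any_of accept any callable, so a static member function such as ShapedType::isDynamic(int64_t) can be passed by name. The Shaped struct below is a hypothetical stand-in for mlir::ShapedType, used only for illustration.

#include <algorithm>
#include <cstdint>
#include <limits>
#include <vector>

// Hypothetical stand-in for mlir::ShapedType; only the static predicate matters here.
struct Shaped {
  static constexpr int64_t kDynamic = std::numeric_limits<int64_t>::min();
  static bool isDynamic(int64_t size) { return size == kDynamic; }
};

int main() {
  std::vector<int64_t> shape = {4, Shaped::kDynamic, 8};

  // Before: a lambda that only forwards to the static function.
  auto withLambda = std::count_if(shape.begin(), shape.end(),
                                  [](int64_t s) { return Shaped::isDynamic(s); });

  // After: pass the static member function directly; count_if takes any callable.
  auto direct = std::count_if(shape.begin(), shape.end(), Shaped::isDynamic);

  return withLambda == direct ? 0 : 1;  // both count the single dynamic dim
}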
Added:
Modified:
mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
mlir/lib/Interfaces/ViewLikeInterface.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
index 3d4dcdee2663b..6051aea849971 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -131,9 +131,8 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
assert(!getCopy() && "no dim sizes specified when copying a tensor");
assert(isDynamicDim(idx) && "expected dynamic size");
ArrayRef<int64_t> shape = getType().getShape();
- return std::count_if(
- shape.begin(), shape.begin() + idx,
- [&](int64_t size) { return ShapedType::isDynamic(size); });
+ return std::count_if(shape.begin(), shape.begin() + idx,
+ ShapedType::isDynamic);
}
// Return the Value of the dynamic size of the tensor at dimension
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
index dd8ef9608a821..7188987e5e938 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
@@ -876,11 +876,8 @@ class BubbleUpPackOpThroughReshapeOp final
return failure();
}
// Currently only support static inner tile sizes.
- if (llvm::any_of(packOp.getStaticTiles(), [](int64_t size) {
- return ShapedType::isDynamic(size);
- })) {
+ if (llvm::any_of(packOp.getStaticTiles(), ShapedType::isDynamic))
return failure();
- }
// User controlled propagation function.
if (!controlFn(&packOp.getSourceMutable()))
@@ -1002,11 +999,8 @@ class PushDownUnPackOpThroughReshapeOp final
return failure();
}
// Currently only support static inner tile sizes.
- if (llvm::any_of(unPackOp.getStaticTiles(), [](int64_t size) {
- return ShapedType::isDynamic(size);
- })) {
+ if (llvm::any_of(unPackOp.getStaticTiles(), ShapedType::isDynamic))
return failure();
- }
Operation *consumerOp = *result.user_begin();
return TypeSwitch<Operation *, LogicalResult>(consumerOp)
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index 8718c57b9e86c..615d1f66414b9 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -227,8 +227,7 @@ FailureOr<LowerPackResult> linalg::lowerPack(RewriterBase &rewriter,
// 1. Filter out NYI cases.
auto packedTensorType =
cast<RankedTensorType>(packOp->getResultTypes().front());
- if (llvm::any_of(packOp.getStaticInnerTiles(),
- [](int64_t size) { return ShapedType::isDynamic(size); })) {
+ if (llvm::any_of(packOp.getStaticInnerTiles(), ShapedType::isDynamic)) {
return rewriter.notifyMatchFailure(
packOp,
"non-static shape NYI, needs a more powerful tensor.expand_shape op");
diff --git a/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
index 2b229d60c691b..657624b817af2 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
@@ -185,13 +185,11 @@ struct BubbleUpExpandThroughParallelCollapse
ArrayRef<int64_t> collapsedStaticShapes = staticSourceSize.slice(
collapseReassociation.front(), collapseReassociation.size());
int64_t numCollapsedDynamic =
- llvm::count_if(collapsedStaticShapes,
- [](int64_t d) { return ShapedType::isDynamic(d); });
+ llvm::count_if(collapsedStaticShapes, ShapedType::isDynamic);
ArrayRef<int64_t> expandedStaticShapes = staticResultSize.slice(
expandReassociation.front(), expandReassociation.size());
int64_t numExpandedDynamic =
- llvm::count_if(expandedStaticShapes,
- [](int64_t d) { return ShapedType::isDynamic(d); });
+ llvm::count_if(expandedStaticShapes, ShapedType::isDynamic);
if (numCollapsedDynamic > 1 || numExpandedDynamic > 1 ||
collapsedStaticShapes != expandedStaticShapes) {
return failure();
diff --git a/mlir/lib/Interfaces/ViewLikeInterface.cpp b/mlir/lib/Interfaces/ViewLikeInterface.cpp
index 70dd7b4aec88c..d70cfebbe53e6 100644
--- a/mlir/lib/Interfaces/ViewLikeInterface.cpp
+++ b/mlir/lib/Interfaces/ViewLikeInterface.cpp
@@ -27,9 +27,7 @@ LogicalResult mlir::verifyListOfOperandsOrIntegers(Operation *op,
return op->emitError("expected ") << numElements << " " << name
<< " values, got " << staticVals.size();
unsigned expectedNumDynamicEntries =
- llvm::count_if(staticVals, [](int64_t staticVal) {
- return ShapedType::isDynamic(staticVal);
- });
+ llvm::count_if(staticVals, ShapedType::isDynamic);
if (values.size() != expectedNumDynamicEntries)
return op->emitError("expected ")
<< expectedNumDynamicEntries << " dynamic " << name << " values";
@@ -270,5 +268,5 @@ bool mlir::detail::sameOffsetsSizesAndStrides(
unsigned mlir::detail::getNumDynamicEntriesUpToIdx(ArrayRef<int64_t> staticVals,
unsigned idx) {
return std::count_if(staticVals.begin(), staticVals.begin() + idx,
- [&](int64_t val) { return ShapedType::isDynamic(val); });
+ ShapedType::isDynamic);
}