[Mlir-commits] [mlir] [mlir] Strip away lambdas (NFC) (PR #143280)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Sat Jun 7 11:15:51 PDT 2025


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-mlir-affine

Author: Kazu Hirata (kazutakahirata)

<details>
<summary>Changes</summary>

We don't need lambdas here: each one merely wraps an existing predicate (e.g. `VectorType::isValidElementType`, `isSingletonLT`), so we can pass the predicate directly, flipping `any_of`/`all_of` with a negation where needed.


---
Full diff: https://github.com/llvm/llvm-project/pull/143280.diff


4 Files Affected:

- (modified) mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp (+1-3) 
- (modified) mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp (+6-9) 
- (modified) mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp (+2-4) 
- (modified) mlir/lib/Dialect/Tensor/IR/TensorOps.cpp (+1-2) 


``````````diff
diff --git a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
index fe53d03249369..01cc500148385 100644
--- a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
@@ -402,9 +402,7 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
           return !VectorType::isValidElementType(type);
         }))
       return true;
-    return llvm::any_of(op.getResultTypes(), [](Type type) {
-      return !VectorType::isValidElementType(type);
-    });
+    return !llvm::all_of(op.getResultTypes(), VectorType::isValidElementType);
   });
   SmallVector<NestedMatch, 8> opsMatched;
   types.match(forOp, &opsMatched);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index c5b62227777a7..6b43006c4528a 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -820,9 +820,8 @@ tensorExtractVectorizationPrecondition(Operation *op, bool vectorizeNDExtract) {
       return failure();
   }
 
-  if (llvm::any_of(extractOp->getResultTypes(), [](Type type) {
-        return !VectorType::isValidElementType(type);
-      })) {
+  if (!llvm::all_of(extractOp->getResultTypes(),
+                    VectorType::isValidElementType)) {
     return failure();
   }
 
@@ -2163,14 +2162,12 @@ static LogicalResult vectorizeLinalgOpPrecondition(
             })) {
       continue;
     }
-    if (llvm::any_of(innerOp.getOperandTypes(), [](Type type) {
-          return !VectorType::isValidElementType(type);
-        })) {
+    if (!llvm::all_of(innerOp.getOperandTypes(),
+                      VectorType::isValidElementType)) {
       return failure();
     }
-    if (llvm::any_of(innerOp.getResultTypes(), [](Type type) {
-          return !VectorType::isValidElementType(type);
-        })) {
+    if (!llvm::all_of(innerOp.getResultTypes(),
+                      VectorType::isValidElementType)) {
       return failure();
     }
   }
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 2196199816292..34ae83b25c397 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -799,8 +799,7 @@ LogicalResult SparseTensorEncodingAttr::verify(
                             "before singleton level";
 
     auto *curCOOEnd = std::find_if_not(it, lvlTypes.end(), isSingletonLT);
-    if (!std::all_of(it, curCOOEnd,
-                     [](LevelType i) { return isSingletonLT(i); }))
+    if (!std::all_of(it, curCOOEnd, isSingletonLT))
       return emitError() << "expected all singleton lvlTypes "
                             "following a singleton level";
     // We can potentially support mixed SoA/AoS singleton levels.
@@ -833,8 +832,7 @@ LogicalResult SparseTensorEncodingAttr::verify(
       it != std::end(lvlTypes)) {
     if (it != lvlTypes.end() - 1)
       return emitError() << "expected n_out_of_m to be the last level type";
-    if (!std::all_of(lvlTypes.begin(), it,
-                     [](LevelType i) { return isDenseLT(i); }))
+    if (!std::all_of(lvlTypes.begin(), it, isDenseLT))
       return emitError() << "expected all dense lvlTypes "
                             "before a n_out_of_m level";
     if (dimToLvl && (dimToLvl.getNumDims() != dimToLvl.getNumResults())) {
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 6e67377ddb6e8..04242cad9ecb6 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -1061,8 +1061,7 @@ void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
 void EmptyOp::build(OpBuilder &builder, OperationState &result,
                     ArrayRef<int64_t> staticShape, Type elementType,
                     Attribute encoding) {
-  assert(all_of(staticShape,
-                [](int64_t sz) { return !ShapedType::isDynamic(sz); }) &&
+  assert(none_of(staticShape, ShapedType::isDynamic) &&
          "expected only static sizes");
   build(builder, result, staticShape, elementType, ValueRange{}, encoding);
 }

``````````

</details>


https://github.com/llvm/llvm-project/pull/143280


More information about the Mlir-commits mailing list