[Mlir-commits] [mlir] [mlir][NFC] update `mlir/Dialect` create APIs (32/n) (PR #150657)

llvmlistbot at llvm.org
Fri Jul 25 10:19:39 PDT 2025


github-actions[bot] wrote:


⚠️ The C/C++ code formatter, clang-format, found issues in your code. ⚠️

You can test this locally with the following command:

``````````bash
git-clang-format --diff HEAD~1 HEAD --extensions cpp -- mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp mlir/lib/Dialect/Linalg/Transforms/PackAndUnpackPatterns.cpp mlir/lib/Dialect/Linalg/Transforms/Padding.cpp mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp mlir/lib/Dialect/Linalg/Transforms/WinogradConv2D.cpp mlir/lib/Dialect/Vector/IR/VectorOps.cpp mlir/lib/Dialect/Vector/Transforms/LowerVectorGather.cpp mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
``````````

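If you want the fixes applied in place rather than just printed, dropping `--diff` makes git-clang-format rewrite the offending lines directly. A minimal sketch, assuming the unformatted changes are already committed at HEAD; the file list is abbreviated here, so pass the full list from the command above:

``````````bash
# With a single commit argument and no --diff, git-clang-format
# reformats only the lines that changed since that commit, editing
# the working-tree files in place.
git-clang-format HEAD~1 --extensions cpp -- \
    mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp

# Review the result and fold it into the existing commit.
git diff
git commit -a --amend --no-edit
``````````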

The diff from clang-format:

``````````diff
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index b154c69d2..27b661781 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -792,7 +792,7 @@ struct FoldFillWithPad final : public OpRewritePattern<tensor::PadOp> {
                                 padOp.getResultType().getElementType());
     Value replacement =
         FillOp::create(rewriter, fillOp.getLoc(), ValueRange{padValue},
-                            ValueRange{emptyTensor})
+                       ValueRange{emptyTensor})
             .getResult(0);
     if (replacement.getType() != padOp.getResultType()) {
       replacement = tensor::CastOp::create(rewriter, fillOp.getLoc(),
@@ -2153,8 +2153,8 @@ struct SwapTransposeWithBroadcast : OpRewritePattern<linalg::TransposeOp> {
 
     // Create broadcast(transpose(input)).
     Value transposeResult =
-        TransposeOp::create(rewriter, loc, broadcastOp.getInput(), transposeInit,
-                                 resultPerms)
+        TransposeOp::create(rewriter, loc, broadcastOp.getInput(),
+                            transposeInit, resultPerms)
             ->getResult(0);
     rewriter.replaceOpWithNewOp<BroadcastOp>(
         transposeOp, transposeResult, transposeOp.getInit(), resultDimensions);
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 6625267f0..bdfc8d020 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -4134,7 +4134,7 @@ DiagnosedSilenceableFailure doit(RewriterBase &rewriter, OpTy target,
       rewriter, target.getLoc(), target.getDest(), target.getMixedOffsets(),
       target.getMixedSizes(), target.getMixedStrides());
   Value copied = linalg::CopyOp::create(rewriter, target.getLoc(),
-                                             target.getSource(), extracted)
+                                        target.getSource(), extracted)
                      .getResult(0);
   // Reset the insertion point.
   rewriter.setInsertionPoint(target);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
index 6dc5bf3a1..0a9c17664 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
@@ -1144,8 +1144,8 @@ pushDownUnPackOpThroughGenericOp(RewriterBase &rewriter, GenericOp genericOp,
   // Insert an unPackOp right after the packed generic.
   Value unPackOpRes =
       linalg::UnPackOp::create(rewriter, genericOp.getLoc(), newResult,
-                                    destPack.getSource(), innerDimsPos,
-                                    mixedTiles, outerDimsPerm)
+                               destPack.getSource(), innerDimsPos, mixedTiles,
+                               outerDimsPerm)
           .getResult();
 
   return std::make_tuple(newGenericOp, unPackOpRes);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
index d3af23b62..7f9ba1bdd 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -267,7 +267,8 @@ expandValue(RewriterBase &rewriter, Location loc, Value result, Value origDest,
   assert(rankReductionStrategy ==
              ControlDropUnitDims::RankReductionStrategy::ReassociativeReshape &&
          "unknown rank reduction strategy");
-  return tensor::ExpandShapeOp::create(rewriter, loc, origResultType, result, reassociation)
+  return tensor::ExpandShapeOp::create(rewriter, loc, origResultType, result,
+                                       reassociation)
       .getResult();
 }
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 92342abcc..3bd763ea0 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -1572,10 +1572,12 @@ static Value getCollapsedOpOperand(Location loc, LinalgOp op,
 
   // Insert a reshape to collapse the dimensions.
   if (isa<MemRefType>(operand.getType())) {
-    return memref::CollapseShapeOp::create(builder, loc, operand, operandReassociation)
+    return memref::CollapseShapeOp::create(builder, loc, operand,
+                                           operandReassociation)
         .getResult();
   }
-  return tensor::CollapseShapeOp::create(builder, loc, operand, operandReassociation)
+  return tensor::CollapseShapeOp::create(builder, loc, operand,
+                                         operandReassociation)
       .getResult();
 }
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/PackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Linalg/Transforms/PackAndUnpackPatterns.cpp
index 091266e49..277e50bd5 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/PackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/PackAndUnpackPatterns.cpp
@@ -82,7 +82,7 @@ struct SimplifyPackToExpandShape : public OpRewritePattern<PackOp> {
     if (operand.getType() == newOperandType)
       return operand;
     return tensor::ExpandShapeOp::create(rewriter, loc, newOperandType, operand,
-                                       reassociation)
+                                         reassociation)
         .getResult();
   }
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp b/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
index e4182b145..dd8437985 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
@@ -333,15 +333,16 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
   for (auto it :
        llvm::zip(paddedSubtensorResults, opToPad.getDpsInitsMutable())) {
     if (options.copyBackOp == LinalgPaddingOptions::CopyBackOp::LinalgCopy) {
-      replacements.push_back(linalg::CopyOp::create(rewriter, loc, std::get<0>(it),
-                                                         std::get<1>(it).get())
+      replacements.push_back(linalg::CopyOp::create(rewriter, loc,
+                                                    std::get<0>(it),
+                                                    std::get<1>(it).get())
                                  .getResult(0));
     } else if (options.copyBackOp ==
                LinalgPaddingOptions::CopyBackOp::
                    BufferizationMaterializeInDestination) {
       replacements.push_back(
-          bufferization::MaterializeInDestinationOp::create(rewriter,
-                  loc, std::get<0>(it), std::get<1>(it).get())
+          bufferization::MaterializeInDestinationOp::create(
+              rewriter, loc, std::get<0>(it), std::get<1>(it).get())
               ->getResult(0));
     } else {
       llvm_unreachable("unsupported copy back op");
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index 475b0f947..bb725f2ad 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -947,8 +947,9 @@ DecomposePadOpPattern::matchAndRewrite(tensor::PadOp padOp,
   auto getIdxValue = [&](OpFoldResult ofr) {
     if (auto val = llvm::dyn_cast_if_present<Value>(ofr))
       return val;
-    return arith::ConstantIndexOp::create(rewriter,
-            padOp.getLoc(), cast<IntegerAttr>(cast<Attribute>(ofr)).getInt())
+    return arith::ConstantIndexOp::create(
+               rewriter, padOp.getLoc(),
+               cast<IntegerAttr>(cast<Attribute>(ofr)).getInt())
         .getResult();
   };
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp b/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
index 20fb22334..35453e2d2 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
@@ -70,8 +70,8 @@ FailureOr<Operation *> transposeConv2DHelper(RewriterBase &rewriter,
     input = tensor::EmptyOp::create(rewriter, loc, newFilterShape, elementTy)
                 .getResult();
   } else {
-    input = memref::AllocOp::create(rewriter,
-                    loc, MemRefType::get(newFilterShape, elementTy))
+    input = memref::AllocOp::create(rewriter, loc,
+                                    MemRefType::get(newFilterShape, elementTy))
                 .getResult();
   }
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 4733d617f..0170837cf 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -3714,7 +3714,8 @@ struct Conv1DGenerator
     }
     }
 
-    return vector::TransferWriteOp::create(rewriter, loc, res, resShaped, resPadding)
+    return vector::TransferWriteOp::create(rewriter, loc, res, resShaped,
+                                           resPadding)
         .getOperation();
   }
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/WinogradConv2D.cpp b/mlir/lib/Dialect/Linalg/Transforms/WinogradConv2D.cpp
index da8ff88cc..b80b27fe5 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/WinogradConv2D.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/WinogradConv2D.cpp
@@ -398,9 +398,9 @@ Value filterTransform(RewriterBase &rewriter, Location loc, Value filter,
 
       retRows = GMatrix.rows;
       auto matmulType = RankedTensorType::get({retRows, filterW}, elementType);
-      auto empty =
-          tensor::EmptyOp::create(builder, loc, matmulType.getShape(), elementType)
-              .getResult();
+      auto empty = tensor::EmptyOp::create(builder, loc, matmulType.getShape(),
+                                           elementType)
+                       .getResult();
       auto init =
           linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
 
@@ -421,9 +421,9 @@ Value filterTransform(RewriterBase &rewriter, Location loc, Value filter,
 
       auto matmulType =
           RankedTensorType::get({retRows, GTMatrix.cols}, elementType);
-      auto empty =
-          tensor::EmptyOp::create(builder, loc, matmulType.getShape(), elementType)
-              .getResult();
+      auto empty = tensor::EmptyOp::create(builder, loc, matmulType.getShape(),
+                                           elementType)
+                       .getResult();
       auto init =
           linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
 
@@ -545,9 +545,9 @@ Value inputTransform(RewriterBase &rewriter, Location loc, Value input,
 
       retRows = BTMatrix.rows;
       auto matmulType = RankedTensorType::get({retRows, alphaW}, elementType);
-      auto empty =
-          tensor::EmptyOp::create(builder, loc, matmulType.getShape(), elementType)
-              .getResult();
+      auto empty = tensor::EmptyOp::create(builder, loc, matmulType.getShape(),
+                                           elementType)
+                       .getResult();
       auto init =
           linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
 
@@ -569,9 +569,9 @@ Value inputTransform(RewriterBase &rewriter, Location loc, Value input,
 
       retCols = BMatrix.cols;
       auto matmulType = RankedTensorType::get({retRows, retCols}, elementType);
-      auto empty =
-          tensor::EmptyOp::create(builder, loc, matmulType.getShape(), elementType)
-              .getResult();
+      auto empty = tensor::EmptyOp::create(builder, loc, matmulType.getShape(),
+                                           elementType)
+                       .getResult();
       auto init =
           linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
       Value B =
@@ -658,7 +658,7 @@ static Value matrixMultiply(RewriterBase &rewriter, Location loc,
        inputShape[2] * inputShape[3] * inputShape[4], filterShape[3]},
       outputElementType);
   Value empty = tensor::EmptyOp::create(rewriter, loc, matmulType.getShape(),
-                                             outputElementType)
+                                        outputElementType)
                     .getResult();
   Value zero = arith::ConstantOp::create(
       rewriter, loc, rewriter.getZeroAttr(outputElementType));
@@ -777,8 +777,8 @@ Value outputTransform(RewriterBase &rewriter, Location loc, Value value,
       auto matmulType = RankedTensorType::get({retRows, valueW}, elementType);
       Value init = outInitVal;
       if (rightTransform || scalarFactor != 1) {
-        auto empty = tensor::EmptyOp::create(builder, loc, matmulType.getShape(),
-                                                  elementType)
+        auto empty = tensor::EmptyOp::create(builder, loc,
+                                             matmulType.getShape(), elementType)
                          .getResult();
         init = linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
       }
@@ -796,8 +796,8 @@ Value outputTransform(RewriterBase &rewriter, Location loc, Value value,
           RankedTensorType::get({retRows, AMatrix.cols}, elementType);
       Value init = outInitVal;
       if (scalarFactor != 1) {
-        auto empty = tensor::EmptyOp::create(builder, loc, matmulType.getShape(),
-                                                  elementType)
+        auto empty = tensor::EmptyOp::create(builder, loc,
+                                             matmulType.getShape(), elementType)
                          .getResult();
         init = linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
       }
@@ -820,22 +820,21 @@ Value outputTransform(RewriterBase &rewriter, Location loc, Value value,
           AffineMap::get(2, 0, context), identityAffineMap, identityAffineMap};
 
       matmulRetValue =
-          linalg::GenericOp::create(rewriter,
-                  loc, matmulType,
-                  ValueRange{scalarFactorValue, matmulRetValue},
-                  ValueRange{outInitVal}, affineMaps,
-                  llvm::ArrayRef<utils::IteratorType>{
-                      utils::IteratorType::parallel,
-                      utils::IteratorType::parallel},
-                  [&](OpBuilder &nestedBuilder, Location nestedLoc,
-                      ValueRange args) {
-                    auto mulf = arith::MulFOp::create(nestedBuilder, nestedLoc,
-                                                      args[0], args[1]);
-                    auto addf = arith::AddFOp::create(
-                        nestedBuilder, nestedLoc, mulf.getResult(), args[2]);
-                    linalg::YieldOp::create(nestedBuilder, nestedLoc,
-                                            addf.getResult());
-                  })
+          linalg::GenericOp::create(
+              rewriter, loc, matmulType,
+              ValueRange{scalarFactorValue, matmulRetValue},
+              ValueRange{outInitVal}, affineMaps,
+              llvm::ArrayRef<utils::IteratorType>{
+                  utils::IteratorType::parallel, utils::IteratorType::parallel},
+              [&](OpBuilder &nestedBuilder, Location nestedLoc,
+                  ValueRange args) {
+                auto mulf = arith::MulFOp::create(nestedBuilder, nestedLoc,
+                                                  args[0], args[1]);
+                auto addf = arith::AddFOp::create(nestedBuilder, nestedLoc,
+                                                  mulf.getResult(), args[2]);
+                linalg::YieldOp::create(nestedBuilder, nestedLoc,
+                                        addf.getResult());
+              })
               .getResult(0);
     }
 
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index 1a3f972a4..bce358d89 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -372,8 +372,8 @@ SmallVector<Value> vector::getAsValues(OpBuilder &builder, Location loc,
   llvm::transform(foldResults, std::back_inserter(values),
                   [&](OpFoldResult foldResult) {
                     if (auto attr = dyn_cast<Attribute>(foldResult))
-                      return arith::ConstantIndexOp::create(builder,
-                              loc, cast<IntegerAttr>(attr).getInt())
+                      return arith::ConstantIndexOp::create(
+                                 builder, loc, cast<IntegerAttr>(attr).getInt())
                           .getResult();
 
                     return cast<Value>(foldResult);
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index 647fe8c78..8de87fef9 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -410,8 +410,8 @@ FailureOr<Value> combineContractAndBroadcast(vector::ContractionOp contractOp,
           oldMaskType.getScalableDims().drop_front(unusedDimsBitVector.count());
       VectorType maskOpType =
           VectorType::get(newShape, rewriter.getI1Type(), newShapeScalableDims);
-      mask = vector::ShapeCastOp::create(rewriter, contractOp.getLoc(), maskOpType,
-                                              maskingOp.getMask())
+      mask = vector::ShapeCastOp::create(rewriter, contractOp.getLoc(),
+                                         maskOpType, maskingOp.getMask())
                  .getResult();
     }
 

``````````

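For context: this PR series migrates MLIR builder call sites from the templated `OpBuilder::create<OpTy>(...)` form to the static `OpTy::create(builder, ...)` form, and the diff above is clang-format re-wrapping those migrated calls. A minimal sketch of the before/after pattern, using `linalg::FillOp` as in the diff (an illustration, not code taken from the PR):

``````````cpp
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Old style: create is a template method on the builder.
static Value makeFillOld(OpBuilder &rewriter, Location loc, Value zero,
                         Value empty) {
  return rewriter.create<linalg::FillOp>(loc, zero, empty).getResult(0);
}

// New style (what this series migrates to): create is a static method
// on the op class, taking the builder as its first argument.
static Value makeFillNew(OpBuilder &rewriter, Location loc, Value zero,
                         Value empty) {
  return linalg::FillOp::create(rewriter, loc, zero, empty).getResult(0);
}
``````````

The longer call sites this produces are what pushed several expressions past the 80-column limit, hence the re-indentation clang-format asks for above.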


https://github.com/llvm/llvm-project/pull/150657

