[Mlir-commits] [mlir] [mlir][NFC] update `mlir` create APIs (34/n) (PR #150660)

Maksim Levental llvmlistbot at llvm.org
Fri Jul 25 10:17:42 PDT 2025


makslevental (https://github.com/makslevental) created https://github.com/llvm/llvm-project/pull/150660

See https://github.com/llvm/llvm-project/pull/147168 for more info.
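For context: the change is purely mechanical. Call sites that construct ops
through the builder, i.e. `rewriter.create<OpTy>(...)` (spelled
`rewriter.template create<OpTy>(...)` in dependent contexts), are rewritten to
the static `OpTy::create(rewriter, ...)` form, which takes the builder as its
first argument. A minimal before/after sketch, simplified from the hunks below
(`rewriter`, `loc`, and the operand names stand in for whatever the surrounding
pattern provides):

    // Before: the builder is the callee; dependent contexts need `template`.
    auto cast = rewriter.template create<emitc::CastOp>(loc, dstType, input);

    // After: the op class is the callee and the builder is its first
    // argument; no `template` keyword is needed inside templated patterns.
    auto cast = emitc::CastOp::create(rewriter, loc, dstType, input);

Both spellings build the same operation, which is why the series is tagged NFC.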

From 74298fd141eb0a103961d7c30ad1571a6df4dd26 Mon Sep 17 00:00:00 2001
From: max <maksim.levental at gmail.com>
Date: Fri, 25 Jul 2025 13:17:23 -0400
Subject: [PATCH] [mlir][NFC] update `mlir` create APIs (34/n)

See https://github.com/llvm/llvm-project/pull/147168 for more info.
---
 .../Conversion/ArithToEmitC/ArithToEmitC.cpp  |  8 +--
 .../BufferizationToMemRef.cpp                 |  3 +-
 .../ControlFlowToSCF/ControlFlowToSCF.cpp     |  3 +-
 mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp |  3 +-
 mlir/lib/Conversion/LLVMCommon/Pattern.cpp    |  3 +-
 mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp   |  2 +-
 .../Conversion/MemRefToLLVM/MemRefToLLVM.cpp  |  3 +-
 .../Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp    | 12 ++--
 mlir/lib/Conversion/ShardToMPI/ShardToMPI.cpp | 12 ++--
 .../Conversion/TosaToLinalg/TosaToLinalg.cpp  | 57 +++++++------------
 .../TosaToLinalg/TosaToLinalgNamed.cpp        | 36 ++++--------
 .../Conversion/VectorToGPU/VectorToGPU.cpp    |  3 +-
 mlir/lib/Target/LLVMIR/ModuleImport.cpp       | 39 +++++++------
 .../Dialect/Shard/TestReshardingPartition.cpp |  9 ++-
 mlir/test/lib/Dialect/Test/TestPatterns.cpp   |  5 +-
 .../Dialect/Vector/TestVectorTransforms.cpp   | 12 ++--
 16 files changed, 82 insertions(+), 128 deletions(-)

diff --git a/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp b/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp
index 59b3fe2e4eaed..c3debf7afc865 100644
--- a/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp
+++ b/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp
@@ -402,7 +402,7 @@ class CastConversion : public OpConversionPattern<ArithOp> {
     Value actualOp = adaptValueType(adaptor.getIn(), rewriter, castSrcType);
 
     // Actual cast (may change bitwidth)
-    auto cast = rewriter.template create<emitc::CastOp>(op.getLoc(),
+    auto cast = emitc::CastOp::create(rewriter, op.getLoc(),
                                                         castDestType, actualOp);
 
     // Cast to the expected output type
@@ -507,7 +507,7 @@ class IntegerOpConversion final : public OpConversionPattern<ArithOp> {
     Value lhs = adaptValueType(adaptor.getLhs(), rewriter, arithmeticType);
     Value rhs = adaptValueType(adaptor.getRhs(), rewriter, arithmeticType);
 
-    Value arithmeticResult = rewriter.template create<EmitCOp>(
+    Value arithmeticResult = EmitCOp::create(rewriter,
         op.getLoc(), arithmeticType, lhs, rhs);
 
     Value result = adaptValueType(arithmeticResult, rewriter, type);
@@ -547,7 +547,7 @@ class BitwiseOpConversion : public OpConversionPattern<ArithOp> {
     Value lhs = adaptValueType(adaptor.getLhs(), rewriter, arithmeticType);
     Value rhs = adaptValueType(adaptor.getRhs(), rewriter, arithmeticType);
 
-    Value arithmeticResult = rewriter.template create<EmitCOp>(
+    Value arithmeticResult = EmitCOp::create(rewriter,
         op.getLoc(), arithmeticType, lhs, rhs);
 
     Value result = adaptValueType(arithmeticResult, rewriter, type);
@@ -748,7 +748,7 @@ class ItoFCastOpConversion : public OpConversionPattern<CastOp> {
     }
     Value fpCastOperand = adaptor.getIn();
     if (actualOperandType != operandType) {
-      fpCastOperand = rewriter.template create<emitc::CastOp>(
+      fpCastOperand = emitc::CastOp::create(rewriter,
           castOp.getLoc(), actualOperandType, fpCastOperand);
     }
     rewriter.replaceOpWithNewOp<emitc::CastOp>(castOp, dstType, fpCastOperand);
diff --git a/mlir/lib/Conversion/BufferizationToMemRef/BufferizationToMemRef.cpp b/mlir/lib/Conversion/BufferizationToMemRef/BufferizationToMemRef.cpp
index 30a7170cf5c6a..d77cbfb1173c3 100644
--- a/mlir/lib/Conversion/BufferizationToMemRef/BufferizationToMemRef.cpp
+++ b/mlir/lib/Conversion/BufferizationToMemRef/BufferizationToMemRef.cpp
@@ -68,8 +68,7 @@ struct CloneOpConversion : public OpConversionPattern<bufferization::CloneOp> {
 
         scf::YieldOp::create(rewriter, loc, acc);
       };
-      auto size = rewriter
-                      .create<scf::ForOp>(loc, zero, rank, one, ValueRange(one),
+      auto size = scf::ForOp::create(rewriter, loc, zero, rank, one, ValueRange(one),
                                           loopBody)
                       .getResult(0);
 
diff --git a/mlir/lib/Conversion/ControlFlowToSCF/ControlFlowToSCF.cpp b/mlir/lib/Conversion/ControlFlowToSCF/ControlFlowToSCF.cpp
index c8311eb5a6433..f559fffbaf91f 100644
--- a/mlir/lib/Conversion/ControlFlowToSCF/ControlFlowToSCF.cpp
+++ b/mlir/lib/Conversion/ControlFlowToSCF/ControlFlowToSCF.cpp
@@ -144,8 +144,7 @@ ControlFlowToSCFTransformation::createUnreachableTerminator(Location loc,
     return emitError(loc, "Cannot create unreachable terminator for '")
            << parentOp->getName() << "'";
 
-  return builder
-      .create<func::ReturnOp>(
+  return func::ReturnOp::create(builder,
           loc, llvm::map_to_vector(funcOp.getResultTypes(),
                                    [&](Type type) {
                                      return getUndefValue(loc, builder, type);
diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
index a19194eb181fb..963f365cd688d 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
+++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
@@ -559,8 +559,7 @@ static Value createGroupReduceOpImpl(OpBuilder &builder, Location loc,
         builder, loc, builder.getI32Type(),
         builder.getIntegerAttr(builder.getI32Type(), *clusterSize));
 
-  return builder
-      .create<NonUniformOp>(loc, type, scope, groupOp, arg, clusterSizeValue)
+  return NonUniformOp::create(builder, loc, type, scope, groupOp, arg, clusterSizeValue)
       .getResult();
 }
 
diff --git a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
index ecd5b6367fba4..be58aa2e44e04 100644
--- a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
+++ b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
@@ -273,8 +273,7 @@ LogicalResult ConvertToLLVMPattern::copyUnrankedDescriptors(
     // Allocate memory, copy, and free the source if necessary.
     Value memory =
         toDynamic
-            ? builder
-                  .create<LLVM::CallOp>(loc, mallocFunc.value(), allocationSize)
+            ? LLVM::CallOp::create(builder, loc, mallocFunc.value(), allocationSize)
                   .getResult()
             : LLVM::AllocaOp::create(builder, loc, getPtrType(),
                                      IntegerType::get(getContext(), 8),
diff --git a/mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp b/mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp
index 5b68eb8188996..e5496e53ae529 100644
--- a/mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp
+++ b/mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp
@@ -35,7 +35,7 @@ static Op getOrDefineGlobal(ModuleOp &moduleOp, const Location loc,
   if (!(ret = moduleOp.lookupSymbol<Op>(name))) {
     ConversionPatternRewriter::InsertionGuard guard(rewriter);
     rewriter.setInsertionPointToStart(moduleOp.getBody());
-    ret = rewriter.template create<Op>(loc, std::forward<Args>(args)...);
+    ret = Op::create(rewriter, loc, std::forward<Args>(args)...);
   }
   return ret;
 }
diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
index 53a19129103a3..88c08eb792737 100644
--- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
+++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
@@ -575,8 +575,7 @@ struct DimOpLowering : public ConvertOpToLLVMPattern<memref::DimOp> {
     Value sizePtr = LLVM::GEPOp::create(rewriter, loc, indexPtrTy,
                                         getTypeConverter()->getIndexType(),
                                         offsetPtr, idxPlusOne);
-    return rewriter
-        .create<LLVM::LoadOp>(loc, getTypeConverter()->getIndexType(), sizePtr)
+    return LLVM::LoadOp::create(rewriter, loc, getTypeConverter()->getIndexType(), sizePtr)
         .getResult();
   }
 
diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
index aae3271371c1f..9b6154057b806 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
@@ -1493,11 +1493,11 @@ class ShiftPattern : public SPIRVToLLVMConversion<SPIRVOp> {
     Value extended;
     if (op2TypeWidth < dstTypeWidth) {
       if (isUnsignedIntegerOrVector(op2Type)) {
-        extended = rewriter.template create<LLVM::ZExtOp>(
-            loc, dstType, adaptor.getOperand2());
+        extended =
+            LLVM::ZExtOp::create(rewriter, loc, dstType, adaptor.getOperand2());
       } else {
-        extended = rewriter.template create<LLVM::SExtOp>(
-            loc, dstType, adaptor.getOperand2());
+        extended =
+            LLVM::SExtOp::create(rewriter, loc, dstType, adaptor.getOperand2());
       }
     } else if (op2TypeWidth == dstTypeWidth) {
       extended = adaptor.getOperand2();
@@ -1505,8 +1505,8 @@ class ShiftPattern : public SPIRVToLLVMConversion<SPIRVOp> {
       return failure();
     }
 
-    Value result = rewriter.template create<LLVMOp>(
-        loc, dstType, adaptor.getOperand1(), extended);
+    Value result =
+        LLVMOp::create(rewriter, loc, dstType, adaptor.getOperand1(), extended);
     rewriter.replaceOp(op, result);
     return success();
   }
diff --git a/mlir/lib/Conversion/ShardToMPI/ShardToMPI.cpp b/mlir/lib/Conversion/ShardToMPI/ShardToMPI.cpp
index 8525543760d99..34773f16f02a2 100644
--- a/mlir/lib/Conversion/ShardToMPI/ShardToMPI.cpp
+++ b/mlir/lib/Conversion/ShardToMPI/ShardToMPI.cpp
@@ -177,8 +177,7 @@ struct ConvertShardingOp : public OpConversionPattern<ShardingOp> {
     auto type = RankedTensorType::get({nSplits, 2}, i64);
     Value resHaloSizes =
         haloSizes.empty()
-            ? rewriter
-                  .create<tensor::EmptyOp>(loc, std::array<int64_t, 2>{0, 0},
+            ? tensor::EmptyOp::create(rewriter, loc, std::array<int64_t, 2>{0, 0},
                                            i64)
                   .getResult()
             : tensor::FromElementsOp::create(rewriter, loc, type, haloSizes)
@@ -307,8 +306,7 @@ class ConvertProcessLinearIndexOp
     Value commWorld =
         mpi::CommWorldOp::create(rewriter, loc, mpi::CommType::get(ctx));
     auto rank =
-        rewriter
-            .create<mpi::CommRankOp>(
+        mpi::CommRankOp::create(rewriter,
                 loc,
                 TypeRange{mpi::RetvalType::get(ctx), rewriter.getI32Type()},
                 commWorld)
@@ -704,8 +702,7 @@ struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> {
     for (auto &sz : haloSizes) {
       if (auto value = dyn_cast<Value>(sz))
         sz =
-            rewriter
-                .create<arith::IndexCastOp>(loc, rewriter.getIndexType(), value)
+            arith::IndexCastOp::create(rewriter, loc, rewriter.getIndexType(), value)
                 .getResult();
     }
 
@@ -758,8 +755,7 @@ struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> {
       assert(currHaloDim >= 0 && (size_t)currHaloDim < haloSizes.size() / 2);
       // Get the linearized ids of the neighbors (down and up) for the
       // given split
-      auto tmp = rewriter
-                     .create<NeighborsLinearIndicesOp>(loc, grid, myMultiIndex,
+      auto tmp = NeighborsLinearIndicesOp::create(rewriter, loc, grid, myMultiIndex,
                                                        splitAxes)
                      .getResults();
       // MPI operates on i32...
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index 5c7c027382977..b5792406b296f 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -569,8 +569,7 @@ static Value createLinalgBodyCalculationForElementwiseOp(
     // to UIToFP.
     if (srcTy.isUnsignedInteger() && isa<FloatType>(dstTy)) {
       auto unrealizedCast =
-          rewriter
-              .create<UnrealizedConversionCastOp>(
+          UnrealizedConversionCastOp::create(rewriter,
                   loc, rewriter.getIntegerType(srcTy.getIntOrFloatBitWidth()),
                   args[0])
               .getResult(0);
@@ -868,8 +867,7 @@ static Value broadcastDynamicDimension(PatternRewriter &rewriter, Location loc,
 
     // Emit 'linalg.generic' op
     auto resultTensor =
-        opBuilder
-            .create<linalg::GenericOp>(
+        linalg::GenericOp::create(opBuilder,
                 loc, outputTensor.getType(), operand, outputTensor, affineMaps,
                 getNParallelLoopsAttrs(rank),
                 [&](OpBuilder &opBuilder, Location loc, ValueRange blockArgs) {
@@ -1156,8 +1154,7 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
 
   // First fill the output buffer with the init value.
   auto emptyTensor =
-      rewriter
-          .create<tensor::EmptyOp>(loc, reduceShape, resultTy.getElementType(),
+      tensor::EmptyOp::create(rewriter, loc, reduceShape, resultTy.getElementType(),
                                    dynDims)
           .getResult();
 
@@ -1167,8 +1164,7 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
         op, "No initial value found for reduction operation");
 
   auto fillValue = arith::ConstantOp::create(rewriter, loc, fillValueAttr);
-  auto filledTensor = rewriter
-                          .create<linalg::FillOp>(loc, ValueRange{fillValue},
+  auto filledTensor = linalg::FillOp::create(rewriter, loc, ValueRange{fillValue},
                                                   ValueRange{emptyTensor})
                           .result();
   outputs.push_back(filledTensor);
@@ -1186,13 +1182,11 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
       auto trueAttr = rewriter.getBoolAttr(true);
       auto trueValue = arith::ConstantOp::create(rewriter, loc, trueAttr);
       auto emptyBoolTensor =
-          rewriter
-              .create<tensor::EmptyOp>(loc, reduceShape, trueValue.getType(),
+          tensor::EmptyOp::create(rewriter, loc, reduceShape, trueValue.getType(),
                                        dynDims)
               .getResult();
       auto allResultsNaNTensor =
-          rewriter
-              .create<linalg::FillOp>(loc, ValueRange{trueValue},
+          linalg::FillOp::create(rewriter, loc, ValueRange{trueValue},
                                       ValueRange{emptyBoolTensor})
               .result();
       // Note that because the linalg::ReduceOp has two variadic arguments
@@ -1261,21 +1255,18 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
         APFloat::getNaN(cast<FloatType>(elementTy).getFloatSemantics(), false));
     auto nanValue = arith::ConstantOp::create(rewriter, loc, nanValueAttr);
     auto emptyNanTensor =
-        rewriter
-            .create<tensor::EmptyOp>(loc, reduceShape,
+        tensor::EmptyOp::create(rewriter, loc, reduceShape,
                                      resultTy.getElementType(), dynDims)
             .getResult();
     auto nanFilledTensor =
-        rewriter
-            .create<linalg::FillOp>(loc, ValueRange{nanValue},
+        linalg::FillOp::create(rewriter, loc, ValueRange{nanValue},
                                     ValueRange{emptyNanTensor})
             .result();
 
     // Create an empty tensor, non need to fill this since it will be
     // overwritten by the select.
     auto finalEmptyTensor =
-        rewriter
-            .create<tensor::EmptyOp>(loc, reduceShape,
+        tensor::EmptyOp::create(rewriter, loc, reduceShape,
                                      resultTy.getElementType(), dynDims)
             .getResult();
 
@@ -1503,8 +1494,7 @@ class RescaleConverter : public OpRewritePattern<tosa::RescaleOp> {
           Value shift = shiftConstant ? shiftConstant : blockArgs[shiftArg];
 
           if (valueTy.isUnsignedInteger()) {
-            value = nestedBuilder
-                        .create<UnrealizedConversionCastOp>(
+            value = UnrealizedConversionCastOp::create(nestedBuilder,
                             nestedLoc,
                             nestedBuilder.getIntegerType(
                                 valueTy.getIntOrFloatBitWidth()),
@@ -1557,8 +1547,7 @@ class RescaleConverter : public OpRewritePattern<tosa::RescaleOp> {
           }
 
           if (outIntType.isUnsignedInteger()) {
-            value = nestedBuilder
-                        .create<UnrealizedConversionCastOp>(nestedLoc,
+            value = UnrealizedConversionCastOp::create(nestedBuilder, nestedLoc,
                                                             outIntType, value)
                         .getResult(0);
           }
@@ -2095,8 +2084,7 @@ class ReverseConverter : public OpRewritePattern<tosa::ReverseOp> {
     Value axisDimSize = tensor::DimOp::create(rewriter, loc, input, axis);
 
     // First fill the output buffer with the init value.
-    auto emptyTensor = rewriter
-                           .create<tensor::EmptyOp>(loc, inputTy.getShape(),
+    auto emptyTensor = tensor::EmptyOp::create(rewriter, loc, inputTy.getShape(),
                                                     inputTy.getElementType(),
                                                     ArrayRef<Value>({dynDims}))
                            .getResult();
@@ -2241,21 +2229,18 @@ class ArgMaxConverter : public OpRewritePattern<tosa::ArgMaxOp> {
     }
 
     // First fill the output buffer for the index.
-    auto emptyTensorIdx = rewriter
-                              .create<tensor::EmptyOp>(loc, resultTy.getShape(),
+    auto emptyTensorIdx = tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(),
                                                        outElementTy, dynDims)
                               .getResult();
     auto fillValueIdx = arith::ConstantOp::create(
         rewriter, loc, rewriter.getIntegerAttr(outElementTy, 0));
     auto filledTensorIdx =
-        rewriter
-            .create<linalg::FillOp>(loc, ValueRange{fillValueIdx},
+        linalg::FillOp::create(rewriter, loc, ValueRange{fillValueIdx},
                                     ValueRange{emptyTensorIdx})
             .result();
 
     // Second fill the output buffer for the running max.
-    auto emptyTensorMax = rewriter
-                              .create<tensor::EmptyOp>(loc, resultTy.getShape(),
+    auto emptyTensorMax = tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(),
                                                        inElementTy, dynDims)
                               .getResult();
     auto fillValueMaxAttr =
@@ -2268,8 +2253,7 @@ class ArgMaxConverter : public OpRewritePattern<tosa::ArgMaxOp> {
     auto fillValueMax =
         arith::ConstantOp::create(rewriter, loc, fillValueMaxAttr);
     auto filledTensorMax =
-        rewriter
-            .create<linalg::FillOp>(loc, ValueRange{fillValueMax},
+        linalg::FillOp::create(rewriter, loc, ValueRange{fillValueMax},
                                     ValueRange{emptyTensorMax})
             .result();
 
@@ -2371,8 +2355,7 @@ class GatherConverter : public OpConversionPattern<tosa::GatherOp> {
 
     auto loc = op.getLoc();
     auto emptyTensor =
-        rewriter
-            .create<tensor::EmptyOp>(loc, resultTy.getShape(), resultElementTy,
+        tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(), resultElementTy,
                                      dynamicDims)
             .getResult();
 
@@ -2448,8 +2431,7 @@ class TableConverter : public OpRewritePattern<tosa::TableOp> {
       }
     }
 
-    auto emptyTensor = rewriter
-                           .create<tensor::EmptyOp>(loc, resultTy.getShape(),
+    auto emptyTensor = tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(),
                                                     resultElementTy, dynDims)
                            .getResult();
 
@@ -2585,8 +2567,7 @@ struct RFFT2dConverter final : public OpRewritePattern<RFFT2dOp> {
         tensor::EmptyOp::create(rewriter, loc, type, dynamicSizes);
     auto fillValueAttr = rewriter.getZeroAttr(type.getElementType());
     auto fillValue = arith::ConstantOp::create(rewriter, loc, fillValueAttr);
-    auto filledTensor = rewriter
-                            .create<linalg::FillOp>(loc, ValueRange{fillValue},
+    auto filledTensor = linalg::FillOp::create(rewriter, loc, ValueRange{fillValue},
                                                     ValueRange{emptyTensor})
                             .result();
     return filledTensor;
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
index 3a205246ddd9e..3d7d4e8f6afa4 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
@@ -64,8 +64,7 @@ linalgIntBroadcastExtSIAdd(PatternRewriter &rewriter, Location loc, Value bias,
                            Value conv, Value result,
                            ArrayRef<AffineMap> indexingMaps) {
   ShapedType resultTy = cast<ShapedType>(conv.getType());
-  return rewriter
-      .create<linalg::GenericOp>(
+  return linalg::GenericOp::create(rewriter,
           loc, resultTy, ValueRange({bias, conv}), result, indexingMaps,
           getNParallelLoopsAttrs(resultTy.getRank()),
           [](OpBuilder &builder, Location loc, ValueRange args) {
@@ -124,8 +123,7 @@ static mlir::Value linalgBroadcastAndMaybeExt(PatternRewriter &rewriter,
   indexingMaps.push_back(rewriter.getMultiDimIdentityMap(resultRank));
 
   // Build the broadcast-like operation as a linalg.generic.
-  return rewriter
-      .create<linalg::GenericOp>(
+  return linalg::GenericOp::create(rewriter,
           loc, resultTy, ValueRange({source}), result, indexingMaps,
           getNParallelLoopsAttrs(resultTy.getRank()),
           [&resultTy](OpBuilder &builder, Location loc, ValueRange args) {
@@ -398,8 +396,7 @@ class ConvConverter : public OpConversionPattern<TosaConvOp> {
       auto kZpVal = arith::ConstantOp::create(rewriter, loc, kZp);
 
       Value conv =
-          rewriter
-              .create<LinalgConvQOp>(
+          LinalgConvQOp::create(rewriter,
                   loc, resultTy, ValueRange{input, weight, iZpVal, kZpVal},
                   ValueRange{broadcastBias}, strideAttr, dilationAttr)
               ->getResult(0);
@@ -408,8 +405,7 @@ class ConvConverter : public OpConversionPattern<TosaConvOp> {
       return success();
     }
 
-    Value conv = rewriter
-                     .create<LinalgConvOp>(
+    Value conv = LinalgConvOp::create(rewriter,
                          loc, accTy, ValueRange{input, weight},
                          ValueRange{broadcastBias}, strideAttr, dilationAttr)
                      ->getResult(0);
@@ -529,8 +525,7 @@ class DepthwiseConvConverter
     Value emptyTensor = tensor::EmptyOp::create(
         rewriter, loc, linalgConvTy.getShape(), accETy, filteredDims);
     Value zero = arith::ConstantOp::create(rewriter, loc, resultZeroAttr);
-    Value zeroTensor = rewriter
-                           .create<linalg::FillOp>(loc, ValueRange{zero},
+    Value zeroTensor = linalg::FillOp::create(rewriter, loc, ValueRange{zero},
                                                    ValueRange{emptyTensor})
                            .result();
 
@@ -544,8 +539,7 @@ class DepthwiseConvConverter
     indexingMaps.push_back(rewriter.getMultiDimIdentityMap(resultRank));
 
     if (hasNullZps) {
-      Value conv = rewriter
-                       .create<linalg::DepthwiseConv2DNhwcHwcmOp>(
+      Value conv = linalg::DepthwiseConv2DNhwcHwcmOp::create(rewriter,
                            loc, linalgConvTy, ValueRange{input, weight},
                            ValueRange{zeroTensor}, strideAttr, dilationAttr)
                        .getResult(0);
@@ -565,8 +559,7 @@ class DepthwiseConvConverter
           rewriter, loc, resultTy, conv, reassociationMap);
 
       Value result =
-          rewriter
-              .create<linalg::GenericOp>(
+          linalg::GenericOp::create(rewriter,
                   loc, resultTy, ValueRange({bias, convReshape}),
                   biasEmptyTensor, indexingMaps,
                   getNParallelLoopsAttrs(resultRank),
@@ -589,8 +582,7 @@ class DepthwiseConvConverter
       auto iZpVal = arith::ConstantOp::create(rewriter, loc, iZp);
       auto kZpVal = arith::ConstantOp::create(rewriter, loc, wZp);
       Value conv =
-          rewriter
-              .create<linalg::DepthwiseConv2DNhwcHwcmQOp>(
+          linalg::DepthwiseConv2DNhwcHwcmQOp::create(rewriter,
                   loc, linalgConvTy, ValueRange{input, weight, iZpVal, kZpVal},
                   ValueRange{zeroTensor}, strideAttr, dilationAttr)
               .getResult(0);
@@ -639,8 +631,7 @@ class MatMulConverter : public OpConversionPattern<tosa::MatMulOp> {
     auto emptyTensor =
         tensor::EmptyOp::create(rewriter, loc, outputTy.getShape(),
                                 outputTy.getElementType(), filteredDims);
-    Value zeroTensor = rewriter
-                           .create<linalg::FillOp>(loc, ValueRange{zero},
+    Value zeroTensor = linalg::FillOp::create(rewriter, loc, ValueRange{zero},
                                                    ValueRange{emptyTensor})
                            .result();
 
@@ -910,8 +901,7 @@ class AvgPool2dConverter : public OpRewritePattern<tosa::AvgPool2dOp> {
         rewriter, loc, accTy.getShape(), accETy, dynamicDims);
 
     Value filledEmptyTensor =
-        rewriter
-            .create<linalg::FillOp>(loc, ValueRange{initialValue},
+        linalg::FillOp::create(rewriter, loc, ValueRange{initialValue},
                                     ValueRange{poolEmptyTensor})
             .result();
 
@@ -919,8 +909,7 @@ class AvgPool2dConverter : public OpRewritePattern<tosa::AvgPool2dOp> {
         tensor::EmptyOp::create(rewriter, loc, kernel, accETy);
 
     // Sum across the pooled region.
-    Value poolingOp = rewriter
-                          .create<linalg::PoolingNhwcSumOp>(
+    Value poolingOp = linalg::PoolingNhwcSumOp::create(rewriter,
                               loc, ArrayRef<Type>{accTy},
                               ValueRange{paddedInput, fakeWindowDims},
                               filledEmptyTensor, strideAttr, dilationAttr)
@@ -1050,8 +1039,7 @@ class AvgPool2dConverter : public OpRewritePattern<tosa::AvgPool2dOp> {
             Value shift = arith::AddIOp::create(rewriter, loc, k8, thirty8);
 
             auto scaled =
-                rewriter
-                    .create<tosa::ApplyScaleOp>(
+                tosa::ApplyScaleOp::create(rewriter,
                         loc, rewriter.getI32Type(), poolVal, multiplier, shift,
                         rewriter.getStringAttr("SINGLE_ROUND"))
                     .getResult();
diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
index 77aab85483a8b..d49dce6131f27 100644
--- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
+++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
@@ -483,8 +483,7 @@ struct CombineTransferReadOpTranspose final
 
     auto loc = op.getLoc();
     Value result =
-        rewriter
-            .create<vector::TransferReadOp>(
+        vector::TransferReadOp::create(rewriter,
                 loc, resultType, transferReadOp.getBase(),
                 transferReadOp.getIndices(), AffineMapAttr::get(newMap),
                 transferReadOp.getPadding(), transferReadOp.getMask(),
diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
index 94db7f8888129..58e3c44ec0049 100644
--- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
@@ -142,6 +142,7 @@ static LogicalResult convertInstructionImpl(OpBuilder &odsBuilder,
   // TODO: Implement the `convertInstruction` hooks in the
   // `LLVMDialectLLVMIRImportInterface` and move the following include there.
 #include "mlir/Dialect/LLVMIR/LLVMOpFromLLVMIRConversions.inc"
+
   return failure();
 }
 
@@ -1626,12 +1627,11 @@ FailureOr<Value> ModuleImport::convertConstant(llvm::Constant *constant) {
   // Convert dso_local_equivalent.
   if (auto *dsoLocalEquivalent = dyn_cast<llvm::DSOLocalEquivalent>(constant)) {
     Type type = convertType(dsoLocalEquivalent->getType());
-    return builder
-        .create<DSOLocalEquivalentOp>(
-            loc, type,
-            FlatSymbolRefAttr::get(
-                builder.getContext(),
-                dsoLocalEquivalent->getGlobalValue()->getName()))
+    return DSOLocalEquivalentOp::create(
+               builder, loc, type,
+               FlatSymbolRefAttr::get(
+                   builder.getContext(),
+                   dsoLocalEquivalent->getGlobalValue()->getName()))
         .getResult();
   }
 
@@ -1736,9 +1736,9 @@ FailureOr<Value> ModuleImport::convertConstant(llvm::Constant *constant) {
         FlatSymbolRefAttr::get(context, blockAddr->getFunction()->getName());
     auto blockTag =
         BlockTagAttr::get(context, blockAddr->getBasicBlock()->getNumber());
-    return builder
-        .create<BlockAddressOp>(loc, convertType(blockAddr->getType()),
-                                BlockAddressAttr::get(context, fnSym, blockTag))
+    return BlockAddressOp::create(
+               builder, loc, convertType(blockAddr->getType()),
+               BlockAddressAttr::get(context, fnSym, blockTag))
         .getRes();
   }
 
@@ -2228,17 +2228,16 @@ LogicalResult ModuleImport::convertInstruction(llvm::Instruction *inst) {
         if (!resultTy)
           return failure();
         ArrayAttr operandAttrs = convertAsmInlineOperandAttrs(*callInst);
-        return builder
-            .create<InlineAsmOp>(
-                loc, resultTy, *operands,
-                builder.getStringAttr(asmI->getAsmString()),
-                builder.getStringAttr(asmI->getConstraintString()),
-                asmI->hasSideEffects(), asmI->isAlignStack(),
-                convertTailCallKindFromLLVM(callInst->getTailCallKind()),
-                AsmDialectAttr::get(
-                    mlirModule.getContext(),
-                    convertAsmDialectFromLLVM(asmI->getDialect())),
-                operandAttrs)
+        return InlineAsmOp::create(
+                   builder, loc, resultTy, *operands,
+                   builder.getStringAttr(asmI->getAsmString()),
+                   builder.getStringAttr(asmI->getConstraintString()),
+                   asmI->hasSideEffects(), asmI->isAlignStack(),
+                   convertTailCallKindFromLLVM(callInst->getTailCallKind()),
+                   AsmDialectAttr::get(
+                       mlirModule.getContext(),
+                       convertAsmDialectFromLLVM(asmI->getDialect())),
+                   operandAttrs)
             .getOperation();
       }
       bool isIncompatibleCall;
diff --git a/mlir/test/lib/Dialect/Shard/TestReshardingPartition.cpp b/mlir/test/lib/Dialect/Shard/TestReshardingPartition.cpp
index ac71ff60fc509..23fdad1bd624d 100644
--- a/mlir/test/lib/Dialect/Shard/TestReshardingPartition.cpp
+++ b/mlir/test/lib/Dialect/Shard/TestReshardingPartition.cpp
@@ -72,15 +72,14 @@ struct TestReshardingRewritePattern : OpRewritePattern<ShardOp> {
       ShapedType sourceShardShape =
           shardShapedType(op.getResult().getType(), grid, op.getSharding());
       TypedValue<ShapedType> sourceShard = cast<TypedValue<ShapedType>>(
-          builder
-              .create<UnrealizedConversionCastOp>(sourceShardShape, op.getSrc())
+          UnrealizedConversionCastOp::create(builder, sourceShardShape,
+                                             op.getSrc())
               ->getResult(0));
       TypedValue<ShapedType> targetShard =
           reshard(builder, grid, op, targetShardOp, sourceShard);
       Value newTargetUnsharded =
-          builder
-              .create<UnrealizedConversionCastOp>(
-                  targetShardOp.getResult().getType(), targetShard)
+          UnrealizedConversionCastOp::create(
+              builder, targetShardOp.getResult().getType(), targetShard)
               ->getResult(0);
       rewriter.replaceAllUsesWith(targetShardOp.getResult(),
                                   newTargetUnsharded);
diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
index 0605bc59fef91..5fcd92eb37f3e 100644
--- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp
+++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
@@ -1007,9 +1007,8 @@ struct TestPassthroughInvalidOp : public ConversionPattern {
       // This is a 1:N replacement. Insert a test.cast op. (That's what the
       // argument materialization used to do.)
       flattened.push_back(
-          rewriter
-              .create<TestCastOp>(op->getLoc(),
-                                  op->getOperand(it.index()).getType(), range)
+          TestCastOp::create(rewriter, op->getLoc(),
+                             op->getOperand(it.index()).getType(), range)
               .getResult());
     }
     rewriter.replaceOpWithNewOp<TestValidOp>(op, TypeRange(), flattened,
diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
index cf8353a4089ea..f89c944b5c564 100644
--- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
@@ -569,10 +569,9 @@ static Value warpReduction(Location loc, OpBuilder &builder, Value input,
   Value laneVal = vector::ReductionOp::create(builder, loc, kind, input);
   // Parallel reduction using butterfly shuffles.
   for (uint64_t i = 1; i < size; i <<= 1) {
-    Value shuffled = builder
-                         .create<gpu::ShuffleOp>(loc, laneVal, i,
-                                                 /*width=*/size,
-                                                 /*mode=*/gpu::ShuffleMode::XOR)
+    Value shuffled = gpu::ShuffleOp::create(builder, loc, laneVal, i,
+                                            /*width=*/size,
+                                            /*mode=*/gpu::ShuffleMode::XOR)
                          .getShuffleResult();
     laneVal = makeArithReduction(builder, loc, kind, laneVal, shuffled);
   }
@@ -650,9 +649,8 @@ struct TestVectorDistribution
           arith::IndexCastOp::create(builder, loc, i32Type, srcIdx);
       Value warpSzI32 = arith::ConstantOp::create(
           builder, loc, builder.getIntegerAttr(i32Type, warpSz));
-      Value result = builder
-                         .create<gpu::ShuffleOp>(loc, val, srcIdxI32, warpSzI32,
-                                                 gpu::ShuffleMode::IDX)
+      Value result = gpu::ShuffleOp::create(builder, loc, val, srcIdxI32,
+                                            warpSzI32, gpu::ShuffleMode::IDX)
                          .getResult(0);
       return result;
     };


