[flang-commits] [flang] [mlir][NFC] update `flang/Optimizer/Transforms` create APIs (11/n) (PR #149915)

Maksim Levental via flang-commits flang-commits at lists.llvm.org
Mon Jul 21 14:59:18 PDT 2025


https://github.com/makslevental created https://github.com/llvm/llvm-project/pull/149915

See https://github.com/llvm/llvm-project/pull/147168 for more info.

From c46be1a7b6242bf07ca83e8cf5ef759717542b69 Mon Sep 17 00:00:00 2001
From: max <maksim.levental at gmail.com>
Date: Mon, 21 Jul 2025 17:57:16 -0400
Subject: [PATCH] [mlir][NFC] update `flang/Optimizer/Transforms` create APIs
 (11/n) (#149687)

See https://github.com/llvm/llvm-project/pull/147168 for more info.
---
 .../Optimizer/Transforms/AbstractResult.cpp   |  16 +-
 .../Optimizer/Transforms/AffineDemotion.cpp   |   4 +-
 .../Optimizer/Transforms/AffinePromotion.cpp  |  20 +-
 .../Optimizer/Transforms/ArrayValueCopy.cpp   |  44 ++---
 .../Transforms/AssumedRankOpConversion.cpp    |   4 +-
 .../Transforms/CUFAddConstructor.cpp          |  18 +-
 .../CUFComputeSharedMemoryOffsetsAndSize.cpp  |   6 +-
 .../Transforms/CUFGPUToLLVMConversion.cpp     |  44 ++---
 .../Optimizer/Transforms/CUFOpConversion.cpp  |  86 ++++-----
 .../Transforms/CharacterConversion.cpp        |  24 +--
 .../ConstantArgumentGlobalisation.cpp         |   6 +-
 .../Transforms/ControlFlowConverter.cpp       |  64 +++---
 .../Transforms/DebugTypeGenerator.cpp         |   4 +-
 flang/lib/Optimizer/Transforms/FIRToSCF.cpp   |  18 +-
 .../Transforms/GenRuntimeCallsForTest.cpp     |   4 +-
 .../Optimizer/Transforms/LoopVersioning.cpp   |  28 +--
 .../Optimizer/Transforms/MemoryAllocation.cpp |   4 +-
 .../lib/Optimizer/Transforms/MemoryUtils.cpp  |  18 +-
 .../Transforms/PolymorphicOpConversion.cpp    |  54 +++---
 .../Transforms/SimplifyFIROperations.cpp      |  14 +-
 .../Transforms/SimplifyIntrinsics.cpp         | 182 +++++++++---------
 .../lib/Optimizer/Transforms/StackArrays.cpp  |   4 +-
 22 files changed, 333 insertions(+), 333 deletions(-)

diff --git a/flang/lib/Optimizer/Transforms/AbstractResult.cpp b/flang/lib/Optimizer/Transforms/AbstractResult.cpp
index 59e2eeb76c715..4899822bbb9f4 100644
--- a/flang/lib/Optimizer/Transforms/AbstractResult.cpp
+++ b/flang/lib/Optimizer/Transforms/AbstractResult.cpp
@@ -137,7 +137,7 @@ class CallConversion : public mlir::OpRewritePattern<Op> {
     auto buffer = saveResult.getMemref();
     mlir::Value arg = buffer;
     if (mustEmboxResult(result.getType(), shouldBoxResult))
-      arg = rewriter.create<fir::EmboxOp>(
+      arg = fir::EmboxOp::create(rewriter,
           loc, argType, buffer, saveResult.getShape(), /*slice*/ mlir::Value{},
           saveResult.getTypeparams());
 
@@ -155,7 +155,7 @@ class CallConversion : public mlir::OpRewritePattern<Op> {
         if (!isResultBuiltinCPtr)
           newOperands.emplace_back(arg);
         newOperands.append(op.getOperands().begin(), op.getOperands().end());
-        newOp = rewriter.create<fir::CallOp>(loc, *op.getCallee(),
+        newOp = fir::CallOp::create(rewriter, loc, *op.getCallee(),
                                              newResultTypes, newOperands);
       } else {
         // Indirect calls.
@@ -169,12 +169,12 @@ class CallConversion : public mlir::OpRewritePattern<Op> {
 
         llvm::SmallVector<mlir::Value> newOperands;
         newOperands.push_back(
-            rewriter.create<fir::ConvertOp>(loc, newFuncTy, op.getOperand(0)));
+            fir::ConvertOp::create(rewriter, loc, newFuncTy, op.getOperand(0)));
         if (!isResultBuiltinCPtr)
           newOperands.push_back(arg);
         newOperands.append(op.getOperands().begin() + 1,
                            op.getOperands().end());
-        newOp = rewriter.create<fir::CallOp>(loc, mlir::SymbolRefAttr{},
+        newOp = fir::CallOp::create(rewriter, loc, mlir::SymbolRefAttr{},
                                              newResultTypes, newOperands);
       }
     }
@@ -191,7 +191,7 @@ class CallConversion : public mlir::OpRewritePattern<Op> {
         passArgPos =
             rewriter.getI32IntegerAttr(*op.getPassArgPos() + passArgShift);
       // TODO: propagate argument and result attributes (need to be shifted).
-      newOp = rewriter.create<fir::DispatchOp>(
+      newOp = fir::DispatchOp::create(rewriter,
           loc, newResultTypes, rewriter.getStringAttr(op.getMethod()),
           op.getOperands()[0], newOperands, passArgPos,
           /*arg_attrs=*/nullptr, /*res_attrs=*/nullptr,
@@ -280,7 +280,7 @@ processReturnLikeOp(OpTy ret, mlir::Value newArg,
     // register pass, this is possible for fir.box results, or fir.record
     // with no length parameters. Simply store the result in the result
     // storage. at the return point.
-    rewriter.create<fir::StoreOp>(loc, resultValue, newArg);
+    fir::StoreOp::create(rewriter, loc, resultValue, newArg);
     rewriter.replaceOpWithNewOp<OpTy>(ret);
   }
   // Delete result old local storage if unused.
@@ -337,7 +337,7 @@ class AddrOfOpConversion : public mlir::OpRewritePattern<fir::AddrOfOp> {
       newFuncTy = getCPtrFunctionType(oldFuncTy);
     else
       newFuncTy = getNewFunctionType(oldFuncTy, shouldBoxResult);
-    auto newAddrOf = rewriter.create<fir::AddrOfOp>(addrOf.getLoc(), newFuncTy,
+    auto newAddrOf = fir::AddrOfOp::create(rewriter, addrOf.getLoc(), newFuncTy,
                                                     addrOf.getSymbol());
     // Rather than converting all op a function pointer might transit through
     // (e.g calls, stores, loads, converts...), cast new type to the abstract
@@ -397,7 +397,7 @@ class AbstractResultOpt
         if (mustEmboxResult(resultType, shouldBoxResult)) {
           auto bufferType = fir::ReferenceType::get(resultType);
           rewriter.setInsertionPointToStart(&func.front());
-          newArg = rewriter.create<fir::BoxAddrOp>(loc, bufferType, newArg);
+          newArg = fir::BoxAddrOp::create(rewriter, loc, bufferType, newArg);
         }
         patterns.insert<ReturnOpConversion>(context, newArg);
         target.addDynamicallyLegalOp<mlir::func::ReturnOp>(
diff --git a/flang/lib/Optimizer/Transforms/AffineDemotion.cpp b/flang/lib/Optimizer/Transforms/AffineDemotion.cpp
index d45f855c9078e..d08c9d83b9e68 100644
--- a/flang/lib/Optimizer/Transforms/AffineDemotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffineDemotion.cpp
@@ -60,7 +60,7 @@ class AffineLoadConversion
     if (!maybeExpandedMap)
       return failure();
 
-    auto coorOp = rewriter.create<fir::CoordinateOp>(
+    auto coorOp = fir::CoordinateOp::create(rewriter,
         op.getLoc(), fir::ReferenceType::get(op.getResult().getType()),
         adaptor.getMemref(), *maybeExpandedMap);
 
@@ -83,7 +83,7 @@ class AffineStoreConversion
     if (!maybeExpandedMap)
       return failure();
 
-    auto coorOp = rewriter.create<fir::CoordinateOp>(
+    auto coorOp = fir::CoordinateOp::create(rewriter,
         op.getLoc(), fir::ReferenceType::get(op.getValueToStore().getType()),
         adaptor.getMemref(), *maybeExpandedMap);
     rewriter.replaceOpWithNewOp<fir::StoreOp>(op, adaptor.getValue(),
diff --git a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
index ef82e400bea14..d8d971f9aa3d0 100644
--- a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
@@ -366,7 +366,7 @@ static mlir::Type coordinateArrayElement(fir::ArrayCoorOp op) {
 static void populateIndexArgs(fir::ArrayCoorOp acoOp, fir::ShapeOp shape,
                               SmallVectorImpl<mlir::Value> &indexArgs,
                               mlir::PatternRewriter &rewriter) {
-  auto one = rewriter.create<mlir::arith::ConstantOp>(
+  auto one = mlir::arith::ConstantOp::create(rewriter,
       acoOp.getLoc(), rewriter.getIndexType(), rewriter.getIndexAttr(1));
   auto extents = shape.getExtents();
   for (auto i = extents.begin(); i < extents.end(); i++) {
@@ -379,7 +379,7 @@ static void populateIndexArgs(fir::ArrayCoorOp acoOp, fir::ShapeOp shape,
 static void populateIndexArgs(fir::ArrayCoorOp acoOp, fir::ShapeShiftOp shape,
                               SmallVectorImpl<mlir::Value> &indexArgs,
                               mlir::PatternRewriter &rewriter) {
-  auto one = rewriter.create<mlir::arith::ConstantOp>(
+  auto one = mlir::arith::ConstantOp::create(rewriter,
       acoOp.getLoc(), rewriter.getIndexType(), rewriter.getIndexAttr(1));
   auto extents = shape.getPairs();
   for (auto i = extents.begin(); i < extents.end();) {
@@ -422,12 +422,12 @@ createAffineOps(mlir::Value arrayRef, mlir::PatternRewriter &rewriter) {
 
   populateIndexArgs(acoOp, indexArgs, rewriter);
 
-  auto affineApply = rewriter.create<affine::AffineApplyOp>(
+  auto affineApply = affine::AffineApplyOp::create(rewriter,
       acoOp.getLoc(), affineMap, indexArgs);
   auto arrayElementType = coordinateArrayElement(acoOp);
   auto newType =
       mlir::MemRefType::get({mlir::ShapedType::kDynamic}, arrayElementType);
-  auto arrayConvert = rewriter.create<fir::ConvertOp>(acoOp.getLoc(), newType,
+  auto arrayConvert = fir::ConvertOp::create(rewriter, acoOp.getLoc(), newType,
                                                       acoOp.getMemref());
   return std::make_pair(affineApply, arrayConvert);
 }
@@ -495,7 +495,7 @@ class AffineLoopConversion : public mlir::OpRewritePattern<fir::DoLoopOp> {
                                 affineFor.getRegionIterArgs());
     if (!results.empty()) {
       rewriter.setInsertionPointToEnd(affineFor.getBody());
-      rewriter.create<affine::AffineYieldOp>(resultOp->getLoc(), results);
+      affine::AffineYieldOp::create(rewriter, resultOp->getLoc(), results);
     }
     rewriter.finalizeOpModification(affineFor.getOperation());
 
@@ -525,7 +525,7 @@ class AffineLoopConversion : public mlir::OpRewritePattern<fir::DoLoopOp> {
   std::pair<affine::AffineForOp, mlir::Value>
   positiveConstantStep(fir::DoLoopOp op, int64_t step,
                        mlir::PatternRewriter &rewriter) const {
-    auto affineFor = rewriter.create<affine::AffineForOp>(
+    auto affineFor = affine::AffineForOp::create(rewriter,
         op.getLoc(), ValueRange(op.getLowerBound()),
         mlir::AffineMap::get(0, 1,
                              mlir::getAffineSymbolExpr(0, op.getContext())),
@@ -543,7 +543,7 @@ class AffineLoopConversion : public mlir::OpRewritePattern<fir::DoLoopOp> {
     auto step = mlir::getAffineSymbolExpr(2, op.getContext());
     mlir::AffineMap upperBoundMap = mlir::AffineMap::get(
         0, 3, (upperBound - lowerBound + step).floorDiv(step));
-    auto genericUpperBound = rewriter.create<affine::AffineApplyOp>(
+    auto genericUpperBound = affine::AffineApplyOp::create(rewriter,
         op.getLoc(), upperBoundMap,
         ValueRange({op.getLowerBound(), op.getUpperBound(), op.getStep()}));
     auto actualIndexMap = mlir::AffineMap::get(
@@ -551,7 +551,7 @@ class AffineLoopConversion : public mlir::OpRewritePattern<fir::DoLoopOp> {
         (lowerBound + mlir::getAffineDimExpr(0, op.getContext())) *
             mlir::getAffineSymbolExpr(1, op.getContext()));
 
-    auto affineFor = rewriter.create<affine::AffineForOp>(
+    auto affineFor = affine::AffineForOp::create(rewriter,
         op.getLoc(), ValueRange(),
         AffineMap::getConstantMap(0, op.getContext()),
         genericUpperBound.getResult(),
@@ -559,7 +559,7 @@ class AffineLoopConversion : public mlir::OpRewritePattern<fir::DoLoopOp> {
                              1 + mlir::getAffineSymbolExpr(0, op.getContext())),
         1, op.getIterOperands());
     rewriter.setInsertionPointToStart(affineFor.getBody());
-    auto actualIndex = rewriter.create<affine::AffineApplyOp>(
+    auto actualIndex = affine::AffineApplyOp::create(rewriter,
         op.getLoc(), actualIndexMap,
         ValueRange(
             {affineFor.getInductionVar(), op.getLowerBound(), op.getStep()}));
@@ -588,7 +588,7 @@ class AffineIfConversion : public mlir::OpRewritePattern<fir::IfOp> {
               << "AffineIfConversion: couldn't calculate affine condition\n";);
       return failure();
     }
-    auto affineIf = rewriter.create<affine::AffineIfOp>(
+    auto affineIf = affine::AffineIfOp::create(rewriter,
         op.getLoc(), affineCondition.getIntegerSet(),
         affineCondition.getAffineArgs(), !op.getElseRegion().empty());
     rewriter.startOpModification(affineIf);
diff --git a/flang/lib/Optimizer/Transforms/ArrayValueCopy.cpp b/flang/lib/Optimizer/Transforms/ArrayValueCopy.cpp
index 8544d17f62248..7f24624a47788 100644
--- a/flang/lib/Optimizer/Transforms/ArrayValueCopy.cpp
+++ b/flang/lib/Optimizer/Transforms/ArrayValueCopy.cpp
@@ -856,7 +856,7 @@ static bool getAdjustedExtents(mlir::Location loc,
   auto idxTy = rewriter.getIndexType();
   if (isAssumedSize(result)) {
     // Use slice information to compute the extent of the column.
-    auto one = rewriter.create<mlir::arith::ConstantIndexOp>(loc, 1);
+    auto one = mlir::arith::ConstantIndexOp::create(rewriter, loc, 1);
     mlir::Value size = one;
     if (mlir::Value sliceArg = arrLoad.getSlice()) {
       if (auto sliceOp =
@@ -896,14 +896,14 @@ static mlir::Value getOrReadExtentsAndShapeOp(
         mlir::cast<SequenceType>(dyn_cast_ptrOrBoxEleTy(boxTy)).getDimension();
     auto idxTy = rewriter.getIndexType();
     for (decltype(rank) dim = 0; dim < rank; ++dim) {
-      auto dimVal = rewriter.create<mlir::arith::ConstantIndexOp>(loc, dim);
-      auto dimInfo = rewriter.create<BoxDimsOp>(loc, idxTy, idxTy, idxTy,
+      auto dimVal = mlir::arith::ConstantIndexOp::create(rewriter, loc, dim);
+      auto dimInfo = BoxDimsOp::create(rewriter, loc, idxTy, idxTy, idxTy,
                                                 arrLoad.getMemref(), dimVal);
       result.emplace_back(dimInfo.getResult(1));
     }
     if (!arrLoad.getShape()) {
       auto shapeType = ShapeType::get(rewriter.getContext(), rank);
-      return rewriter.create<ShapeOp>(loc, shapeType, result);
+      return ShapeOp::create(rewriter, loc, shapeType, result);
     }
     auto shiftOp = arrLoad.getShape().getDefiningOp<ShiftOp>();
     auto shapeShiftType = ShapeShiftType::get(rewriter.getContext(), rank);
@@ -912,7 +912,7 @@ static mlir::Value getOrReadExtentsAndShapeOp(
       shapeShiftOperands.push_back(lb);
       shapeShiftOperands.push_back(extent);
     }
-    return rewriter.create<ShapeShiftOp>(loc, shapeShiftType,
+    return ShapeShiftOp::create(rewriter, loc, shapeShiftType,
                                          shapeShiftOperands);
   }
   copyUsingSlice =
@@ -952,12 +952,12 @@ static mlir::Value genCoorOp(mlir::PatternRewriter &rewriter,
   auto module = load->getParentOfType<mlir::ModuleOp>();
   FirOpBuilder builder(rewriter, module);
   auto typeparams = getTypeParamsIfRawData(loc, builder, load, alloc.getType());
-  mlir::Value result = rewriter.create<ArrayCoorOp>(
+  mlir::Value result = ArrayCoorOp::create(rewriter,
       loc, eleTy, alloc, shape, slice,
       llvm::ArrayRef<mlir::Value>{originated}.take_front(dimension),
       typeparams);
   if (dimension < originated.size())
-    result = rewriter.create<fir::CoordinateOp>(
+    result = fir::CoordinateOp::create(rewriter,
         loc, resTy, result,
         llvm::ArrayRef<mlir::Value>{originated}.drop_front(dimension));
   return result;
@@ -971,12 +971,12 @@ static mlir::Value getCharacterLen(mlir::Location loc, FirOpBuilder &builder,
       // The loaded array is an emboxed value. Get the CHARACTER length from
       // the box value.
       auto eleSzInBytes =
-          builder.create<BoxEleSizeOp>(loc, charLenTy, load.getMemref());
+          BoxEleSizeOp::create(builder, loc, charLenTy, load.getMemref());
       auto kindSize =
           builder.getKindMap().getCharacterBitsize(charTy.getFKind());
       auto kindByteSize =
           builder.createIntegerConstant(loc, charLenTy, kindSize / 8);
-      return builder.create<mlir::arith::DivSIOp>(loc, eleSzInBytes,
+      return mlir::arith::DivSIOp::create(builder, loc, eleSzInBytes,
                                                   kindByteSize);
     }
     // The loaded array is a (set of) unboxed values. If the CHARACTER's
@@ -1003,11 +1003,11 @@ void genArrayCopy(mlir::Location loc, mlir::PatternRewriter &rewriter,
   auto idxTy = rewriter.getIndexType();
   // Build loop nest from column to row.
   for (auto sh : llvm::reverse(extents)) {
-    auto ubi = rewriter.create<ConvertOp>(loc, idxTy, sh);
-    auto zero = rewriter.create<mlir::arith::ConstantIndexOp>(loc, 0);
-    auto one = rewriter.create<mlir::arith::ConstantIndexOp>(loc, 1);
-    auto ub = rewriter.create<mlir::arith::SubIOp>(loc, idxTy, ubi, one);
-    auto loop = rewriter.create<DoLoopOp>(loc, zero, ub, one);
+    auto ubi = ConvertOp::create(rewriter, loc, idxTy, sh);
+    auto zero = mlir::arith::ConstantIndexOp::create(rewriter, loc, 0);
+    auto one = mlir::arith::ConstantIndexOp::create(rewriter, loc, 1);
+    auto ub = mlir::arith::SubIOp::create(rewriter, loc, idxTy, ubi, one);
+    auto loop = DoLoopOp::create(rewriter, loc, zero, ub, one);
     rewriter.setInsertionPointToStart(loop.getBody());
     indices.push_back(loop.getInductionVar());
   }
@@ -1015,12 +1015,12 @@ void genArrayCopy(mlir::Location loc, mlir::PatternRewriter &rewriter,
   std::reverse(indices.begin(), indices.end());
   auto module = arrLoad->getParentOfType<mlir::ModuleOp>();
   FirOpBuilder builder(rewriter, module);
-  auto fromAddr = rewriter.create<ArrayCoorOp>(
+  auto fromAddr = ArrayCoorOp::create(rewriter,
       loc, getEleTy(src.getType()), src, shapeOp,
       CopyIn && copyUsingSlice ? sliceOp : mlir::Value{},
       factory::originateIndices(loc, rewriter, src.getType(), shapeOp, indices),
       getTypeParamsIfRawData(loc, builder, arrLoad, src.getType()));
-  auto toAddr = rewriter.create<ArrayCoorOp>(
+  auto toAddr = ArrayCoorOp::create(rewriter,
       loc, getEleTy(dst.getType()), dst, shapeOp,
       !CopyIn && copyUsingSlice ? sliceOp : mlir::Value{},
       factory::originateIndices(loc, rewriter, dst.getType(), shapeOp, indices),
@@ -1093,14 +1093,14 @@ allocateArrayTemp(mlir::Location loc, mlir::PatternRewriter &rewriter,
       findNonconstantExtents(baseType, extents);
   llvm::SmallVector<mlir::Value> typeParams =
       genArrayLoadTypeParameters(loc, rewriter, load);
-  mlir::Value allocmem = rewriter.create<AllocMemOp>(
+  mlir::Value allocmem = AllocMemOp::create(rewriter,
       loc, dyn_cast_ptrOrBoxEleTy(baseType), typeParams, nonconstantExtents);
   mlir::Type eleType =
       fir::unwrapSequenceType(fir::unwrapPassByRefType(baseType));
   if (fir::isRecordWithAllocatableMember(eleType)) {
     // The allocatable component descriptors need to be set to a clean
     // deallocated status before anything is done with them.
-    mlir::Value box = rewriter.create<fir::EmboxOp>(
+    mlir::Value box = fir::EmboxOp::create(rewriter,
         loc, fir::BoxType::get(allocmem.getType()), allocmem, shape,
         /*slice=*/mlir::Value{}, typeParams);
     auto module = load->getParentOfType<mlir::ModuleOp>();
@@ -1111,12 +1111,12 @@ allocateArrayTemp(mlir::Location loc, mlir::PatternRewriter &rewriter,
     auto cleanup = [=](mlir::PatternRewriter &r) {
       FirOpBuilder builder(r, module);
       runtime::genDerivedTypeDestroy(builder, loc, box);
-      r.create<FreeMemOp>(loc, allocmem);
+      FreeMemOp::create(r, loc, allocmem);
     };
     return {allocmem, cleanup};
   }
   auto cleanup = [=](mlir::PatternRewriter &r) {
-    r.create<FreeMemOp>(loc, allocmem);
+    FreeMemOp::create(r, loc, allocmem);
   };
   return {allocmem, cleanup};
 }
@@ -1257,7 +1257,7 @@ class ArrayUpdateConversion : public ArrayUpdateConversionBase<ArrayUpdateOp> {
       if (auto inEleTy = dyn_cast_ptrEleTy(input.getType())) {
         emitFatalError(loc, "array_update on references not supported");
       } else {
-        rewriter.create<fir::StoreOp>(loc, input, coor);
+        fir::StoreOp::create(rewriter, loc, input, coor);
       }
     };
     auto lhsEltRefType = toRefType(update.getMerge().getType());
@@ -1368,7 +1368,7 @@ class ArrayAmendConversion : public mlir::OpRewritePattern<ArrayAmendOp> {
     auto *op = amend.getOperation();
     rewriter.setInsertionPoint(op);
     auto loc = amend.getLoc();
-    auto undef = rewriter.create<UndefOp>(loc, amend.getType());
+    auto undef = UndefOp::create(rewriter, loc, amend.getType());
     rewriter.replaceOp(amend, undef.getResult());
     return mlir::success();
   }
diff --git a/flang/lib/Optimizer/Transforms/AssumedRankOpConversion.cpp b/flang/lib/Optimizer/Transforms/AssumedRankOpConversion.cpp
index 6af1cb988a4c1..72c5c8cf1f65c 100644
--- a/flang/lib/Optimizer/Transforms/AssumedRankOpConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/AssumedRankOpConversion.cpp
@@ -88,7 +88,7 @@ class ReboxAssumedRankConv
         (fir::isPolymorphicType(oldBoxType) ||
          (newEleType != oldBoxType.unwrapInnerType())) &&
         !fir::isPolymorphicType(newBoxType)) {
-      newDtype = builder.create<fir::TypeDescOp>(
+      newDtype = fir::TypeDescOp::create(builder,
           loc, mlir::TypeAttr::get(newDerivedType));
     } else {
       newDtype = builder.createNullConstant(loc);
@@ -103,7 +103,7 @@ class ReboxAssumedRankConv
                                              rebox.getBox(), newDtype,
                                              newAttribute, lowerBoundModifier);
 
-    mlir::Value descValue = builder.create<fir::LoadOp>(loc, tempDesc);
+    mlir::Value descValue = fir::LoadOp::create(builder, loc, tempDesc);
     mlir::Value castDesc = builder.createConvert(loc, newBoxType, descValue);
     rewriter.replaceOp(rebox, castDesc);
     return mlir::success();
diff --git a/flang/lib/Optimizer/Transforms/CUFAddConstructor.cpp b/flang/lib/Optimizer/Transforms/CUFAddConstructor.cpp
index 2dd6950b34897..a74586653c260 100644
--- a/flang/lib/Optimizer/Transforms/CUFAddConstructor.cpp
+++ b/flang/lib/Optimizer/Transforms/CUFAddConstructor.cpp
@@ -68,7 +68,7 @@ struct CUFAddConstructor
 
     // Symbol reference to CUFRegisterAllocator.
     builder.setInsertionPointToEnd(mod.getBody());
-    auto registerFuncOp = builder.create<mlir::LLVM::LLVMFuncOp>(
+    auto registerFuncOp = mlir::LLVM::LLVMFuncOp::create(builder,
         loc, RTNAME_STRING(CUFRegisterAllocator), funcTy);
     registerFuncOp.setVisibility(mlir::SymbolTable::Visibility::Private);
     auto cufRegisterAllocatorRef = mlir::SymbolRefAttr::get(
@@ -76,16 +76,16 @@ struct CUFAddConstructor
     builder.setInsertionPointToEnd(mod.getBody());
 
     // Create the constructor function that call CUFRegisterAllocator.
-    auto func = builder.create<mlir::LLVM::LLVMFuncOp>(loc, cudaFortranCtorName,
+    auto func = mlir::LLVM::LLVMFuncOp::create(builder, loc, cudaFortranCtorName,
                                                        funcTy);
     func.setLinkage(mlir::LLVM::Linkage::Internal);
     builder.setInsertionPointToStart(func.addEntryBlock(builder));
-    builder.create<mlir::LLVM::CallOp>(loc, funcTy, cufRegisterAllocatorRef);
+    mlir::LLVM::CallOp::create(builder, loc, funcTy, cufRegisterAllocatorRef);
 
     auto gpuMod = symTab.lookup<mlir::gpu::GPUModuleOp>(cudaDeviceModuleName);
     if (gpuMod) {
       auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(ctx);
-      auto registeredMod = builder.create<cuf::RegisterModuleOp>(
+      auto registeredMod = cuf::RegisterModuleOp::create(builder,
           loc, llvmPtrTy, mlir::SymbolRefAttr::get(ctx, gpuMod.getName()));
 
       fir::LLVMTypeConverter typeConverter(mod, /*applyTBAA=*/false,
@@ -96,7 +96,7 @@ struct CUFAddConstructor
           auto kernelName = mlir::SymbolRefAttr::get(
               builder.getStringAttr(cudaDeviceModuleName),
               {mlir::SymbolRefAttr::get(builder.getContext(), func.getName())});
-          builder.create<cuf::RegisterKernelOp>(loc, kernelName, registeredMod);
+          cuf::RegisterKernelOp::create(builder, loc, kernelName, registeredMod);
         }
       }
 
@@ -140,19 +140,19 @@ struct CUFAddConstructor
           auto sizeVal = builder.createIntegerConstant(loc, idxTy, *size);
 
           // Global variable address
-          mlir::Value addr = builder.create<fir::AddrOfOp>(
+          mlir::Value addr = fir::AddrOfOp::create(builder,
               loc, globalOp.resultType(), globalOp.getSymbol());
 
           llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
               builder, loc, fTy, registeredMod, addr, gblName, sizeVal)};
-          builder.create<fir::CallOp>(loc, func, args);
+          fir::CallOp::create(builder, loc, func, args);
         } break;
         default:
           break;
         }
       }
     }
-    builder.create<mlir::LLVM::ReturnOp>(loc, mlir::ValueRange{});
+    mlir::LLVM::ReturnOp::create(builder, loc, mlir::ValueRange{});
 
     // Create the llvm.global_ctor with the function.
     // TODO: We might want to have a utility that retrieve it if already
@@ -165,7 +165,7 @@ struct CUFAddConstructor
     llvm::SmallVector<mlir::Attribute> data;
     priorities.push_back(0);
     data.push_back(mlir::LLVM::ZeroAttr::get(mod.getContext()));
-    builder.create<mlir::LLVM::GlobalCtorsOp>(
+    mlir::LLVM::GlobalCtorsOp::create(builder,
         mod.getLoc(), builder.getArrayAttr(funcs),
         builder.getI32ArrayAttr(priorities), builder.getArrayAttr(data));
   }
diff --git a/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp b/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp
index f6381ef8a8a21..beba8b822e467 100644
--- a/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp
+++ b/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp
@@ -93,10 +93,10 @@ struct CUFComputeSharedMemoryOffsetsAndSize
           mlir::Value dynSize =
               builder.createIntegerConstant(loc, idxTy, tySize);
           for (auto extent : sharedOp.getShape())
-            dynSize = builder.create<mlir::arith::MulIOp>(loc, dynSize, extent);
+            dynSize = mlir::arith::MulIOp::create(builder, loc, dynSize, extent);
           if (crtDynOffset)
             crtDynOffset =
-                builder.create<mlir::arith::AddIOp>(loc, crtDynOffset, dynSize);
+                mlir::arith::AddIOp::create(builder, loc, crtDynOffset, dynSize);
           else
             crtDynOffset = dynSize;
 
@@ -142,7 +142,7 @@ struct CUFComputeSharedMemoryOffsetsAndSize
           fir::GlobalOp::getDataAttrAttrName(globalOpName),
           cuf::DataAttributeAttr::get(gpuMod.getContext(),
                                       cuf::DataAttribute::Shared)));
-      auto sharedMem = builder.create<fir::GlobalOp>(
+      auto sharedMem = fir::GlobalOp::create(builder,
           funcOp.getLoc(), sharedMemGlobalName, false, false, sharedMemType,
           init, linkage, attrs);
       sharedMem.setAlignment(alignment);
diff --git a/flang/lib/Optimizer/Transforms/CUFGPUToLLVMConversion.cpp b/flang/lib/Optimizer/Transforms/CUFGPUToLLVMConversion.cpp
index fe69ffa8350af..374d9b3381b01 100644
--- a/flang/lib/Optimizer/Transforms/CUFGPUToLLVMConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/CUFGPUToLLVMConversion.cpp
@@ -46,27 +46,27 @@ static mlir::Value createKernelArgArray(mlir::Location loc,
   auto structTy = mlir::LLVM::LLVMStructType::getLiteral(ctx, structTypes);
   auto ptrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext());
   mlir::Type i32Ty = rewriter.getI32Type();
-  auto zero = rewriter.create<mlir::LLVM::ConstantOp>(
+  auto zero = mlir::LLVM::ConstantOp::create(rewriter,
       loc, i32Ty, rewriter.getIntegerAttr(i32Ty, 0));
-  auto one = rewriter.create<mlir::LLVM::ConstantOp>(
+  auto one = mlir::LLVM::ConstantOp::create(rewriter,
       loc, i32Ty, rewriter.getIntegerAttr(i32Ty, 1));
   mlir::Value argStruct =
-      rewriter.create<mlir::LLVM::AllocaOp>(loc, ptrTy, structTy, one);
-  auto size = rewriter.create<mlir::LLVM::ConstantOp>(
+      mlir::LLVM::AllocaOp::create(rewriter, loc, ptrTy, structTy, one);
+  auto size = mlir::LLVM::ConstantOp::create(rewriter,
       loc, i32Ty, rewriter.getIntegerAttr(i32Ty, structTypes.size()));
   mlir::Value argArray =
-      rewriter.create<mlir::LLVM::AllocaOp>(loc, ptrTy, ptrTy, size);
+      mlir::LLVM::AllocaOp::create(rewriter, loc, ptrTy, ptrTy, size);
 
   for (auto [i, arg] : llvm::enumerate(operands)) {
-    auto indice = rewriter.create<mlir::LLVM::ConstantOp>(
+    auto indice = mlir::LLVM::ConstantOp::create(rewriter,
         loc, i32Ty, rewriter.getIntegerAttr(i32Ty, i));
-    mlir::Value structMember = rewriter.create<LLVM::GEPOp>(
+    mlir::Value structMember = LLVM::GEPOp::create(rewriter,
         loc, ptrTy, structTy, argStruct,
         mlir::ArrayRef<mlir::Value>({zero, indice}));
-    rewriter.create<LLVM::StoreOp>(loc, arg, structMember);
-    mlir::Value arrayMember = rewriter.create<LLVM::GEPOp>(
+    LLVM::StoreOp::create(rewriter, loc, arg, structMember);
+    mlir::Value arrayMember = LLVM::GEPOp::create(rewriter,
         loc, ptrTy, ptrTy, argArray, mlir::ArrayRef<mlir::Value>({indice}));
-    rewriter.create<LLVM::StoreOp>(loc, structMember, arrayMember);
+    LLVM::StoreOp::create(rewriter, loc, structMember, arrayMember);
   }
   return argArray;
 }
@@ -94,7 +94,7 @@ struct GPULaunchKernelConversion
     mlir::Value dynamicMemorySize = op.getDynamicSharedMemorySize();
     mlir::Type i32Ty = rewriter.getI32Type();
     if (!dynamicMemorySize)
-      dynamicMemorySize = rewriter.create<mlir::LLVM::ConstantOp>(
+      dynamicMemorySize = mlir::LLVM::ConstantOp::create(rewriter,
           loc, i32Ty, rewriter.getIntegerAttr(i32Ty, 0));
 
     mlir::Value kernelArgs =
@@ -108,17 +108,17 @@ struct GPULaunchKernelConversion
       if (!funcOp)
         return mlir::failure();
       kernelPtr =
-          rewriter.create<LLVM::AddressOfOp>(loc, ptrTy, funcOp.getName());
+          LLVM::AddressOfOp::create(rewriter, loc, ptrTy, funcOp.getName());
     } else {
       kernelPtr =
-          rewriter.create<LLVM::AddressOfOp>(loc, ptrTy, kernel.getName());
+          LLVM::AddressOfOp::create(rewriter, loc, ptrTy, kernel.getName());
     }
 
     auto llvmIntPtrType = mlir::IntegerType::get(
         ctx, this->getTypeConverter()->getPointerBitwidth(0));
     auto voidTy = mlir::LLVM::LLVMVoidType::get(ctx);
 
-    mlir::Value nullPtr = rewriter.create<LLVM::ZeroOp>(loc, ptrTy);
+    mlir::Value nullPtr = LLVM::ZeroOp::create(rewriter, loc, ptrTy);
 
     if (op.hasClusterSize()) {
       auto funcOp = mod.lookupSymbol<mlir::LLVM::LLVMFuncOp>(
@@ -134,7 +134,7 @@ struct GPULaunchKernelConversion
       if (!funcOp) {
         mlir::OpBuilder::InsertionGuard insertGuard(rewriter);
         rewriter.setInsertionPointToStart(mod.getBody());
-        auto launchKernelFuncOp = rewriter.create<mlir::LLVM::LLVMFuncOp>(
+        auto launchKernelFuncOp = mlir::LLVM::LLVMFuncOp::create(rewriter,
             loc, RTNAME_STRING(CUFLaunchClusterKernel), funcTy);
         launchKernelFuncOp.setVisibility(
             mlir::SymbolTable::Visibility::Private);
@@ -148,7 +148,7 @@ struct GPULaunchKernelConversion
         stream = adaptor.getAsyncDependencies().front();
       }
 
-      rewriter.create<mlir::LLVM::CallOp>(
+      mlir::LLVM::CallOp::create(rewriter,
           loc, funcTy, cufLaunchClusterKernel,
           mlir::ValueRange{kernelPtr, adaptor.getClusterSizeX(),
                            adaptor.getClusterSizeY(), adaptor.getClusterSizeZ(),
@@ -178,7 +178,7 @@ struct GPULaunchKernelConversion
         mlir::OpBuilder::InsertionGuard insertGuard(rewriter);
         rewriter.setInsertionPointToStart(mod.getBody());
         auto launchKernelFuncOp =
-            rewriter.create<mlir::LLVM::LLVMFuncOp>(loc, fctName, funcTy);
+            mlir::LLVM::LLVMFuncOp::create(rewriter, loc, fctName, funcTy);
         launchKernelFuncOp.setVisibility(
             mlir::SymbolTable::Visibility::Private);
       }
@@ -191,7 +191,7 @@ struct GPULaunchKernelConversion
         stream = adaptor.getAsyncDependencies().front();
       }
 
-      rewriter.create<mlir::LLVM::CallOp>(
+      mlir::LLVM::CallOp::create(rewriter,
           loc, funcTy, cufLaunchKernel,
           mlir::ValueRange{kernelPtr, adaptor.getGridSizeX(),
                            adaptor.getGridSizeY(), adaptor.getGridSizeZ(),
@@ -222,10 +222,10 @@ static mlir::Value createAddressOfOp(mlir::ConversionPatternRewriter &rewriter,
   auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(
       rewriter.getContext(), mlir::NVVM::NVVMMemorySpace::kSharedMemorySpace);
   if (auto g = gpuMod.lookupSymbol<fir::GlobalOp>(sharedGlobalName))
-    return rewriter.create<mlir::LLVM::AddressOfOp>(loc, llvmPtrTy,
+    return mlir::LLVM::AddressOfOp::create(rewriter, loc, llvmPtrTy,
                                                     g.getSymName());
   if (auto g = gpuMod.lookupSymbol<mlir::LLVM::GlobalOp>(sharedGlobalName))
-    return rewriter.create<mlir::LLVM::AddressOfOp>(loc, llvmPtrTy,
+    return mlir::LLVM::AddressOfOp::create(rewriter, loc, llvmPtrTy,
                                                     g.getSymName());
   return {};
 }
@@ -255,12 +255,12 @@ struct CUFSharedMemoryOpConversion
     if (!sharedGlobalAddr)
       mlir::emitError(loc, "Could not find the shared global operation\n");
 
-    auto castPtr = rewriter.create<mlir::LLVM::AddrSpaceCastOp>(
+    auto castPtr = mlir::LLVM::AddrSpaceCastOp::create(rewriter,
         loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext()),
         sharedGlobalAddr);
     mlir::Type baseType = castPtr->getResultTypes().front();
     llvm::SmallVector<mlir::LLVM::GEPArg> gepArgs = {op.getOffset()};
-    mlir::Value shmemPtr = rewriter.create<mlir::LLVM::GEPOp>(
+    mlir::Value shmemPtr = mlir::LLVM::GEPOp::create(rewriter,
         loc, baseType, rewriter.getI8Type(), castPtr, gepArgs);
     rewriter.replaceOp(op, {shmemPtr});
     return mlir::success();
diff --git a/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp b/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp
index 750569c126642..5e5ee2905e83a 100644
--- a/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp
@@ -89,7 +89,7 @@ static mlir::Value createConvertOp(mlir::PatternRewriter &rewriter,
                                    mlir::Location loc, mlir::Type toTy,
                                    mlir::Value val) {
   if (val.getType() != toTy)
-    return rewriter.create<fir::ConvertOp>(loc, toTy, val);
+    return fir::ConvertOp::create(rewriter, loc, toTy, val);
   return val;
 }
 
@@ -118,7 +118,7 @@ static mlir::LogicalResult convertOpToCall(OpTy op,
     errmsg = op.getErrmsg();
   } else {
     mlir::Type boxNoneTy = fir::BoxType::get(builder.getNoneType());
-    errmsg = builder.create<fir::AbsentOp>(loc, boxNoneTy).getResult();
+    errmsg = fir::AbsentOp::create(builder, loc, boxNoneTy).getResult();
   }
   llvm::SmallVector<mlir::Value> args;
   if constexpr (std::is_same_v<OpTy, cuf::AllocateOp>) {
@@ -148,7 +148,7 @@ static mlir::LogicalResult convertOpToCall(OpTy op,
         fir::runtime::createArguments(builder, loc, fTy, op.getBox(), hasStat,
                                       errmsg, sourceFile, sourceLine);
   }
-  auto callOp = builder.create<fir::CallOp>(loc, func, args);
+  auto callOp = fir::CallOp::create(builder, loc, func, args);
   rewriter.replaceOp(op, callOp);
   return mlir::success();
 }
@@ -301,7 +301,7 @@ struct CUFAllocOpConversion : public mlir::OpRewritePattern<cuf::AllocOp> {
     if (inDeviceContext(op.getOperation())) {
       // In device context just replace the cuf.alloc operation with a fir.alloc
       // the cuf.free will be removed.
-      auto allocaOp = rewriter.create<fir::AllocaOp>(
+      auto allocaOp = fir::AllocaOp::create(rewriter,
           loc, op.getInType(), op.getUniqName() ? *op.getUniqName() : "",
           op.getBindcName() ? *op.getBindcName() : "", op.getTypeparams(),
           op.getShape());
@@ -338,14 +338,14 @@ struct CUFAllocOpConversion : public mlir::OpRewritePattern<cuf::AllocOp> {
           assert(!op.getShape().empty() && "expect shape with dynamic arrays");
           nbElem = builder.loadIfRef(loc, op.getShape()[0]);
           for (unsigned i = 1; i < op.getShape().size(); ++i) {
-            nbElem = rewriter.create<mlir::arith::MulIOp>(
+            nbElem = mlir::arith::MulIOp::create(rewriter,
                 loc, nbElem, builder.loadIfRef(loc, op.getShape()[i]));
           }
         } else {
           nbElem = builder.createIntegerConstant(loc, builder.getIndexType(),
                                                  seqTy.getConstantArraySize());
         }
-        bytes = rewriter.create<mlir::arith::MulIOp>(loc, nbElem, width);
+        bytes = mlir::arith::MulIOp::create(rewriter, loc, nbElem, width);
       } else if (fir::isa_derived(op.getInType())) {
         mlir::Type structTy = typeConverter->convertType(op.getInType());
         std::size_t structSize = dl->getTypeSizeInBits(structTy) / 8;
@@ -363,7 +363,7 @@ struct CUFAllocOpConversion : public mlir::OpRewritePattern<cuf::AllocOp> {
           loc, builder.getI32Type(), getMemType(op.getDataAttr()));
       llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
           builder, loc, fTy, bytes, memTy, sourceFile, sourceLine)};
-      auto callOp = builder.create<fir::CallOp>(loc, func, args);
+      auto callOp = fir::CallOp::create(builder, loc, func, args);
       callOp->setAttr(cuf::getDataAttrName(), op.getDataAttrAttr());
       auto convOp = builder.createConvert(loc, op.getResult().getType(),
                                           callOp.getResult(0));
@@ -386,7 +386,7 @@ struct CUFAllocOpConversion : public mlir::OpRewritePattern<cuf::AllocOp> {
 
     llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
         builder, loc, fTy, sizeInBytes, sourceFile, sourceLine)};
-    auto callOp = builder.create<fir::CallOp>(loc, func, args);
+    auto callOp = fir::CallOp::create(builder, loc, func, args);
     callOp->setAttr(cuf::getDataAttrName(), op.getDataAttrAttr());
     auto convOp = builder.createConvert(loc, op.getResult().getType(),
                                         callOp.getResult(0));
@@ -414,7 +414,7 @@ struct CUFDeviceAddressOpConversion
             op.getHostSymbol().getRootReference().getValue())) {
       auto mod = op->getParentOfType<mlir::ModuleOp>();
       mlir::Location loc = op.getLoc();
-      auto hostAddr = rewriter.create<fir::AddrOfOp>(
+      auto hostAddr = fir::AddrOfOp::create(rewriter,
           loc, fir::ReferenceType::get(global.getType()), op.getHostSymbol());
       fir::FirOpBuilder builder(rewriter, mod);
       mlir::func::FuncOp callee =
@@ -428,7 +428,7 @@ struct CUFDeviceAddressOpConversion
           fir::factory::locationToLineNo(builder, loc, fTy.getInput(2));
       llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
           builder, loc, fTy, conv, sourceFile, sourceLine)};
-      auto call = rewriter.create<fir::CallOp>(loc, callee, args);
+      auto call = fir::CallOp::create(rewriter, loc, callee, args);
       mlir::Value addr = createConvertOp(rewriter, loc, hostAddr.getType(),
                                          call->getResult(0));
       rewriter.replaceOp(op, addr.getDefiningOp());
@@ -456,7 +456,7 @@ struct DeclareOpConversion : public mlir::OpRewritePattern<fir::DeclareOp> {
               addrOfOp.getSymbol().getRootReference().getValue())) {
         if (cuf::isRegisteredDeviceGlobal(global)) {
           rewriter.setInsertionPointAfter(addrOfOp);
-          mlir::Value devAddr = rewriter.create<cuf::DeviceAddressOp>(
+          mlir::Value devAddr = cuf::DeviceAddressOp::create(rewriter,
               op.getLoc(), addrOfOp.getType(), addrOfOp.getSymbol());
           rewriter.startOpModification(op);
           op.getMemrefMutable().assign(devAddr);
@@ -502,7 +502,7 @@ struct CUFFreeOpConversion : public mlir::OpRewritePattern<cuf::FreeOp> {
           loc, builder.getI32Type(), getMemType(op.getDataAttr()));
       llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
           builder, loc, fTy, op.getDevptr(), memTy, sourceFile, sourceLine)};
-      builder.create<fir::CallOp>(loc, func, args);
+      fir::CallOp::create(builder, loc, func, args);
       rewriter.eraseOp(op);
       return mlir::success();
     }
@@ -515,7 +515,7 @@ struct CUFFreeOpConversion : public mlir::OpRewritePattern<cuf::FreeOp> {
         fir::factory::locationToLineNo(builder, loc, fTy.getInput(2));
     llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
         builder, loc, fTy, op.getDevptr(), sourceFile, sourceLine)};
-    auto callOp = builder.create<fir::CallOp>(loc, func, args);
+    auto callOp = fir::CallOp::create(builder, loc, func, args);
     callOp->setAttr(cuf::getDataAttrName(), op.getDataAttrAttr());
     rewriter.eraseOp(op);
     return mlir::success();
@@ -558,18 +558,18 @@ static mlir::Value emboxSrc(mlir::PatternRewriter &rewriter,
       srcTy = fir::LogicalType::get(rewriter.getContext(), 4);
       src = createConvertOp(rewriter, loc, srcTy, src);
       addr = builder.createTemporary(loc, srcTy);
-      builder.create<fir::StoreOp>(loc, src, addr);
+      fir::StoreOp::create(builder, loc, src, addr);
     } else {
       if (dstEleTy && fir::isa_trivial(dstEleTy) && srcTy != dstEleTy) {
         // Use dstEleTy and convert to avoid assign mismatch.
         addr = builder.createTemporary(loc, dstEleTy);
-        auto conv = builder.create<fir::ConvertOp>(loc, dstEleTy, src);
-        builder.create<fir::StoreOp>(loc, conv, addr);
+        auto conv = fir::ConvertOp::create(builder, loc, dstEleTy, src);
+        fir::StoreOp::create(builder, loc, conv, addr);
         srcTy = dstEleTy;
       } else {
         // Put constant in memory if it is not.
         addr = builder.createTemporary(loc, srcTy);
-        builder.create<fir::StoreOp>(loc, src, addr);
+        fir::StoreOp::create(builder, loc, src, addr);
       }
     }
   } else {
@@ -582,7 +582,7 @@ static mlir::Value emboxSrc(mlir::PatternRewriter &rewriter,
                         /*slice=*/nullptr, lenParams,
                         /*tdesc=*/nullptr);
   mlir::Value src = builder.createTemporary(loc, box.getType());
-  builder.create<fir::StoreOp>(loc, box, src);
+  fir::StoreOp::create(builder, loc, box, src);
   return src;
 }
 
@@ -601,7 +601,7 @@ static mlir::Value emboxDst(mlir::PatternRewriter &rewriter,
                         /*slice=*/nullptr, lenParams,
                         /*tdesc=*/nullptr);
   mlir::Value dst = builder.createTemporary(loc, dstBox.getType());
-  builder.create<fir::StoreOp>(loc, dstBox, dst);
+  fir::StoreOp::create(builder, loc, dstBox, dst);
   return dst;
 }
 
@@ -660,7 +660,7 @@ struct CUFDataTransferOpConversion
             fir::factory::locationToLineNo(builder, loc, fTy.getInput(4));
         llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
             builder, loc, fTy, dst, src, modeValue, sourceFile, sourceLine)};
-        builder.create<fir::CallOp>(loc, func, args);
+        fir::CallOp::create(builder, loc, func, args);
         rewriter.eraseOp(op);
         return mlir::success();
       }
@@ -679,12 +679,12 @@ struct CUFDataTransferOpConversion
               extents.push_back(i.value());
         }
 
-        nbElement = rewriter.create<fir::ConvertOp>(loc, i64Ty, extents[0]);
+        nbElement = fir::ConvertOp::create(rewriter, loc, i64Ty, extents[0]);
         for (unsigned i = 1; i < extents.size(); ++i) {
           auto operand =
-              rewriter.create<fir::ConvertOp>(loc, i64Ty, extents[i]);
+              fir::ConvertOp::create(rewriter, loc, i64Ty, extents[i]);
           nbElement =
-              rewriter.create<mlir::arith::MulIOp>(loc, nbElement, operand);
+              mlir::arith::MulIOp::create(rewriter, loc, nbElement, operand);
         }
       } else {
         if (auto seqTy = mlir::dyn_cast_or_null<fir::SequenceType>(dstTy))
@@ -699,11 +699,11 @@ struct CUFDataTransferOpConversion
       } else {
         width = computeWidth(loc, dstTy, kindMap);
       }
-      mlir::Value widthValue = rewriter.create<mlir::arith::ConstantOp>(
+      mlir::Value widthValue = mlir::arith::ConstantOp::create(rewriter,
           loc, i64Ty, rewriter.getIntegerAttr(i64Ty, width));
       mlir::Value bytes =
           nbElement
-              ? rewriter.create<mlir::arith::MulIOp>(loc, nbElement, widthValue)
+              ? mlir::arith::MulIOp::create(rewriter, loc, nbElement, widthValue)
               : widthValue;
 
       mlir::func::FuncOp func =
@@ -719,13 +719,13 @@ struct CUFDataTransferOpConversion
       // Materialize the src if constant.
       if (matchPattern(src.getDefiningOp(), mlir::m_Constant())) {
         mlir::Value temp = builder.createTemporary(loc, srcTy);
-        builder.create<fir::StoreOp>(loc, src, temp);
+        fir::StoreOp::create(builder, loc, src, temp);
         src = temp;
       }
       llvm::SmallVector<mlir::Value> args{
           fir::runtime::createArguments(builder, loc, fTy, dst, src, bytes,
                                         modeValue, sourceFile, sourceLine)};
-      builder.create<fir::CallOp>(loc, func, args);
+      fir::CallOp::create(builder, loc, func, args);
       rewriter.eraseOp(op);
       return mlir::success();
     }
@@ -734,7 +734,7 @@ struct CUFDataTransferOpConversion
       if (mlir::isa<fir::EmboxOp, fir::ReboxOp>(val.getDefiningOp())) {
         // Materialize the box to memory to be able to call the runtime.
         mlir::Value box = builder.createTemporary(loc, val.getType());
-        builder.create<fir::StoreOp>(loc, val, box);
+        fir::StoreOp::create(builder, loc, val, box);
         return box;
       }
       return val;
@@ -768,7 +768,7 @@ struct CUFDataTransferOpConversion
           fir::factory::locationToLineNo(builder, loc, fTy.getInput(4));
       llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
           builder, loc, fTy, dst, src, modeValue, sourceFile, sourceLine)};
-      builder.create<fir::CallOp>(loc, func, args);
+      fir::CallOp::create(builder, loc, func, args);
       rewriter.eraseOp(op);
     } else {
       // Transfer from a descriptor.
@@ -784,7 +784,7 @@ struct CUFDataTransferOpConversion
           fir::factory::locationToLineNo(builder, loc, fTy.getInput(4));
       llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
           builder, loc, fTy, dst, src, modeValue, sourceFile, sourceLine)};
-      builder.create<fir::CallOp>(loc, func, args);
+      fir::CallOp::create(builder, loc, func, args);
       rewriter.eraseOp(op);
     }
     return mlir::success();
@@ -810,20 +810,20 @@ struct CUFLaunchOpConversion
                   mlir::PatternRewriter &rewriter) const override {
     mlir::Location loc = op.getLoc();
     auto idxTy = mlir::IndexType::get(op.getContext());
-    mlir::Value zero = rewriter.create<mlir::arith::ConstantOp>(
+    mlir::Value zero = mlir::arith::ConstantOp::create(rewriter,
         loc, rewriter.getIntegerType(32), rewriter.getI32IntegerAttr(0));
     auto gridSizeX =
-        rewriter.create<mlir::arith::IndexCastOp>(loc, idxTy, op.getGridX());
+        mlir::arith::IndexCastOp::create(rewriter, loc, idxTy, op.getGridX());
     auto gridSizeY =
-        rewriter.create<mlir::arith::IndexCastOp>(loc, idxTy, op.getGridY());
+        mlir::arith::IndexCastOp::create(rewriter, loc, idxTy, op.getGridY());
     auto gridSizeZ =
-        rewriter.create<mlir::arith::IndexCastOp>(loc, idxTy, op.getGridZ());
+        mlir::arith::IndexCastOp::create(rewriter, loc, idxTy, op.getGridZ());
     auto blockSizeX =
-        rewriter.create<mlir::arith::IndexCastOp>(loc, idxTy, op.getBlockX());
+        mlir::arith::IndexCastOp::create(rewriter, loc, idxTy, op.getBlockX());
     auto blockSizeY =
-        rewriter.create<mlir::arith::IndexCastOp>(loc, idxTy, op.getBlockY());
+        mlir::arith::IndexCastOp::create(rewriter, loc, idxTy, op.getBlockY());
     auto blockSizeZ =
-        rewriter.create<mlir::arith::IndexCastOp>(loc, idxTy, op.getBlockZ());
+        mlir::arith::IndexCastOp::create(rewriter, loc, idxTy, op.getBlockZ());
     auto kernelName = mlir::SymbolRefAttr::get(
         rewriter.getStringAttr(cudaDeviceModuleName),
         {mlir::SymbolRefAttr::get(
@@ -835,11 +835,11 @@ struct CUFLaunchOpConversion
             op.getCallee().getLeafReference())) {
       if (auto clusterDimsAttr = funcOp->getAttrOfType<cuf::ClusterDimsAttr>(
               cuf::getClusterDimsAttrName())) {
-        clusterDimX = rewriter.create<mlir::arith::ConstantIndexOp>(
+        clusterDimX = mlir::arith::ConstantIndexOp::create(rewriter,
             loc, clusterDimsAttr.getX().getInt());
-        clusterDimY = rewriter.create<mlir::arith::ConstantIndexOp>(
+        clusterDimY = mlir::arith::ConstantIndexOp::create(rewriter,
             loc, clusterDimsAttr.getY().getInt());
-        clusterDimZ = rewriter.create<mlir::arith::ConstantIndexOp>(
+        clusterDimZ = mlir::arith::ConstantIndexOp::create(rewriter,
             loc, clusterDimsAttr.getZ().getInt());
       }
       procAttr =
@@ -870,7 +870,7 @@ struct CUFLaunchOpConversion
       args.push_back(arg);
     }
     mlir::Value dynamicShmemSize = op.getBytes() ? op.getBytes() : zero;
-    auto gpuLaunchOp = rewriter.create<mlir::gpu::LaunchFuncOp>(
+    auto gpuLaunchOp = mlir::gpu::LaunchFuncOp::create(rewriter,
         loc, kernelName, mlir::gpu::KernelDim3{gridSizeX, gridSizeY, gridSizeZ},
         mlir::gpu::KernelDim3{blockSizeX, blockSizeY, blockSizeZ},
         dynamicShmemSize, args);
@@ -883,7 +883,7 @@ struct CUFLaunchOpConversion
       mlir::OpBuilder::InsertionGuard guard(rewriter);
       rewriter.setInsertionPoint(gpuLaunchOp);
       mlir::Value stream =
-          rewriter.create<cuf::StreamCastOp>(loc, op.getStream());
+          cuf::StreamCastOp::create(rewriter, loc, op.getStream());
       gpuLaunchOp.getAsyncDependenciesMutable().append(stream);
     }
     if (procAttr)
@@ -916,7 +916,7 @@ struct CUFSyncDescriptorOpConversion
     if (!globalOp)
       return mlir::failure();
 
-    auto hostAddr = builder.create<fir::AddrOfOp>(
+    auto hostAddr = fir::AddrOfOp::create(builder,
         loc, fir::ReferenceType::get(globalOp.getType()), op.getGlobalName());
     fir::runtime::cuda::genSyncGlobalDescriptor(builder, loc, hostAddr);
     op.erase();
diff --git a/flang/lib/Optimizer/Transforms/CharacterConversion.cpp b/flang/lib/Optimizer/Transforms/CharacterConversion.cpp
index aee7e8ca5cb66..2c2db1d4d6ded 100644
--- a/flang/lib/Optimizer/Transforms/CharacterConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/CharacterConversion.cpp
@@ -48,12 +48,12 @@ class CharacterConvertConversion
                << "running character conversion on " << conv << '\n');
 
     // Establish a loop that executes count iterations.
-    auto zero = rewriter.create<mlir::arith::ConstantIndexOp>(loc, 0);
-    auto one = rewriter.create<mlir::arith::ConstantIndexOp>(loc, 1);
+    auto zero = mlir::arith::ConstantIndexOp::create(rewriter, loc, 0);
+    auto one = mlir::arith::ConstantIndexOp::create(rewriter, loc, 1);
     auto idxTy = rewriter.getIndexType();
-    auto castCnt = rewriter.create<fir::ConvertOp>(loc, idxTy, conv.getCount());
-    auto countm1 = rewriter.create<mlir::arith::SubIOp>(loc, castCnt, one);
-    auto loop = rewriter.create<fir::DoLoopOp>(loc, zero, countm1, one);
+    auto castCnt = fir::ConvertOp::create(rewriter, loc, idxTy, conv.getCount());
+    auto countm1 = mlir::arith::SubIOp::create(rewriter, loc, castCnt, one);
+    auto loop = fir::DoLoopOp::create(rewriter, loc, zero, countm1, one);
     auto insPt = rewriter.saveInsertionPoint();
     rewriter.setInsertionPointToStart(loop.getBody());
 
@@ -75,21 +75,21 @@ class CharacterConvertConversion
     auto toTy = rewriter.getIntegerType(toBits);
     auto toPtrTy = pointerType(toBits);
     auto fromPtr =
-        rewriter.create<fir::ConvertOp>(loc, fromPtrTy, conv.getFrom());
-    auto toPtr = rewriter.create<fir::ConvertOp>(loc, toPtrTy, conv.getTo());
+        fir::ConvertOp::create(rewriter, loc, fromPtrTy, conv.getFrom());
+    auto toPtr = fir::ConvertOp::create(rewriter, loc, toPtrTy, conv.getTo());
     auto getEleTy = [&](unsigned bits) {
       return fir::ReferenceType::get(rewriter.getIntegerType(bits));
     };
-    auto fromi = rewriter.create<fir::CoordinateOp>(
+    auto fromi = fir::CoordinateOp::create(rewriter,
         loc, getEleTy(fromBits), fromPtr,
         mlir::ValueRange{loop.getInductionVar()});
-    auto toi = rewriter.create<fir::CoordinateOp>(
+    auto toi = fir::CoordinateOp::create(rewriter,
         loc, getEleTy(toBits), toPtr, mlir::ValueRange{loop.getInductionVar()});
-    auto load = rewriter.create<fir::LoadOp>(loc, fromi);
+    auto load = fir::LoadOp::create(rewriter, loc, fromi);
     mlir::Value icast =
         (fromBits >= toBits)
-            ? rewriter.create<fir::ConvertOp>(loc, toTy, load).getResult()
-            : rewriter.create<mlir::arith::ExtUIOp>(loc, toTy, load)
+            ? fir::ConvertOp::create(rewriter, loc, toTy, load).getResult()
+            : mlir::arith::ExtUIOp::create(rewriter, loc, toTy, load)
                   .getResult();
     rewriter.replaceOpWithNewOp<fir::StoreOp>(conv, icast, toi);
     rewriter.restoreInsertionPoint(insPt);
diff --git a/flang/lib/Optimizer/Transforms/ConstantArgumentGlobalisation.cpp b/flang/lib/Optimizer/Transforms/ConstantArgumentGlobalisation.cpp
index 239a7cdaa4cf2..09cec03ee0916 100644
--- a/flang/lib/Optimizer/Transforms/ConstantArgumentGlobalisation.cpp
+++ b/flang/lib/Optimizer/Transforms/ConstantArgumentGlobalisation.cpp
@@ -111,10 +111,10 @@ class CallOpRewriter : public mlir::OpRewritePattern<fir::CallOp> {
             builder.insert(cln);
             mlir::Value val =
                 builder.createConvert(loc, varTy, cln->getResult(0));
-            builder.create<fir::HasValueOp>(loc, val);
+            fir::HasValueOp::create(builder, loc, val);
           },
           builder.createInternalLinkage());
-      mlir::Value addr = builder.create<fir::AddrOfOp>(loc, global.resultType(),
+      mlir::Value addr = fir::AddrOfOp::create(builder, loc, global.resultType(),
                                                        global.getSymbol());
       newOperands.push_back(addr);
       needUpdate = true;
@@ -125,7 +125,7 @@ class CallOpRewriter : public mlir::OpRewritePattern<fir::CallOp> {
       llvm::SmallVector<mlir::Type> newResultTypes;
       newResultTypes.append(callOp.getResultTypes().begin(),
                             callOp.getResultTypes().end());
-      fir::CallOp newOp = builder.create<fir::CallOp>(
+      fir::CallOp newOp = fir::CallOp::create(builder,
           loc,
           callOp.getCallee().has_value() ? callOp.getCallee().value()
                                          : mlir::SymbolRefAttr{},
diff --git a/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp b/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
index 3d35803e6a2d3..0c070ed6d9c3f 100644
--- a/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
+++ b/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
@@ -83,17 +83,17 @@ class CfgLoopConv : public mlir::OpRewritePattern<fir::DoLoopOp> {
 
     // Initalization block
     rewriter.setInsertionPointToEnd(initBlock);
-    auto diff = rewriter.create<mlir::arith::SubIOp>(loc, high, low);
-    auto distance = rewriter.create<mlir::arith::AddIOp>(loc, diff, step);
+    auto diff = mlir::arith::SubIOp::create(rewriter, loc, high, low);
+    auto distance = mlir::arith::AddIOp::create(rewriter, loc, diff, step);
     mlir::Value iters =
-        rewriter.create<mlir::arith::DivSIOp>(loc, distance, step);
+        mlir::arith::DivSIOp::create(rewriter, loc, distance, step);
 
     if (forceLoopToExecuteOnce) {
-      auto zero = rewriter.create<mlir::arith::ConstantIndexOp>(loc, 0);
-      auto cond = rewriter.create<mlir::arith::CmpIOp>(
+      auto zero = mlir::arith::ConstantIndexOp::create(rewriter, loc, 0);
+      auto cond = mlir::arith::CmpIOp::create(rewriter,
           loc, arith::CmpIPredicate::sle, iters, zero);
-      auto one = rewriter.create<mlir::arith::ConstantIndexOp>(loc, 1);
-      iters = rewriter.create<mlir::arith::SelectOp>(loc, cond, one, iters);
+      auto one = mlir::arith::ConstantIndexOp::create(rewriter, loc, 1);
+      iters = mlir::arith::SelectOp::create(rewriter, loc, cond, one, iters);
     }
 
     llvm::SmallVector<mlir::Value> loopOperands;
@@ -102,20 +102,20 @@ class CfgLoopConv : public mlir::OpRewritePattern<fir::DoLoopOp> {
     loopOperands.append(operands.begin(), operands.end());
     loopOperands.push_back(iters);
 
-    rewriter.create<mlir::cf::BranchOp>(loc, conditionalBlock, loopOperands);
+    mlir::cf::BranchOp::create(rewriter, loc, conditionalBlock, loopOperands);
 
     // Last loop block
     auto *terminator = lastBlock->getTerminator();
     rewriter.setInsertionPointToEnd(lastBlock);
     auto iv = conditionalBlock->getArgument(0);
     mlir::Value steppedIndex =
-        rewriter.create<mlir::arith::AddIOp>(loc, iv, step, iofAttr);
+        mlir::arith::AddIOp::create(rewriter, loc, iv, step, iofAttr);
     assert(steppedIndex && "must be a Value");
     auto lastArg = conditionalBlock->getNumArguments() - 1;
     auto itersLeft = conditionalBlock->getArgument(lastArg);
-    auto one = rewriter.create<mlir::arith::ConstantIndexOp>(loc, 1);
+    auto one = mlir::arith::ConstantIndexOp::create(rewriter, loc, 1);
     mlir::Value itersMinusOne =
-        rewriter.create<mlir::arith::SubIOp>(loc, itersLeft, one);
+        mlir::arith::SubIOp::create(rewriter, loc, itersLeft, one);
 
     llvm::SmallVector<mlir::Value> loopCarried;
     loopCarried.push_back(steppedIndex);
@@ -124,7 +124,7 @@ class CfgLoopConv : public mlir::OpRewritePattern<fir::DoLoopOp> {
     loopCarried.append(begin, terminator->operand_end());
     loopCarried.push_back(itersMinusOne);
     auto backEdge =
-        rewriter.create<mlir::cf::BranchOp>(loc, conditionalBlock, loopCarried);
+        mlir::cf::BranchOp::create(rewriter, loc, conditionalBlock, loopCarried);
     rewriter.eraseOp(terminator);
 
     // Copy loop annotations from the do loop to the loop back edge.
@@ -133,11 +133,11 @@ class CfgLoopConv : public mlir::OpRewritePattern<fir::DoLoopOp> {
 
     // Conditional block
     rewriter.setInsertionPointToEnd(conditionalBlock);
-    auto zero = rewriter.create<mlir::arith::ConstantIndexOp>(loc, 0);
-    auto comparison = rewriter.create<mlir::arith::CmpIOp>(
+    auto zero = mlir::arith::ConstantIndexOp::create(rewriter, loc, 0);
+    auto comparison = mlir::arith::CmpIOp::create(rewriter,
         loc, arith::CmpIPredicate::sgt, itersLeft, zero);
 
-    rewriter.create<mlir::cf::CondBranchOp>(
+    mlir::cf::CondBranchOp::create(rewriter,
         loc, comparison, firstBlock, llvm::ArrayRef<mlir::Value>(), endBlock,
         llvm::ArrayRef<mlir::Value>());
 
@@ -180,7 +180,7 @@ class CfgIfConv : public mlir::OpRewritePattern<fir::IfOp> {
       continueBlock = rewriter.createBlock(
           remainingOpsBlock, ifOp.getResultTypes(),
           llvm::SmallVector<mlir::Location>(ifOp.getNumResults(), loc));
-      rewriter.create<mlir::cf::BranchOp>(loc, remainingOpsBlock);
+      mlir::cf::BranchOp::create(rewriter, loc, remainingOpsBlock);
     }
 
     // Move blocks from the "then" region to the region containing 'fir.if',
@@ -190,7 +190,7 @@ class CfgIfConv : public mlir::OpRewritePattern<fir::IfOp> {
     auto *ifOpTerminator = ifOpRegion.back().getTerminator();
     auto ifOpTerminatorOperands = ifOpTerminator->getOperands();
     rewriter.setInsertionPointToEnd(&ifOpRegion.back());
-    rewriter.create<mlir::cf::BranchOp>(loc, continueBlock,
+    mlir::cf::BranchOp::create(rewriter, loc, continueBlock,
                                         ifOpTerminatorOperands);
     rewriter.eraseOp(ifOpTerminator);
     rewriter.inlineRegionBefore(ifOpRegion, continueBlock);
@@ -205,14 +205,14 @@ class CfgIfConv : public mlir::OpRewritePattern<fir::IfOp> {
       auto *otherwiseTerm = otherwiseRegion.back().getTerminator();
       auto otherwiseTermOperands = otherwiseTerm->getOperands();
       rewriter.setInsertionPointToEnd(&otherwiseRegion.back());
-      rewriter.create<mlir::cf::BranchOp>(loc, continueBlock,
+      mlir::cf::BranchOp::create(rewriter, loc, continueBlock,
                                           otherwiseTermOperands);
       rewriter.eraseOp(otherwiseTerm);
       rewriter.inlineRegionBefore(otherwiseRegion, continueBlock);
     }
 
     rewriter.setInsertionPointToEnd(condBlock);
-    auto branchOp = rewriter.create<mlir::cf::CondBranchOp>(
+    auto branchOp = mlir::cf::CondBranchOp::create(rewriter,
         loc, ifOp.getCondition(), ifOpBlock, llvm::ArrayRef<mlir::Value>(),
         otherwiseBlock, llvm::ArrayRef<mlir::Value>());
     llvm::ArrayRef<int32_t> weights = ifOp.getWeights();
@@ -269,7 +269,7 @@ class CfgIterWhileConv : public mlir::OpRewritePattern<fir::IterWhileOp> {
     rewriter.setInsertionPointToEnd(lastBodyBlock);
     auto step = whileOp.getStep();
     mlir::Value stepped =
-        rewriter.create<mlir::arith::AddIOp>(loc, iv, step, iofAttr);
+        mlir::arith::AddIOp::create(rewriter, loc, iv, step, iofAttr);
     assert(stepped && "must be a Value");
 
     llvm::SmallVector<mlir::Value> loopCarried;
@@ -278,7 +278,7 @@ class CfgIterWhileConv : public mlir::OpRewritePattern<fir::IterWhileOp> {
                      ? std::next(terminator->operand_begin())
                      : terminator->operand_begin();
     loopCarried.append(begin, terminator->operand_end());
-    rewriter.create<mlir::cf::BranchOp>(loc, conditionBlock, loopCarried);
+    mlir::cf::BranchOp::create(rewriter, loc, conditionBlock, loopCarried);
     rewriter.eraseOp(terminator);
 
     // Compute loop bounds before branching to the condition.
@@ -293,29 +293,29 @@ class CfgIterWhileConv : public mlir::OpRewritePattern<fir::IterWhileOp> {
     destOperands.push_back(lowerBound);
     auto iterOperands = whileOp.getIterOperands();
     destOperands.append(iterOperands.begin(), iterOperands.end());
-    rewriter.create<mlir::cf::BranchOp>(loc, conditionBlock, destOperands);
+    mlir::cf::BranchOp::create(rewriter, loc, conditionBlock, destOperands);
 
     // With the body block done, we can fill in the condition block.
     rewriter.setInsertionPointToEnd(conditionBlock);
     // The comparison depends on the sign of the step value. We fully expect
     // this expression to be folded by the optimizer or LLVM. This expression
     // is written this way so that `step == 0` always returns `false`.
-    auto zero = rewriter.create<mlir::arith::ConstantIndexOp>(loc, 0);
-    auto compl0 = rewriter.create<mlir::arith::CmpIOp>(
+    auto zero = mlir::arith::ConstantIndexOp::create(rewriter, loc, 0);
+    auto compl0 = mlir::arith::CmpIOp::create(rewriter,
         loc, arith::CmpIPredicate::slt, zero, step);
-    auto compl1 = rewriter.create<mlir::arith::CmpIOp>(
+    auto compl1 = mlir::arith::CmpIOp::create(rewriter,
         loc, arith::CmpIPredicate::sle, iv, upperBound);
-    auto compl2 = rewriter.create<mlir::arith::CmpIOp>(
+    auto compl2 = mlir::arith::CmpIOp::create(rewriter,
         loc, arith::CmpIPredicate::slt, step, zero);
-    auto compl3 = rewriter.create<mlir::arith::CmpIOp>(
+    auto compl3 = mlir::arith::CmpIOp::create(rewriter,
         loc, arith::CmpIPredicate::sle, upperBound, iv);
-    auto cmp0 = rewriter.create<mlir::arith::AndIOp>(loc, compl0, compl1);
-    auto cmp1 = rewriter.create<mlir::arith::AndIOp>(loc, compl2, compl3);
-    auto cmp2 = rewriter.create<mlir::arith::OrIOp>(loc, cmp0, cmp1);
+    auto cmp0 = mlir::arith::AndIOp::create(rewriter, loc, compl0, compl1);
+    auto cmp1 = mlir::arith::AndIOp::create(rewriter, loc, compl2, compl3);
+    auto cmp2 = mlir::arith::OrIOp::create(rewriter, loc, cmp0, cmp1);
     // Remember to AND in the early-exit bool.
     auto comparison =
-        rewriter.create<mlir::arith::AndIOp>(loc, iterateVar, cmp2);
-    rewriter.create<mlir::cf::CondBranchOp>(
+        mlir::arith::AndIOp::create(rewriter, loc, iterateVar, cmp2);
+    mlir::cf::CondBranchOp::create(rewriter,
         loc, comparison, firstBodyBlock, llvm::ArrayRef<mlir::Value>(),
         endBlock, llvm::ArrayRef<mlir::Value>());
     // The result of the loop operation is the values of the condition block
diff --git a/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp b/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp
index abad500d3f657..8d16c8cc74044 100644
--- a/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp
+++ b/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp
@@ -103,13 +103,13 @@ mlir::LLVM::DILocalVariableAttr DebugTypeGenerator::generateArtificialVariable(
   mlir::Type type = val.getType();
   if (!mlir::isa<mlir::IntegerType>(type) || !type.isSignlessInteger()) {
     type = builder.getIntegerType(64);
-    val = builder.create<fir::ConvertOp>(declOp.getLoc(), type, val);
+    val = fir::ConvertOp::create(builder, declOp.getLoc(), type, val);
   }
   mlir::LLVM::DITypeAttr Ty = convertType(type, fileAttr, scope, declOp);
   auto lvAttr = mlir::LLVM::DILocalVariableAttr::get(
       context, scope, name, fileAttr, /*line=*/0, /*argNo=*/0,
       /*alignInBits=*/0, Ty, mlir::LLVM::DIFlags::Artificial);
-  builder.create<mlir::LLVM::DbgValueOp>(declOp.getLoc(), val, lvAttr, nullptr);
+  mlir::LLVM::DbgValueOp::create(builder, declOp.getLoc(), val, lvAttr, nullptr);
   return lvAttr;
 }
 
diff --git a/flang/lib/Optimizer/Transforms/FIRToSCF.cpp b/flang/lib/Optimizer/Transforms/FIRToSCF.cpp
index f06ad2db90d55..d7d1865bc56ba 100644
--- a/flang/lib/Optimizer/Transforms/FIRToSCF.cpp
+++ b/flang/lib/Optimizer/Transforms/FIRToSCF.cpp
@@ -49,13 +49,13 @@ struct DoLoopConversion : public OpRewritePattern<fir::DoLoopOp> {
     // must be a positive value.
     // For easier conversion, we calculate the trip count and use a canonical
     // induction variable.
-    auto diff = rewriter.create<arith::SubIOp>(loc, high, low);
-    auto distance = rewriter.create<arith::AddIOp>(loc, diff, step);
-    auto tripCount = rewriter.create<arith::DivSIOp>(loc, distance, step);
-    auto zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
-    auto one = rewriter.create<arith::ConstantIndexOp>(loc, 1);
+    auto diff = arith::SubIOp::create(rewriter, loc, high, low);
+    auto distance = arith::AddIOp::create(rewriter, loc, diff, step);
+    auto tripCount = arith::DivSIOp::create(rewriter, loc, distance, step);
+    auto zero = arith::ConstantIndexOp::create(rewriter, loc, 0);
+    auto one = arith::ConstantIndexOp::create(rewriter, loc, 1);
     auto scfForOp =
-        rewriter.create<scf::ForOp>(loc, zero, tripCount, one, iterArgs);
+        scf::ForOp::create(rewriter, loc, zero, tripCount, one, iterArgs);
 
     auto &loopOps = doLoopOp.getBody()->getOperations();
     auto resultOp = cast<fir::ResultOp>(doLoopOp.getBody()->getTerminator());
@@ -68,12 +68,12 @@ struct DoLoopConversion : public OpRewritePattern<fir::DoLoopOp> {
 
     rewriter.setInsertionPointToStart(loweredBody);
     Value iv =
-        rewriter.create<arith::MulIOp>(loc, scfForOp.getInductionVar(), step);
-    iv = rewriter.create<arith::AddIOp>(loc, low, iv);
+        arith::MulIOp::create(rewriter, loc, scfForOp.getInductionVar(), step);
+    iv = arith::AddIOp::create(rewriter, loc, low, iv);
 
     if (!results.empty()) {
       rewriter.setInsertionPointToEnd(loweredBody);
-      rewriter.create<scf::YieldOp>(resultOp->getLoc(), results);
+      scf::YieldOp::create(rewriter, resultOp->getLoc(), results);
     }
     doLoopOp.getInductionVar().replaceAllUsesWith(iv);
     rewriter.replaceAllUsesWith(doLoopOp.getRegionIterArgs(),
diff --git a/flang/lib/Optimizer/Transforms/GenRuntimeCallsForTest.cpp b/flang/lib/Optimizer/Transforms/GenRuntimeCallsForTest.cpp
index 7ea3b9c670c69..699be12178881 100644
--- a/flang/lib/Optimizer/Transforms/GenRuntimeCallsForTest.cpp
+++ b/flang/lib/Optimizer/Transforms/GenRuntimeCallsForTest.cpp
@@ -91,8 +91,8 @@ void GenRuntimeCallsForTestPass::runOnOperation() {
     // Generate the wrapper function body that consists of a call and return.
     builder.setInsertionPointToStart(callerFunc.addEntryBlock());
     mlir::Block::BlockArgListType args = callerFunc.front().getArguments();
-    auto callOp = builder.create<fir::CallOp>(loc, funcOp, args);
-    builder.create<mlir::func::ReturnOp>(loc, callOp.getResults());
+    auto callOp = fir::CallOp::create(builder, loc, funcOp, args);
+    mlir::func::ReturnOp::create(builder, loc, callOp.getResults());
 
     newFuncs.push_back(callerFunc.getOperation());
     builder.restoreInsertionPoint(insertPt);
diff --git a/flang/lib/Optimizer/Transforms/LoopVersioning.cpp b/flang/lib/Optimizer/Transforms/LoopVersioning.cpp
index 056bdf63d914f..dbbc740a6f027 100644
--- a/flang/lib/Optimizer/Transforms/LoopVersioning.cpp
+++ b/flang/lib/Optimizer/Transforms/LoopVersioning.cpp
@@ -285,7 +285,7 @@ static mlir::Value getIndex(fir::FirOpBuilder &builder, mlir::Operation *op,
   // index_0 = index - lb;
   if (lb.getType() != index.getType())
     lb = builder.createConvert(coop.getLoc(), index.getType(), lb);
-  return builder.create<mlir::arith::SubIOp>(coop.getLoc(), index, lb);
+  return mlir::arith::SubIOp::create(builder, coop.getLoc(), index, lb);
 }
 
 void LoopVersioningPass::runOnOperation() {
@@ -483,25 +483,25 @@ void LoopVersioningPass::runOnOperation() {
       unsigned ndims = arg.rank;
       for (unsigned i = 0; i < ndims; i++) {
         mlir::Value dimIdx = builder.createIntegerConstant(loc, idxTy, i);
-        arg.dims[i] = builder.create<fir::BoxDimsOp>(loc, idxTy, idxTy, idxTy,
+        arg.dims[i] = fir::BoxDimsOp::create(builder, loc, idxTy, idxTy, idxTy,
                                                      arg.arg, dimIdx);
       }
       // We only care about lowest order dimension, here.
       mlir::Value elemSize =
           builder.createIntegerConstant(loc, idxTy, arg.size);
-      mlir::Value cmp = builder.create<mlir::arith::CmpIOp>(
+      mlir::Value cmp = mlir::arith::CmpIOp::create(builder,
           loc, mlir::arith::CmpIPredicate::eq, arg.dims[0].getResult(2),
           elemSize);
       if (!allCompares) {
         allCompares = cmp;
       } else {
         allCompares =
-            builder.create<mlir::arith::AndIOp>(loc, cmp, allCompares);
+            mlir::arith::AndIOp::create(builder, loc, cmp, allCompares);
       }
     }
 
     auto ifOp =
-        builder.create<fir::IfOp>(loc, op.op->getResultTypes(), allCompares,
+        fir::IfOp::create(builder, loc, op.op->getResultTypes(), allCompares,
                                   /*withElse=*/true);
     builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
 
@@ -515,8 +515,8 @@ void LoopVersioningPass::runOnOperation() {
       mlir::Type arrTy = fir::SequenceType::get(newShape, elementType);
       mlir::Type boxArrTy = fir::BoxType::get(arrTy);
       mlir::Type refArrTy = builder.getRefType(arrTy);
-      auto carg = builder.create<fir::ConvertOp>(loc, boxArrTy, arg.arg);
-      auto caddr = builder.create<fir::BoxAddrOp>(loc, refArrTy, carg);
+      auto carg = fir::ConvertOp::create(builder, loc, boxArrTy, arg.arg);
+      auto caddr = fir::BoxAddrOp::create(builder, loc, refArrTy, carg);
       auto insPt = builder.saveInsertionPoint();
       // Use caddr instead of arg.
       clonedLoop->walk([&](mlir::Operation *coop) {
@@ -540,8 +540,8 @@ void LoopVersioningPass::runOnOperation() {
             mlir::Value scale =
                 builder.createConvert(loc, idxTy, arg.dims[i].getResult(2));
             curIndex =
-                builder.create<mlir::arith::MulIOp>(loc, scale, curIndex);
-            totalIndex = (totalIndex) ? builder.create<mlir::arith::AddIOp>(
+                mlir::arith::MulIOp::create(builder, loc, scale, curIndex);
+            totalIndex = (totalIndex) ? mlir::arith::AddIOp::create(builder,
                                             loc, curIndex, totalIndex)
                                       : curIndex;
           }
@@ -554,15 +554,15 @@ void LoopVersioningPass::runOnOperation() {
             unsigned bits = llvm::Log2_32(arg.size);
             mlir::Value elemShift =
                 builder.createIntegerConstant(loc, idxTy, bits);
-            totalIndex = builder.create<mlir::arith::AddIOp>(
+            totalIndex = mlir::arith::AddIOp::create(builder,
                 loc,
-                builder.create<mlir::arith::ShRSIOp>(loc, totalIndex,
+                mlir::arith::ShRSIOp::create(builder, loc, totalIndex,
                                                      elemShift),
                 finalIndex);
           } else {
             totalIndex = finalIndex;
           }
-          auto newOp = builder.create<fir::CoordinateOp>(
+          auto newOp = fir::CoordinateOp::create(builder,
               loc, builder.getRefType(elementType), caddr,
               mlir::ValueRange{totalIndex});
           LLVM_DEBUG(newOp->dump());
@@ -582,7 +582,7 @@ void LoopVersioningPass::runOnOperation() {
     mlir::ResultRange results = clonedLoop->getResults();
     bool hasResults = (results.size() > 0);
     if (hasResults)
-      builder.create<fir::ResultOp>(loc, results);
+      fir::ResultOp::create(builder, loc, results);
 
     // Add the original loop in the else-side of the if operation.
     builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
@@ -591,7 +591,7 @@ void LoopVersioningPass::runOnOperation() {
     builder.insert(op.op);
     // Rely on "cloned loop has results, so original loop also has results".
     if (hasResults) {
-      builder.create<fir::ResultOp>(loc, op.op->getResults());
+      fir::ResultOp::create(builder, loc, op.op->getResults());
     } else {
       // Use an assert to check this.
       assert(op.op->getResults().size() == 0 &&
diff --git a/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp b/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp
index 3f308a8f4b560..1a0bb6c98e1d8 100644
--- a/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp
+++ b/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp
@@ -68,7 +68,7 @@ static mlir::Value genAllocmem(mlir::OpBuilder &builder, fir::AllocaOp alloca,
   };
   llvm::StringRef uniqName = unpackName(alloca.getUniqName());
   llvm::StringRef bindcName = unpackName(alloca.getBindcName());
-  auto heap = builder.create<fir::AllocMemOp>(alloca.getLoc(), varTy, uniqName,
+  auto heap = fir::AllocMemOp::create(builder, alloca.getLoc(), varTy, uniqName,
                                               bindcName, alloca.getTypeparams(),
                                               alloca.getShape());
   LLVM_DEBUG(llvm::dbgs() << "memory allocation opt: replaced " << alloca
@@ -78,7 +78,7 @@ static mlir::Value genAllocmem(mlir::OpBuilder &builder, fir::AllocaOp alloca,
 
 static void genFreemem(mlir::Location loc, mlir::OpBuilder &builder,
                        mlir::Value allocmem) {
-  [[maybe_unused]] auto free = builder.create<fir::FreeMemOp>(loc, allocmem);
+  [[maybe_unused]] auto free = fir::FreeMemOp::create(builder, loc, allocmem);
   LLVM_DEBUG(llvm::dbgs() << "memory allocation opt: add free " << free
                           << " for " << allocmem << '\n');
 }
diff --git a/flang/lib/Optimizer/Transforms/MemoryUtils.cpp b/flang/lib/Optimizer/Transforms/MemoryUtils.cpp
index bc4fcd8b0112e..d295758abd852 100644
--- a/flang/lib/Optimizer/Transforms/MemoryUtils.cpp
+++ b/flang/lib/Optimizer/Transforms/MemoryUtils.cpp
@@ -200,29 +200,29 @@ void AllocaReplaceImpl::genIndirectDeallocation(
   // and access it indirectly in the entry points that do not dominate.
   rewriter.setInsertionPointToStart(&owningRegion.front());
   mlir::Type heapType = fir::HeapType::get(alloca.getInType());
-  mlir::Value ptrVar = rewriter.create<fir::AllocaOp>(loc, heapType);
-  mlir::Value nullPtr = rewriter.create<fir::ZeroOp>(loc, heapType);
-  rewriter.create<fir::StoreOp>(loc, nullPtr, ptrVar);
+  mlir::Value ptrVar = fir::AllocaOp::create(rewriter, loc, heapType);
+  mlir::Value nullPtr = fir::ZeroOp::create(rewriter, loc, heapType);
+  fir::StoreOp::create(rewriter, loc, nullPtr, ptrVar);
   // TODO: introducing a pointer compare op in FIR would help
   // generating less IR here.
   mlir::Type intPtrTy = fir::getIntPtrType(rewriter);
-  mlir::Value c0 = rewriter.create<mlir::arith::ConstantOp>(
+  mlir::Value c0 = mlir::arith::ConstantOp::create(rewriter,
       loc, intPtrTy, rewriter.getIntegerAttr(intPtrTy, 0));
 
   // Store new storage address right after its creation.
   rewriter.restoreInsertionPoint(replacementInsertPoint);
   mlir::Value castReplacement =
       fir::factory::createConvert(rewriter, loc, heapType, replacement);
-  rewriter.create<fir::StoreOp>(loc, castReplacement, ptrVar);
+  fir::StoreOp::create(rewriter, loc, castReplacement, ptrVar);
 
   // Generate conditional deallocation at every deallocation point.
   auto genConditionalDealloc = [&](mlir::Location loc) {
-    mlir::Value ptrVal = rewriter.create<fir::LoadOp>(loc, ptrVar);
+    mlir::Value ptrVal = fir::LoadOp::create(rewriter, loc, ptrVar);
     mlir::Value ptrToInt =
-        rewriter.create<fir::ConvertOp>(loc, intPtrTy, ptrVal);
-    mlir::Value isAllocated = rewriter.create<mlir::arith::CmpIOp>(
+        fir::ConvertOp::create(rewriter, loc, intPtrTy, ptrVal);
+    mlir::Value isAllocated = mlir::arith::CmpIOp::create(rewriter,
         loc, mlir::arith::CmpIPredicate::ne, ptrToInt, c0);
-    auto ifOp = rewriter.create<fir::IfOp>(loc, mlir::TypeRange{}, isAllocated,
+    auto ifOp = fir::IfOp::create(rewriter, loc, mlir::TypeRange{}, isAllocated,
                                            /*withElseRegion=*/false);
     rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
     mlir::Value cast = fir::factory::createConvert(
diff --git a/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp b/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp
index 6e45aae4246d0..29b7d9dd77bd2 100644
--- a/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp
@@ -183,49 +183,49 @@ struct DispatchOpConv : public OpConversionPattern<fir::DispatchOp> {
     mlir::Type tdescType =
         fir::TypeDescType::get(mlir::NoneType::get(rewriter.getContext()));
     mlir::Value boxDesc =
-        rewriter.create<fir::BoxTypeDescOp>(loc, tdescType, passedObject);
-    boxDesc = rewriter.create<fir::ConvertOp>(
+        fir::BoxTypeDescOp::create(rewriter, loc, tdescType, passedObject);
+    boxDesc = fir::ConvertOp::create(rewriter,
         loc, fir::ReferenceType::get(typeDescTy), boxDesc);
 
     // Load the bindings descriptor.
     auto bindingsCompName = Fortran::semantics::bindingDescCompName;
     fir::RecordType typeDescRecTy = mlir::cast<fir::RecordType>(typeDescTy);
-    mlir::Value field = rewriter.create<fir::FieldIndexOp>(
+    mlir::Value field = fir::FieldIndexOp::create(rewriter,
         loc, fieldTy, bindingsCompName, typeDescRecTy, mlir::ValueRange{});
     mlir::Type coorTy =
         fir::ReferenceType::get(typeDescRecTy.getType(bindingsCompName));
     mlir::Value bindingBoxAddr =
-        rewriter.create<fir::CoordinateOp>(loc, coorTy, boxDesc, field);
-    mlir::Value bindingBox = rewriter.create<fir::LoadOp>(loc, bindingBoxAddr);
+        fir::CoordinateOp::create(rewriter, loc, coorTy, boxDesc, field);
+    mlir::Value bindingBox = fir::LoadOp::create(rewriter, loc, bindingBoxAddr);
 
     // Load the correct binding.
-    mlir::Value bindings = rewriter.create<fir::BoxAddrOp>(loc, bindingBox);
+    mlir::Value bindings = fir::BoxAddrOp::create(rewriter, loc, bindingBox);
     fir::RecordType bindingTy = fir::unwrapIfDerived(
         mlir::cast<fir::BaseBoxType>(bindingBox.getType()));
     mlir::Type bindingAddrTy = fir::ReferenceType::get(bindingTy);
-    mlir::Value bindingIdxVal = rewriter.create<mlir::arith::ConstantOp>(
+    mlir::Value bindingIdxVal = mlir::arith::ConstantOp::create(rewriter,
         loc, rewriter.getIndexType(), rewriter.getIndexAttr(bindingIdx));
-    mlir::Value bindingAddr = rewriter.create<fir::CoordinateOp>(
+    mlir::Value bindingAddr = fir::CoordinateOp::create(rewriter,
         loc, bindingAddrTy, bindings, bindingIdxVal);
 
     // Get the function pointer.
     auto procCompName = Fortran::semantics::procCompName;
-    mlir::Value procField = rewriter.create<fir::FieldIndexOp>(
+    mlir::Value procField = fir::FieldIndexOp::create(rewriter,
         loc, fieldTy, procCompName, bindingTy, mlir::ValueRange{});
     fir::RecordType procTy =
         mlir::cast<fir::RecordType>(bindingTy.getType(procCompName));
     mlir::Type procRefTy = fir::ReferenceType::get(procTy);
-    mlir::Value procRef = rewriter.create<fir::CoordinateOp>(
+    mlir::Value procRef = fir::CoordinateOp::create(rewriter,
         loc, procRefTy, bindingAddr, procField);
 
     auto addressFieldName = Fortran::lower::builtin::cptrFieldName;
-    mlir::Value addressField = rewriter.create<fir::FieldIndexOp>(
+    mlir::Value addressField = fir::FieldIndexOp::create(rewriter,
         loc, fieldTy, addressFieldName, procTy, mlir::ValueRange{});
     mlir::Type addressTy = procTy.getType(addressFieldName);
     mlir::Type addressRefTy = fir::ReferenceType::get(addressTy);
-    mlir::Value addressRef = rewriter.create<fir::CoordinateOp>(
+    mlir::Value addressRef = fir::CoordinateOp::create(rewriter,
         loc, addressRefTy, procRef, addressField);
-    mlir::Value address = rewriter.create<fir::LoadOp>(loc, addressRef);
+    mlir::Value address = fir::LoadOp::create(rewriter, loc, addressRef);
 
     // Get the function type.
     llvm::SmallVector<mlir::Type> argTypes;
@@ -237,7 +237,7 @@ struct DispatchOpConv : public OpConversionPattern<fir::DispatchOp> {
 
     mlir::Type funTy =
         mlir::FunctionType::get(rewriter.getContext(), argTypes, resTypes);
-    mlir::Value funcPtr = rewriter.create<fir::ConvertOp>(loc, funTy, address);
+    mlir::Value funcPtr = fir::ConvertOp::create(rewriter, loc, funTy, address);
 
     // Make the call.
     llvm::SmallVector<mlir::Value> args{funcPtr};
@@ -398,11 +398,11 @@ llvm::LogicalResult SelectTypeConv::genTypeLadderStep(
       if (code == 0)
         return mlir::emitError(loc)
                << "type code unavailable for " << a.getType();
-      mlir::Value typeCode = rewriter.create<mlir::arith::ConstantOp>(
+      mlir::Value typeCode = mlir::arith::ConstantOp::create(rewriter,
           loc, rewriter.getI8IntegerAttr(code));
-      mlir::Value selectorTypeCode = rewriter.create<fir::BoxTypeCodeOp>(
+      mlir::Value selectorTypeCode = fir::BoxTypeCodeOp::create(rewriter,
           loc, rewriter.getI8Type(), selector);
-      cmp = rewriter.create<mlir::arith::CmpIOp>(
+      cmp = mlir::arith::CmpIOp::create(rewriter,
           loc, mlir::arith::CmpIPredicate::eq, selectorTypeCode, typeCode);
     } else {
       // Flang inline the kind parameter in the type descriptor so we can
@@ -419,15 +419,15 @@ llvm::LogicalResult SelectTypeConv::genTypeLadderStep(
     // Retrieve the type descriptor from the type guard statement record type.
     assert(mlir::isa<fir::RecordType>(a.getType()) && "expect fir.record type");
     mlir::Value typeDescAddr =
-        rewriter.create<fir::TypeDescOp>(loc, mlir::TypeAttr::get(a.getType()));
+        fir::TypeDescOp::create(rewriter, loc, mlir::TypeAttr::get(a.getType()));
     mlir::Type refNoneType = ReferenceType::get(rewriter.getNoneType());
     mlir::Value typeDesc =
-        rewriter.create<ConvertOp>(loc, refNoneType, typeDescAddr);
+        ConvertOp::create(rewriter, loc, refNoneType, typeDescAddr);
 
     // Prepare the selector descriptor for the runtime call.
     mlir::Type descNoneTy = fir::BoxType::get(rewriter.getNoneType());
     mlir::Value descSelector =
-        rewriter.create<ConvertOp>(loc, descNoneTy, selector);
+        ConvertOp::create(rewriter, loc, descNoneTy, selector);
 
     // Generate runtime call.
     llvm::StringRef fctName = RTNAME_STRING(ClassIs);
@@ -455,10 +455,10 @@ llvm::LogicalResult SelectTypeConv::genTypeLadderStep(
       rewriter.createBlock(dest->getParent(), mlir::Region::iterator(dest));
   rewriter.setInsertionPointToEnd(thisBlock);
   if (destOps.has_value())
-    rewriter.create<mlir::cf::CondBranchOp>(loc, cmp, dest, destOps.value(),
+    mlir::cf::CondBranchOp::create(rewriter, loc, cmp, dest, destOps.value(),
                                             newBlock, mlir::ValueRange{});
   else
-    rewriter.create<mlir::cf::CondBranchOp>(loc, cmp, dest, newBlock);
+    mlir::cf::CondBranchOp::create(rewriter, loc, cmp, dest, newBlock);
   rewriter.setInsertionPointToEnd(newBlock);
   return mlir::success();
 }
@@ -470,15 +470,15 @@ SelectTypeConv::genTypeDescCompare(mlir::Location loc, mlir::Value selector,
                                    mlir::PatternRewriter &rewriter) const {
   assert(mlir::isa<fir::RecordType>(ty) && "expect fir.record type");
   mlir::Value typeDescAddr =
-      rewriter.create<fir::TypeDescOp>(loc, mlir::TypeAttr::get(ty));
-  mlir::Value selectorTdescAddr = rewriter.create<fir::BoxTypeDescOp>(
+      fir::TypeDescOp::create(rewriter, loc, mlir::TypeAttr::get(ty));
+  mlir::Value selectorTdescAddr = fir::BoxTypeDescOp::create(rewriter,
       loc, typeDescAddr.getType(), selector);
   auto intPtrTy = rewriter.getIndexType();
   auto typeDescInt =
-      rewriter.create<fir::ConvertOp>(loc, intPtrTy, typeDescAddr);
+      fir::ConvertOp::create(rewriter, loc, intPtrTy, typeDescAddr);
   auto selectorTdescInt =
-      rewriter.create<fir::ConvertOp>(loc, intPtrTy, selectorTdescAddr);
-  return rewriter.create<mlir::arith::CmpIOp>(
+      fir::ConvertOp::create(rewriter, loc, intPtrTy, selectorTdescAddr);
+  return mlir::arith::CmpIOp::create(rewriter,
       loc, mlir::arith::CmpIPredicate::eq, typeDescInt, selectorTdescInt);
 }
 
diff --git a/flang/lib/Optimizer/Transforms/SimplifyFIROperations.cpp b/flang/lib/Optimizer/Transforms/SimplifyFIROperations.cpp
index ad8464b495888..d3441f9b22ac0 100644
--- a/flang/lib/Optimizer/Transforms/SimplifyFIROperations.cpp
+++ b/flang/lib/Optimizer/Transforms/SimplifyFIROperations.cpp
@@ -88,18 +88,18 @@ mlir::LogicalResult IsContiguousBoxCoversion::matchAndRewrite(
     // The scalar cases are supposed to be optimized by the canonicalization.
     if (rank == 1 || (op.getInnermost() && rank > 0)) {
       mlir::Type idxTy = builder.getIndexType();
-      auto eleSize = builder.create<fir::BoxEleSizeOp>(loc, idxTy, box);
+      auto eleSize = fir::BoxEleSizeOp::create(builder, loc, idxTy, box);
       mlir::Value zero = fir::factory::createZeroValue(builder, loc, idxTy);
       auto dimInfo =
-          builder.create<fir::BoxDimsOp>(loc, idxTy, idxTy, idxTy, box, zero);
+          fir::BoxDimsOp::create(builder, loc, idxTy, idxTy, idxTy, box, zero);
       mlir::Value stride = dimInfo.getByteStride();
-      mlir::Value pred1 = builder.create<mlir::arith::CmpIOp>(
+      mlir::Value pred1 = mlir::arith::CmpIOp::create(builder,
           loc, mlir::arith::CmpIPredicate::eq, eleSize, stride);
       mlir::Value extent = dimInfo.getExtent();
-      mlir::Value pred2 = builder.create<mlir::arith::CmpIOp>(
+      mlir::Value pred2 = mlir::arith::CmpIOp::create(builder,
           loc, mlir::arith::CmpIPredicate::eq, extent, zero);
       mlir::Value result =
-          builder.create<mlir::arith::OrIOp>(loc, pred1, pred2);
+          mlir::arith::OrIOp::create(builder, loc, pred1, pred2);
       result = builder.createConvert(loc, op.getType(), result);
       rewriter.replaceOp(op, result);
       return mlir::success();
@@ -192,7 +192,7 @@ class DoConcurrentConversion
         // TODO Should this be a heap allocation instead? For now, we allocate
         // on the stack for each loop iteration.
         mlir::Value localAlloc =
-            rewriter.create<fir::AllocaOp>(loop.getLoc(), localizer.getType());
+            fir::AllocaOp::create(rewriter, loop.getLoc(), localizer.getType());
 
         auto cloneLocalizerRegion = [&](mlir::Region &region,
                                         mlir::ValueRange regionArgs,
@@ -258,7 +258,7 @@ class DoConcurrentConversion
     for (auto [lb, ub, st, iv] :
          llvm::zip_equal(loop.getLowerBound(), loop.getUpperBound(),
                          loop.getStep(), *loop.getLoopInductionVars())) {
-      innermostUnorderdLoop = rewriter.create<fir::DoLoopOp>(
+      innermostUnorderdLoop = fir::DoLoopOp::create(rewriter,
           doConcurentOp.getLoc(), lb, ub, st,
           /*unordred=*/true, /*finalCountValue=*/false,
           /*iterArgs=*/mlir::ValueRange{}, loop.getReduceVars(),
diff --git a/flang/lib/Optimizer/Transforms/SimplifyIntrinsics.cpp b/flang/lib/Optimizer/Transforms/SimplifyIntrinsics.cpp
index 4d25a02bf18ba..43aedb564ed81 100644
--- a/flang/lib/Optimizer/Transforms/SimplifyIntrinsics.cpp
+++ b/flang/lib/Optimizer/Transforms/SimplifyIntrinsics.cpp
@@ -284,7 +284,7 @@ genReductionLoop(fir::FirOpBuilder &builder, mlir::func::FuncOp &funcOp,
                                      fir::SequenceType::getUnknownExtent());
   mlir::Type arrTy = fir::SequenceType::get(flatShape, elementType);
   mlir::Type boxArrTy = fir::BoxType::get(arrTy);
-  mlir::Value array = builder.create<fir::ConvertOp>(loc, boxArrTy, arg);
+  mlir::Value array = fir::ConvertOp::create(builder, loc, boxArrTy, arg);
   mlir::Type resultType = funcOp.getResultTypes()[0];
   mlir::Value init = initVal(builder, loc, resultType);
 
@@ -300,10 +300,10 @@ genReductionLoop(fir::FirOpBuilder &builder, mlir::func::FuncOp &funcOp,
   for (unsigned i = 0; i < rank; ++i) {
     mlir::Value dimIdx = builder.createIntegerConstant(loc, idxTy, i);
     auto dims =
-        builder.create<fir::BoxDimsOp>(loc, idxTy, idxTy, idxTy, array, dimIdx);
+        fir::BoxDimsOp::create(builder, loc, idxTy, idxTy, idxTy, array, dimIdx);
     mlir::Value len = dims.getResult(1);
     // We use C indexing here, so len-1 as loopcount
-    mlir::Value loopCount = builder.create<mlir::arith::SubIOp>(loc, len, one);
+    mlir::Value loopCount = mlir::arith::SubIOp::create(builder, loc, len, one);
     bounds.push_back(loopCount);
   }
   // Create a loop nest consisting of OP operations.
@@ -316,7 +316,7 @@ genReductionLoop(fir::FirOpBuilder &builder, mlir::func::FuncOp &funcOp,
   for (unsigned i = rank; 0 < i; --i) {
     mlir::Value step = one;
     mlir::Value loopCount = bounds[i - 1];
-    auto loop = builder.create<OP>(loc, zeroIdx, loopCount, step,
+    auto loop = OP::create(builder, loc, zeroIdx, loopCount, step,
                                    unorderedOrInitialLoopCond,
                                    /*finalCountValue=*/false, init);
     init = loop.getRegionIterArgs()[resultIndex];
@@ -332,8 +332,8 @@ genReductionLoop(fir::FirOpBuilder &builder, mlir::func::FuncOp &funcOp,
   // We are in the innermost loop: generate the reduction body.
   mlir::Type eleRefTy = builder.getRefType(elementType);
   mlir::Value addr =
-      builder.create<fir::CoordinateOp>(loc, eleRefTy, array, indices);
-  mlir::Value elem = builder.create<fir::LoadOp>(loc, addr);
+      fir::CoordinateOp::create(builder, loc, eleRefTy, array, indices);
+  mlir::Value elem = fir::LoadOp::create(builder, loc, addr);
   mlir::Value reductionVal = genBody(builder, loc, elementType, elem, init);
   // Generate vector with condition to continue while loop at [0] and result
   // from current loop at [1] for IterWhileOp loops, just result at [0] for
@@ -344,7 +344,7 @@ genReductionLoop(fir::FirOpBuilder &builder, mlir::func::FuncOp &funcOp,
   // to return the updated value of the reduction to the enclosing
   // loops.
   for (unsigned i = 0; i < rank; ++i) {
-    auto result = builder.create<fir::ResultOp>(loc, results);
+    auto result = fir::ResultOp::create(builder, loc, results);
     // Proceed to the outer loop.
     auto loop = mlir::cast<OP>(result->getParentOp());
     results = loop.getResults();
@@ -354,7 +354,7 @@ genReductionLoop(fir::FirOpBuilder &builder, mlir::func::FuncOp &funcOp,
   }
   // End of loop nest. The insertion point is after the outermost loop.
   // Return the reduction value from the function.
-  builder.create<mlir::func::ReturnOp>(loc, results[resultIndex]);
+  mlir::func::ReturnOp::create(builder, loc, results[resultIndex]);
 }
 
 static llvm::SmallVector<mlir::Value> nopLoopCond(fir::FirOpBuilder &builder,
@@ -394,9 +394,9 @@ static void genRuntimeSumBody(fir::FirOpBuilder &builder,
                       mlir::Type elementType, mlir::Value elem1,
                       mlir::Value elem2) -> mlir::Value {
     if (mlir::isa<mlir::FloatType>(elementType))
-      return builder.create<mlir::arith::AddFOp>(loc, elem1, elem2);
+      return mlir::arith::AddFOp::create(builder, loc, elem1, elem2);
     if (mlir::isa<mlir::IntegerType>(elementType))
-      return builder.create<mlir::arith::AddIOp>(loc, elem1, elem2);
+      return mlir::arith::AddIOp::create(builder, loc, elem1, elem2);
 
     llvm_unreachable("unsupported type");
     return {};
@@ -436,12 +436,12 @@ static void genRuntimeMaxvalBody(fir::FirOpBuilder &builder,
       // This libm function may not work properly for F128 arguments
       // on targets where long double is not F128. It is an LLVM issue,
       // but we just use normal select here to resolve all the cases.
-      auto compare = builder.create<mlir::arith::CmpFOp>(
+      auto compare = mlir::arith::CmpFOp::create(builder,
           loc, mlir::arith::CmpFPredicate::OGT, elem1, elem2);
-      return builder.create<mlir::arith::SelectOp>(loc, compare, elem1, elem2);
+      return mlir::arith::SelectOp::create(builder, loc, compare, elem1, elem2);
     }
     if (mlir::isa<mlir::IntegerType>(elementType))
-      return builder.create<mlir::arith::MaxSIOp>(loc, elem1, elem2);
+      return mlir::arith::MaxSIOp::create(builder, loc, elem1, elem2);
 
     llvm_unreachable("unsupported type");
     return {};
@@ -472,11 +472,11 @@ static void genRuntimeCountBody(fir::FirOpBuilder &builder,
     auto zero64 = builder.createIntegerConstant(loc, builder.getI64Type(), 0);
     auto one64 = builder.createIntegerConstant(loc, builder.getI64Type(), 1);
 
-    auto compare = builder.create<mlir::arith::CmpIOp>(
+    auto compare = mlir::arith::CmpIOp::create(builder,
         loc, mlir::arith::CmpIPredicate::eq, elem1, zero32);
     auto select =
-        builder.create<mlir::arith::SelectOp>(loc, compare, zero64, one64);
-    return builder.create<mlir::arith::AddIOp>(loc, select, elem2);
+        mlir::arith::SelectOp::create(builder, loc, compare, zero64, one64);
+    return mlir::arith::AddIOp::create(builder, loc, select, elem2);
   };
 
   // Count always gets I32 for elementType as it converts logical input to
@@ -501,14 +501,14 @@ static void genRuntimeAnyBody(fir::FirOpBuilder &builder,
                       mlir::Type elementType, mlir::Value elem1,
                       mlir::Value elem2) -> mlir::Value {
     auto zero = builder.createIntegerConstant(loc, elementType, 0);
-    return builder.create<mlir::arith::CmpIOp>(
+    return mlir::arith::CmpIOp::create(builder,
         loc, mlir::arith::CmpIPredicate::ne, elem1, zero);
   };
 
   auto continueCond = [](fir::FirOpBuilder builder, mlir::Location loc,
                          mlir::Value reductionVal) {
     auto one1 = builder.createIntegerConstant(loc, builder.getI1Type(), 1);
-    auto eor = builder.create<mlir::arith::XOrIOp>(loc, reductionVal, one1);
+    auto eor = mlir::arith::XOrIOp::create(builder, loc, reductionVal, one1);
     llvm::SmallVector<mlir::Value> results = {eor, reductionVal};
     return results;
   };
@@ -534,7 +534,7 @@ static void genRuntimeAllBody(fir::FirOpBuilder &builder,
                       mlir::Type elementType, mlir::Value elem1,
                       mlir::Value elem2) -> mlir::Value {
     auto zero = builder.createIntegerConstant(loc, elementType, 0);
-    return builder.create<mlir::arith::CmpIOp>(
+    return mlir::arith::CmpIOp::create(builder,
         loc, mlir::arith::CmpIPredicate::ne, elem1, zero);
   };
 
@@ -577,13 +577,13 @@ void fir::genMinMaxlocReductionLoop(
                                      fir::SequenceType::getUnknownExtent());
   mlir::Type arrTy = fir::SequenceType::get(flatShape, elementType);
   mlir::Type boxArrTy = fir::BoxType::get(arrTy);
-  array = builder.create<fir::ConvertOp>(loc, boxArrTy, array);
+  array = fir::ConvertOp::create(builder, loc, boxArrTy, array);
 
   mlir::Type resultElemType = hlfir::getFortranElementType(resultArr.getType());
   mlir::Value flagSet = builder.createIntegerConstant(loc, resultElemType, 1);
   mlir::Value zero = builder.createIntegerConstant(loc, resultElemType, 0);
   mlir::Value flagRef = builder.createTemporary(loc, resultElemType);
-  builder.create<fir::StoreOp>(loc, zero, flagRef);
+  fir::StoreOp::create(builder, loc, zero, flagRef);
 
   mlir::Value init = initVal(builder, loc, elementType);
   llvm::SmallVector<mlir::Value, Fortran::common::maxRank> bounds;
@@ -598,10 +598,10 @@ void fir::genMinMaxlocReductionLoop(
   for (unsigned i = 0; i < rank; ++i) {
     mlir::Value dimIdx = builder.createIntegerConstant(loc, idxTy, i);
     auto dims =
-        builder.create<fir::BoxDimsOp>(loc, idxTy, idxTy, idxTy, array, dimIdx);
+        fir::BoxDimsOp::create(builder, loc, idxTy, idxTy, idxTy, array, dimIdx);
     mlir::Value len = dims.getResult(1);
     // We use C indexing here, so len-1 as loopcount
-    mlir::Value loopCount = builder.create<mlir::arith::SubIOp>(loc, len, one);
+    mlir::Value loopCount = mlir::arith::SubIOp::create(builder, loc, len, one);
     bounds.push_back(loopCount);
   }
   // Create a loop nest consisting of OP operations.
@@ -615,7 +615,7 @@ void fir::genMinMaxlocReductionLoop(
     mlir::Value step = one;
     mlir::Value loopCount = bounds[i - 1];
     auto loop =
-        builder.create<fir::DoLoopOp>(loc, zeroIdx, loopCount, step, false,
+        fir::DoLoopOp::create(builder, loc, zeroIdx, loopCount, step, false,
                                       /*finalCountValue=*/false, init);
     init = loop.getRegionIterArgs()[0];
     indices.push_back(loop.getInductionVar());
@@ -634,7 +634,7 @@ void fir::genMinMaxlocReductionLoop(
   // to return the updated value of the reduction to the enclosing
   // loops.
   for (unsigned i = 0; i < rank; ++i) {
-    auto result = builder.create<fir::ResultOp>(loc, reductionVal);
+    auto result = fir::ResultOp::create(builder, loc, reductionVal);
     // Proceed to the outer loop.
     auto loop = mlir::cast<fir::DoLoopOp>(result->getParentOp());
     reductionVal = loop.getResult(0);
@@ -646,7 +646,7 @@ void fir::genMinMaxlocReductionLoop(
   if (maskMayBeLogicalScalar) {
     if (fir::IfOp ifOp =
             mlir::dyn_cast<fir::IfOp>(builder.getBlock()->getParentOp())) {
-      builder.create<fir::ResultOp>(loc, reductionVal);
+      fir::ResultOp::create(builder, loc, reductionVal);
       builder.setInsertionPointAfter(ifOp);
       // Redefine flagSet to escape scope of ifOp
       flagSet = builder.createIntegerConstant(loc, resultElemType, 1);
@@ -689,9 +689,9 @@ static void genRuntimeMinMaxlocBody(fir::FirOpBuilder &builder,
   mlir::Value returnValue = builder.createIntegerConstant(loc, resultElemTy, 0);
   mlir::Value resultArrSize = builder.createIntegerConstant(loc, idxTy, rank);
 
-  mlir::Value resultArrInit = builder.create<fir::AllocMemOp>(loc, resultTy);
-  mlir::Value resultArrShape = builder.create<fir::ShapeOp>(loc, resultArrSize);
-  mlir::Value resultArr = builder.create<fir::EmboxOp>(
+  mlir::Value resultArrInit = fir::AllocMemOp::create(builder, loc, resultTy);
+  mlir::Value resultArrShape = fir::ShapeOp::create(builder, loc, resultArrSize);
+  mlir::Value resultArr = fir::EmboxOp::create(builder,
       loc, resultBoxTy, resultArrInit, resultArrShape);
 
   mlir::Type resultRefTy = builder.getRefType(resultElemTy);
@@ -701,14 +701,14 @@ static void genRuntimeMinMaxlocBody(fir::FirOpBuilder &builder,
                                        fir::SequenceType::getUnknownExtent());
     mlir::Type maskTy = fir::SequenceType::get(flatShape, maskElemType);
     mlir::Type boxMaskTy = fir::BoxType::get(maskTy);
-    mask = builder.create<fir::ConvertOp>(loc, boxMaskTy, mask);
+    mask = fir::ConvertOp::create(builder, loc, boxMaskTy, mask);
   }
 
   for (unsigned int i = 0; i < rank; ++i) {
     mlir::Value index = builder.createIntegerConstant(loc, idxTy, i);
     mlir::Value resultElemAddr =
-        builder.create<fir::CoordinateOp>(loc, resultRefTy, resultArr, index);
-    builder.create<fir::StoreOp>(loc, returnValue, resultElemAddr);
+        fir::CoordinateOp::create(builder, loc, resultRefTy, resultArr, index);
+    fir::StoreOp::create(builder, loc, returnValue, resultElemAddr);
   }
 
   auto genBodyOp =
@@ -720,17 +720,17 @@ static void genRuntimeMinMaxlocBody(fir::FirOpBuilder &builder,
     if (maskRank > 0) {
       mlir::Type logicalRef = builder.getRefType(maskElemType);
       mlir::Value maskAddr =
-          builder.create<fir::CoordinateOp>(loc, logicalRef, mask, indices);
-      mlir::Value maskElem = builder.create<fir::LoadOp>(loc, maskAddr);
+          fir::CoordinateOp::create(builder, loc, logicalRef, mask, indices);
+      mlir::Value maskElem = fir::LoadOp::create(builder, loc, maskAddr);
 
       // fir::IfOp requires argument to be I1 - won't accept logical or any
       // other Integer.
       mlir::Type ifCompatType = builder.getI1Type();
       mlir::Value ifCompatElem =
-          builder.create<fir::ConvertOp>(loc, ifCompatType, maskElem);
+          fir::ConvertOp::create(builder, loc, ifCompatType, maskElem);
 
       llvm::SmallVector<mlir::Type> resultsTy = {elementType, elementType};
-      fir::IfOp ifOp = builder.create<fir::IfOp>(loc, elementType, ifCompatElem,
+      fir::IfOp ifOp = fir::IfOp::create(builder, loc, elementType, ifCompatElem,
                                                  /*withElseRegion=*/true);
       builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
     }
@@ -738,11 +738,11 @@ static void genRuntimeMinMaxlocBody(fir::FirOpBuilder &builder,
     // Set flag that mask was true at some point
     mlir::Value flagSet = builder.createIntegerConstant(
         loc, mlir::cast<fir::ReferenceType>(flagRef.getType()).getEleTy(), 1);
-    mlir::Value isFirst = builder.create<fir::LoadOp>(loc, flagRef);
+    mlir::Value isFirst = fir::LoadOp::create(builder, loc, flagRef);
     mlir::Type eleRefTy = builder.getRefType(elementType);
     mlir::Value addr =
-        builder.create<fir::CoordinateOp>(loc, eleRefTy, array, indices);
-    mlir::Value elem = builder.create<fir::LoadOp>(loc, addr);
+        fir::CoordinateOp::create(builder, loc, eleRefTy, array, indices);
+    mlir::Value elem = fir::LoadOp::create(builder, loc, addr);
 
     mlir::Value cmp;
     if (mlir::isa<mlir::FloatType>(elementType)) {
@@ -750,20 +750,20 @@ static void genRuntimeMinMaxlocBody(fir::FirOpBuilder &builder,
       // is not NaN. A OGL/OLT condition will usually work for this unless all
       // the values are Nan or Inf. This follows the same logic as
       // NumericCompare for Minloc/Maxlox in extrema.cpp.
-      cmp = builder.create<mlir::arith::CmpFOp>(
+      cmp = mlir::arith::CmpFOp::create(builder,
           loc,
           isMax ? mlir::arith::CmpFPredicate::OGT
                 : mlir::arith::CmpFPredicate::OLT,
           elem, reduction);
 
-      mlir::Value cmpNan = builder.create<mlir::arith::CmpFOp>(
+      mlir::Value cmpNan = mlir::arith::CmpFOp::create(builder,
           loc, mlir::arith::CmpFPredicate::UNE, reduction, reduction);
-      mlir::Value cmpNan2 = builder.create<mlir::arith::CmpFOp>(
+      mlir::Value cmpNan2 = mlir::arith::CmpFOp::create(builder,
           loc, mlir::arith::CmpFPredicate::OEQ, elem, elem);
-      cmpNan = builder.create<mlir::arith::AndIOp>(loc, cmpNan, cmpNan2);
-      cmp = builder.create<mlir::arith::OrIOp>(loc, cmp, cmpNan);
+      cmpNan = mlir::arith::AndIOp::create(builder, loc, cmpNan, cmpNan2);
+      cmp = mlir::arith::OrIOp::create(builder, loc, cmp, cmpNan);
     } else if (mlir::isa<mlir::IntegerType>(elementType)) {
-      cmp = builder.create<mlir::arith::CmpIOp>(
+      cmp = mlir::arith::CmpIOp::create(builder,
           loc,
           isMax ? mlir::arith::CmpIPredicate::sgt
                 : mlir::arith::CmpIPredicate::slt,
@@ -773,15 +773,15 @@ static void genRuntimeMinMaxlocBody(fir::FirOpBuilder &builder,
     }
 
     // The condition used for the loop is isFirst || <the condition above>.
-    isFirst = builder.create<fir::ConvertOp>(loc, cmp.getType(), isFirst);
-    isFirst = builder.create<mlir::arith::XOrIOp>(
+    isFirst = fir::ConvertOp::create(builder, loc, cmp.getType(), isFirst);
+    isFirst = mlir::arith::XOrIOp::create(builder,
         loc, isFirst, builder.createIntegerConstant(loc, cmp.getType(), 1));
-    cmp = builder.create<mlir::arith::OrIOp>(loc, cmp, isFirst);
-    fir::IfOp ifOp = builder.create<fir::IfOp>(loc, elementType, cmp,
+    cmp = mlir::arith::OrIOp::create(builder, loc, cmp, isFirst);
+    fir::IfOp ifOp = fir::IfOp::create(builder, loc, elementType, cmp,
                                                /*withElseRegion*/ true);
 
     builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
-    builder.create<fir::StoreOp>(loc, flagSet, flagRef);
+    fir::StoreOp::create(builder, loc, flagSet, flagRef);
     mlir::Type resultElemTy = hlfir::getFortranElementType(resultArr.getType());
     mlir::Type returnRefTy = builder.getRefType(resultElemTy);
     mlir::IndexType idxTy = builder.getIndexType();
@@ -791,16 +791,16 @@ static void genRuntimeMinMaxlocBody(fir::FirOpBuilder &builder,
     for (unsigned int i = 0; i < rank; ++i) {
       mlir::Value index = builder.createIntegerConstant(loc, idxTy, i);
       mlir::Value resultElemAddr =
-          builder.create<fir::CoordinateOp>(loc, returnRefTy, resultArr, index);
+          fir::CoordinateOp::create(builder, loc, returnRefTy, resultArr, index);
       mlir::Value convert =
-          builder.create<fir::ConvertOp>(loc, resultElemTy, indices[i]);
+          fir::ConvertOp::create(builder, loc, resultElemTy, indices[i]);
       mlir::Value fortranIndex =
-          builder.create<mlir::arith::AddIOp>(loc, convert, one);
-      builder.create<fir::StoreOp>(loc, fortranIndex, resultElemAddr);
+          mlir::arith::AddIOp::create(builder, loc, convert, one);
+      fir::StoreOp::create(builder, loc, fortranIndex, resultElemAddr);
     }
-    builder.create<fir::ResultOp>(loc, elem);
+    fir::ResultOp::create(builder, loc, elem);
     builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
-    builder.create<fir::ResultOp>(loc, reduction);
+    fir::ResultOp::create(builder, loc, reduction);
     builder.setInsertionPointAfter(ifOp);
     mlir::Value reductionVal = ifOp.getResult(0);
 
@@ -808,9 +808,9 @@ static void genRuntimeMinMaxlocBody(fir::FirOpBuilder &builder,
     if (maskRank > 0) {
       fir::IfOp ifOp =
           mlir::dyn_cast<fir::IfOp>(builder.getBlock()->getParentOp());
-      builder.create<fir::ResultOp>(loc, reductionVal);
+      fir::ResultOp::create(builder, loc, reductionVal);
       builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
-      builder.create<fir::ResultOp>(loc, reduction);
+      fir::ResultOp::create(builder, loc, reduction);
       reductionVal = ifOp.getResult(0);
       builder.setInsertionPointAfter(ifOp);
     }
@@ -825,11 +825,11 @@ static void genRuntimeMinMaxlocBody(fir::FirOpBuilder &builder,
     mlir::Type logical = maskElemType;
     mlir::Type logicalRefTy = builder.getRefType(logical);
     mlir::Value condAddr =
-        builder.create<fir::BoxAddrOp>(loc, logicalRefTy, mask);
-    mlir::Value cond = builder.create<fir::LoadOp>(loc, condAddr);
-    mlir::Value condI1 = builder.create<fir::ConvertOp>(loc, i1Type, cond);
+        fir::BoxAddrOp::create(builder, loc, logicalRefTy, mask);
+    mlir::Value cond = fir::LoadOp::create(builder, loc, condAddr);
+    mlir::Value condI1 = fir::ConvertOp::create(builder, loc, i1Type, cond);
 
-    fir::IfOp ifOp = builder.create<fir::IfOp>(loc, elementType, condI1,
+    fir::IfOp ifOp = fir::IfOp::create(builder, loc, elementType, condI1,
                                                /*withElseRegion=*/true);
 
     builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
@@ -839,7 +839,7 @@ static void genRuntimeMinMaxlocBody(fir::FirOpBuilder &builder,
     } else {
       basicValue = builder.createRealConstant(loc, elementType, 0);
     }
-    builder.create<fir::ResultOp>(loc, basicValue);
+    fir::ResultOp::create(builder, loc, basicValue);
 
     builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
   }
@@ -847,7 +847,7 @@ static void genRuntimeMinMaxlocBody(fir::FirOpBuilder &builder,
                       const mlir::Type &resultElemType, mlir::Value resultArr,
                       mlir::Value index) {
     mlir::Type resultRefTy = builder.getRefType(resultElemType);
-    return builder.create<fir::CoordinateOp>(loc, resultRefTy, resultArr,
+    return fir::CoordinateOp::create(builder, loc, resultRefTy, resultArr,
                                              index);
   };
 
@@ -859,25 +859,25 @@ static void genRuntimeMinMaxlocBody(fir::FirOpBuilder &builder,
   if (isDim) {
     mlir::Type resultBoxTy =
         fir::BoxType::get(fir::HeapType::get(resultElemTy));
-    mlir::Value outputArr = builder.create<fir::ConvertOp>(
+    mlir::Value outputArr = fir::ConvertOp::create(builder,
         loc, builder.getRefType(resultBoxTy), funcOp.front().getArgument(0));
-    mlir::Value resultArrScalar = builder.create<fir::ConvertOp>(
+    mlir::Value resultArrScalar = fir::ConvertOp::create(builder,
         loc, fir::HeapType::get(resultElemTy), resultArrInit);
     mlir::Value resultBox =
-        builder.create<fir::EmboxOp>(loc, resultBoxTy, resultArrScalar);
-    builder.create<fir::StoreOp>(loc, resultBox, outputArr);
+        fir::EmboxOp::create(builder, loc, resultBoxTy, resultArrScalar);
+    fir::StoreOp::create(builder, loc, resultBox, outputArr);
   } else {
     fir::SequenceType::Shape resultShape(1, rank);
     mlir::Type outputArrTy = fir::SequenceType::get(resultShape, resultElemTy);
     mlir::Type outputHeapTy = fir::HeapType::get(outputArrTy);
     mlir::Type outputBoxTy = fir::BoxType::get(outputHeapTy);
     mlir::Type outputRefTy = builder.getRefType(outputBoxTy);
-    mlir::Value outputArr = builder.create<fir::ConvertOp>(
+    mlir::Value outputArr = fir::ConvertOp::create(builder,
         loc, outputRefTy, funcOp.front().getArgument(0));
-    builder.create<fir::StoreOp>(loc, resultArr, outputArr);
+    fir::StoreOp::create(builder, loc, resultArr, outputArr);
   }
 
-  builder.create<mlir::func::ReturnOp>(loc);
+  mlir::func::ReturnOp::create(builder, loc);
 }
 
 /// Generate function type for the simplified version of RTNAME(DotProduct)
@@ -929,10 +929,10 @@ static void genRuntimeDotBody(fir::FirOpBuilder &builder,
   fir::SequenceType::Shape flatShape = {fir::SequenceType::getUnknownExtent()};
   mlir::Type arrTy1 = fir::SequenceType::get(flatShape, arg1ElementTy);
   mlir::Type boxArrTy1 = fir::BoxType::get(arrTy1);
-  mlir::Value array1 = builder.create<fir::ConvertOp>(loc, boxArrTy1, arg1);
+  mlir::Value array1 = fir::ConvertOp::create(builder, loc, boxArrTy1, arg1);
   mlir::Type arrTy2 = fir::SequenceType::get(flatShape, arg2ElementTy);
   mlir::Type boxArrTy2 = fir::BoxType::get(arrTy2);
-  mlir::Value array2 = builder.create<fir::ConvertOp>(loc, boxArrTy2, arg2);
+  mlir::Value array2 = fir::ConvertOp::create(builder, loc, boxArrTy2, arg2);
   // This version takes the loop trip count from the first argument.
   // If the first argument's box has unknown (at compilation time)
   // extent, then it may be better to take the extent from the second
@@ -942,14 +942,14 @@ static void genRuntimeDotBody(fir::FirOpBuilder &builder,
   // is more profitable to call.
   // Note that we can assume that both arguments have the same extent.
   auto dims =
-      builder.create<fir::BoxDimsOp>(loc, idxTy, idxTy, idxTy, array1, zeroIdx);
+      fir::BoxDimsOp::create(builder, loc, idxTy, idxTy, idxTy, array1, zeroIdx);
   mlir::Value len = dims.getResult(1);
   mlir::Value one = builder.createIntegerConstant(loc, idxTy, 1);
   mlir::Value step = one;
 
   // We use C indexing here, so len-1 as loopcount
-  mlir::Value loopCount = builder.create<mlir::arith::SubIOp>(loc, len, one);
-  auto loop = builder.create<fir::DoLoopOp>(loc, zeroIdx, loopCount, step,
+  mlir::Value loopCount = mlir::arith::SubIOp::create(builder, loc, len, one);
+  auto loop = fir::DoLoopOp::create(builder, loc, zeroIdx, loopCount, step,
                                             /*unordered=*/false,
                                             /*finalCountValue=*/false, zero);
   mlir::Value sumVal = loop.getRegionIterArgs()[0];
@@ -961,33 +961,33 @@ static void genRuntimeDotBody(fir::FirOpBuilder &builder,
   mlir::Type eleRef1Ty = builder.getRefType(arg1ElementTy);
   mlir::Value index = loop.getInductionVar();
   mlir::Value addr1 =
-      builder.create<fir::CoordinateOp>(loc, eleRef1Ty, array1, index);
-  mlir::Value elem1 = builder.create<fir::LoadOp>(loc, addr1);
+      fir::CoordinateOp::create(builder, loc, eleRef1Ty, array1, index);
+  mlir::Value elem1 = fir::LoadOp::create(builder, loc, addr1);
   // Convert to the result type.
-  elem1 = builder.create<fir::ConvertOp>(loc, resultElementType, elem1);
+  elem1 = fir::ConvertOp::create(builder, loc, resultElementType, elem1);
 
   mlir::Type eleRef2Ty = builder.getRefType(arg2ElementTy);
   mlir::Value addr2 =
-      builder.create<fir::CoordinateOp>(loc, eleRef2Ty, array2, index);
-  mlir::Value elem2 = builder.create<fir::LoadOp>(loc, addr2);
+      fir::CoordinateOp::create(builder, loc, eleRef2Ty, array2, index);
+  mlir::Value elem2 = fir::LoadOp::create(builder, loc, addr2);
   // Convert to the result type.
-  elem2 = builder.create<fir::ConvertOp>(loc, resultElementType, elem2);
+  elem2 = fir::ConvertOp::create(builder, loc, resultElementType, elem2);
 
   if (mlir::isa<mlir::FloatType>(resultElementType))
-    sumVal = builder.create<mlir::arith::AddFOp>(
-        loc, builder.create<mlir::arith::MulFOp>(loc, elem1, elem2), sumVal);
+    sumVal = mlir::arith::AddFOp::create(builder,
+        loc, mlir::arith::MulFOp::create(builder, loc, elem1, elem2), sumVal);
   else if (mlir::isa<mlir::IntegerType>(resultElementType))
-    sumVal = builder.create<mlir::arith::AddIOp>(
-        loc, builder.create<mlir::arith::MulIOp>(loc, elem1, elem2), sumVal);
+    sumVal = mlir::arith::AddIOp::create(builder,
+        loc, mlir::arith::MulIOp::create(builder, loc, elem1, elem2), sumVal);
   else
     llvm_unreachable("unsupported type");
 
-  builder.create<fir::ResultOp>(loc, sumVal);
+  fir::ResultOp::create(builder, loc, sumVal);
   // End of loop.
   builder.restoreInsertionPoint(loopEndPt);
 
   mlir::Value resultVal = loop.getResult(0);
-  builder.create<mlir::func::ReturnOp>(loc, resultVal);
+  mlir::func::ReturnOp::create(builder, loc, resultVal);
 }
 
 mlir::func::FuncOp SimplifyIntrinsicsPass::getOrCreateFunction(
@@ -1229,7 +1229,7 @@ void SimplifyIntrinsicsPass::simplifyMinMaxlocReduction(
 
   mlir::func::FuncOp newFunc =
       getOrCreateFunction(builder, funcName, typeGenerator, bodyGenerator);
-  builder.create<fir::CallOp>(loc, newFunc,
+  fir::CallOp::create(builder, loc, newFunc,
                               mlir::ValueRange{args[0], args[1], mask});
   call->dropAllReferences();
   call->erase();
@@ -1259,7 +1259,7 @@ void SimplifyIntrinsicsPass::simplifyReductionBody(
   mlir::func::FuncOp newFunc =
       getOrCreateFunction(builder, funcName, typeGenerator, bodyGenerator);
   auto newCall =
-      builder.create<fir::CallOp>(loc, newFunc, mlir::ValueRange{args[0]});
+      fir::CallOp::create(builder, loc, newFunc, mlir::ValueRange{args[0]});
   call->replaceAllUsesWith(newCall.getResults());
   call->dropAllReferences();
   call->erase();
@@ -1344,7 +1344,7 @@ void SimplifyIntrinsicsPass::runOnOperation() {
 
           mlir::func::FuncOp newFunc = getOrCreateFunction(
               builder, typedFuncName, typeGenerator, bodyGenerator);
-          auto newCall = builder.create<fir::CallOp>(loc, newFunc,
+          auto newCall = fir::CallOp::create(builder, loc, newFunc,
                                                      mlir::ValueRange{v1, v2});
           call->replaceAllUsesWith(newCall.getResults());
           call->dropAllReferences();
diff --git a/flang/lib/Optimizer/Transforms/StackArrays.cpp b/flang/lib/Optimizer/Transforms/StackArrays.cpp
index bc8a9497fbb70..e34f56367a7ad 100644
--- a/flang/lib/Optimizer/Transforms/StackArrays.cpp
+++ b/flang/lib/Optimizer/Transforms/StackArrays.cpp
@@ -569,7 +569,7 @@ static mlir::Value convertAllocationType(mlir::PatternRewriter &rewriter,
   auto insertionPoint = rewriter.saveInsertionPoint();
   rewriter.setInsertionPointAfter(stack.getDefiningOp());
   mlir::Value conv =
-      rewriter.create<fir::ConvertOp>(loc, firHeapTy, stack).getResult();
+      fir::ConvertOp::create(rewriter, loc, firHeapTy, stack).getResult();
   rewriter.restoreInsertionPoint(insertionPoint);
   return conv;
 }
@@ -758,7 +758,7 @@ AllocMemConversion::insertAlloca(fir::AllocMemOp &oldAlloc,
 
   llvm::StringRef uniqName = unpackName(oldAlloc.getUniqName());
   llvm::StringRef bindcName = unpackName(oldAlloc.getBindcName());
-  auto alloca = rewriter.create<fir::AllocaOp>(loc, varTy, uniqName, bindcName,
+  auto alloca = fir::AllocaOp::create(rewriter, loc, varTy, uniqName, bindcName,
                                                oldAlloc.getTypeparams(),
                                                oldAlloc.getShape());
   if (emitLifetimeMarkers)



More information about the flang-commits mailing list