[Mlir-commits] [mlir] [mlir][NFC] update `mlir/Dialect` create APIs (17/n) (PR #149924)

Maksim Levental llvmlistbot at llvm.org
Thu Jul 24 12:23:17 PDT 2025


https://github.com/makslevental updated https://github.com/llvm/llvm-project/pull/149924

From e3955b8b51e9e48c8441528202a3a5c4f70c9e65 Mon Sep 17 00:00:00 2001
From: max <maksim.levental at gmail.com>
Date: Mon, 21 Jul 2025 18:18:54 -0400
Subject: [PATCH] [mlir][NFC] update `mlir/Dialect` create APIs (17/n)

See https://github.com/llvm/llvm-project/pull/147168 for more info.
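For context, the rewrite applied throughout this patch is mechanical: each
builder-templated call `builder.create<OpTy>(loc, ...)` becomes the equivalent
static form `OpTy::create(builder, loc, ...)`. A minimal before/after sketch of
the pattern (the helper functions below are hypothetical and shown only to
illustrate the call-site rewrite; they are not part of this patch):

    // Illustrative only: both forms build the same op; the patch changes
    // existing call sites in mlir/Dialect without changing behavior.
    #include "mlir/Dialect/Arith/IR/Arith.h"
    #include "mlir/IR/Builders.h"

    using namespace mlir;

    // Before: builder-templated create.
    static Value makeIndexConstantOld(OpBuilder &builder, Location loc,
                                      int64_t v) {
      return builder.create<arith::ConstantIndexOp>(loc, v);
    }

    // After: static OpTy::create taking the builder as the first argument,
    // as used throughout the hunks below.
    static Value makeIndexConstantNew(OpBuilder &builder, Location loc,
                                      int64_t v) {
      return arith::ConstantIndexOp::create(builder, loc, v);
    }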
---
 mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp      | 300 +++++++++--------
 .../TransformOps/LinalgTransformOps.cpp       |  30 +-
 .../BufferizableOpInterfaceImpl.cpp           |   6 +-
 .../Transforms/ConvertConv2DToImg2Col.cpp     | 171 +++++-----
 .../Transforms/ConvertToDestinationStyle.cpp  |  79 ++---
 .../Transforms/DataLayoutPropagation.cpp      |  79 ++---
 ...DecomposeGenericByUnfoldingPermutation.cpp |  17 +-
 .../Linalg/Transforms/DecomposeLinalgOps.cpp  |  26 +-
 .../Dialect/Linalg/Transforms/Detensorize.cpp |  10 +-
 .../Linalg/Transforms/DropUnitDims.cpp        |  51 +--
 .../Linalg/Transforms/ElementwiseOpFusion.cpp |  88 ++---
 .../Linalg/Transforms/ElementwiseToLinalg.cpp |   6 +-
 .../EraseUnusedOperandsAndResults.cpp         |   4 +-
 .../FusePadOpWithLinalgProducer.cpp           |  14 +-
 mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp |   2 +-
 .../Linalg/Transforms/Generalization.cpp      |   5 +-
 .../Linalg/Transforms/HoistPadding.cpp        |  42 +--
 .../Dialect/Linalg/Transforms/Hoisting.cpp    |   6 +-
 .../Transforms/InlineScalarOperands.cpp       |  12 +-
 .../Dialect/Linalg/Transforms/Interchange.cpp |   3 +-
 mlir/lib/Dialect/Linalg/Transforms/Loops.cpp  |  11 +-
 .../Transforms/MeshShardingInterfaceImpl.cpp  |  28 +-
 .../Linalg/Transforms/NamedOpConversions.cpp  |  17 +-
 .../Transforms/PackAndUnpackPatterns.cpp      |  12 +-
 .../Linalg/Transforms/PadTilingInterface.cpp  |  12 +-
 .../lib/Dialect/Linalg/Transforms/Padding.cpp |  12 +-
 .../Dialect/Linalg/Transforms/Promotion.cpp   |  50 +--
 .../Transforms/RuntimeOpVerification.cpp      |   4 +-
 mlir/lib/Dialect/Linalg/Transforms/Split.cpp  |   4 +-
 .../Linalg/Transforms/SplitReduction.cpp      |  50 +--
 .../SwapExtractSliceWithFillPatterns.cpp      |   8 +-
 mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp |  33 +-
 .../Linalg/Transforms/TilingInterfaceImpl.cpp |  88 ++---
 .../Dialect/Linalg/Transforms/Transforms.cpp  | 131 ++++----
 .../Linalg/Transforms/TransposeConv2D.cpp     |   8 +-
 .../Linalg/Transforms/TransposeMatmul.cpp     |  44 +--
 .../Linalg/Transforms/Vectorization.cpp       | 313 +++++++++---------
 .../Linalg/Transforms/WinogradConv2D.cpp      | 201 +++++------
 mlir/lib/Dialect/Linalg/Utils/Utils.cpp       |  30 +-
 39 files changed, 1031 insertions(+), 976 deletions(-)

diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 3aa6ac3ea0918..d5e2ed6bad7b1 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -62,10 +62,10 @@ static OpFoldResult getDimValue(OpBuilder &builder, Location loc, Value v,
   return getAsOpFoldResult(
       TypeSwitch<Type, Value>(v.getType())
           .Case<RankedTensorType>([&](RankedTensorType t) -> Value {
-            return builder.create<tensor::DimOp>(loc, v, dim);
+            return tensor::DimOp::create(builder, loc, v, dim);
           })
           .Case<MemRefType>([&](MemRefType t) -> Value {
-            return builder.create<memref::DimOp>(loc, v, dim);
+            return memref::DimOp::create(builder, loc, v, dim);
           }));
 }
 
@@ -77,12 +77,12 @@ static Operation *getSlice(OpBuilder &b, Location loc, Value source,
                            ArrayRef<OpFoldResult> strides) {
   return TypeSwitch<Type, Operation *>(source.getType())
       .Case<RankedTensorType>([&](RankedTensorType t) -> Operation * {
-        return b.create<tensor::ExtractSliceOp>(loc, source, offsets, sizes,
-                                                strides);
+        return tensor::ExtractSliceOp::create(b, loc, source, offsets, sizes,
+                                              strides);
       })
       .Case<MemRefType>([&](MemRefType type) -> Operation * {
-        return b.create<memref::SubViewOp>(loc, source, offsets, sizes,
-                                           strides);
+        return memref::SubViewOp::create(b, loc, source, offsets, sizes,
+                                         strides);
       })
       .Default([&](Type t) -> Operation * { return nullptr; });
 }
@@ -453,35 +453,35 @@ class RegionBuilderHelper {
     builder.setInsertionPointToEnd(&block);
     switch (unaryFn) {
     case UnaryFn::exp:
-      return builder.create<math::ExpOp>(arg.getLoc(), arg);
+      return math::ExpOp::create(builder, arg.getLoc(), arg);
     case UnaryFn::log:
-      return builder.create<math::LogOp>(arg.getLoc(), arg);
+      return math::LogOp::create(builder, arg.getLoc(), arg);
     case UnaryFn::abs:
-      return builder.create<math::AbsFOp>(arg.getLoc(), arg);
+      return math::AbsFOp::create(builder, arg.getLoc(), arg);
     case UnaryFn::ceil:
-      return builder.create<math::CeilOp>(arg.getLoc(), arg);
+      return math::CeilOp::create(builder, arg.getLoc(), arg);
     case UnaryFn::floor:
-      return builder.create<math::FloorOp>(arg.getLoc(), arg);
+      return math::FloorOp::create(builder, arg.getLoc(), arg);
     case UnaryFn::negf:
-      return builder.create<arith::NegFOp>(arg.getLoc(), arg);
+      return arith::NegFOp::create(builder, arg.getLoc(), arg);
     case UnaryFn::reciprocal: {
       Attribute oneAttr = builder.getOneAttr(arg.getType());
-      auto one = builder.create<arith::ConstantOp>(arg.getLoc(),
-                                                   ::cast<TypedAttr>(oneAttr));
-      return builder.create<arith::DivFOp>(arg.getLoc(), one, arg);
+      auto one = arith::ConstantOp::create(builder, arg.getLoc(),
+                                           ::cast<TypedAttr>(oneAttr));
+      return arith::DivFOp::create(builder, arg.getLoc(), one, arg);
     }
     case UnaryFn::round:
-      return builder.create<math::RoundOp>(arg.getLoc(), arg);
+      return math::RoundOp::create(builder, arg.getLoc(), arg);
     case UnaryFn::sqrt:
-      return builder.create<math::SqrtOp>(arg.getLoc(), arg);
+      return math::SqrtOp::create(builder, arg.getLoc(), arg);
     case UnaryFn::rsqrt:
-      return builder.create<math::RsqrtOp>(arg.getLoc(), arg);
+      return math::RsqrtOp::create(builder, arg.getLoc(), arg);
     case UnaryFn::square:
-      return builder.create<arith::MulFOp>(arg.getLoc(), arg, arg);
+      return arith::MulFOp::create(builder, arg.getLoc(), arg, arg);
     case UnaryFn::tanh:
-      return builder.create<math::TanhOp>(arg.getLoc(), arg);
+      return math::TanhOp::create(builder, arg.getLoc(), arg);
     case UnaryFn::erf:
-      return builder.create<math::ErfOp>(arg.getLoc(), arg);
+      return math::ErfOp::create(builder, arg.getLoc(), arg);
     }
     if (emitError) {
       emitError() << "unsupported unary function";
@@ -516,17 +516,17 @@ class RegionBuilderHelper {
     switch (binaryFn) {
     case BinaryFn::add:
       if (allComplex)
-        return builder.create<complex::AddOp>(arg0.getLoc(), arg0, arg1);
+        return complex::AddOp::create(builder, arg0.getLoc(), arg0, arg1);
       if (allFloatingPoint)
-        return builder.create<arith::AddFOp>(arg0.getLoc(), arg0, arg1);
+        return arith::AddFOp::create(builder, arg0.getLoc(), arg0, arg1);
       if (allBool)
-        return builder.create<arith::OrIOp>(arg0.getLoc(), arg0, arg1);
-      return builder.create<arith::AddIOp>(arg0.getLoc(), arg0, arg1);
+        return arith::OrIOp::create(builder, arg0.getLoc(), arg0, arg1);
+      return arith::AddIOp::create(builder, arg0.getLoc(), arg0, arg1);
     case BinaryFn::sub:
       if (allComplex)
-        return builder.create<complex::SubOp>(arg0.getLoc(), arg0, arg1);
+        return complex::SubOp::create(builder, arg0.getLoc(), arg0, arg1);
       if (allFloatingPoint)
-        return builder.create<arith::SubFOp>(arg0.getLoc(), arg0, arg1);
+        return arith::SubFOp::create(builder, arg0.getLoc(), arg0, arg1);
       if (allBool) {
         if (emitError) {
           emitError() << "unsupported operation: sub with bools";
@@ -534,20 +534,20 @@ class RegionBuilderHelper {
         }
         llvm_unreachable("unsupported operation: sub with bools");
       }
-      return builder.create<arith::SubIOp>(arg0.getLoc(), arg0, arg1);
+      return arith::SubIOp::create(builder, arg0.getLoc(), arg0, arg1);
     case BinaryFn::mul:
       if (allComplex)
-        return builder.create<complex::MulOp>(arg0.getLoc(), arg0, arg1);
+        return complex::MulOp::create(builder, arg0.getLoc(), arg0, arg1);
       if (allFloatingPoint)
-        return builder.create<arith::MulFOp>(arg0.getLoc(), arg0, arg1);
+        return arith::MulFOp::create(builder, arg0.getLoc(), arg0, arg1);
       if (allBool)
-        return builder.create<arith::AndIOp>(arg0.getLoc(), arg0, arg1);
-      return builder.create<arith::MulIOp>(arg0.getLoc(), arg0, arg1);
+        return arith::AndIOp::create(builder, arg0.getLoc(), arg0, arg1);
+      return arith::MulIOp::create(builder, arg0.getLoc(), arg0, arg1);
     case BinaryFn::div:
       if (allComplex)
-        return builder.create<complex::DivOp>(arg0.getLoc(), arg0, arg1);
+        return complex::DivOp::create(builder, arg0.getLoc(), arg0, arg1);
       if (allFloatingPoint)
-        return builder.create<arith::DivFOp>(arg0.getLoc(), arg0, arg1);
+        return arith::DivFOp::create(builder, arg0.getLoc(), arg0, arg1);
       if (allBool) {
         if (emitError) {
           emitError() << "unsupported operation: div with bools";
@@ -555,7 +555,7 @@ class RegionBuilderHelper {
         }
         llvm_unreachable("unsupported operation: div with bools");
       }
-      return builder.create<arith::DivSIOp>(arg0.getLoc(), arg0, arg1);
+      return arith::DivSIOp::create(builder, arg0.getLoc(), arg0, arg1);
     case BinaryFn::div_unsigned:
       if (!allInteger || allBool) {
         if (emitError) {
@@ -564,30 +564,30 @@ class RegionBuilderHelper {
         }
         llvm_unreachable("unsupported operation: unsigned div not on uint");
       }
-      return builder.create<arith::DivUIOp>(arg0.getLoc(), arg0, arg1);
+      return arith::DivUIOp::create(builder, arg0.getLoc(), arg0, arg1);
     case BinaryFn::max_signed:
       assert(!allComplex);
       if (allFloatingPoint)
-        return builder.create<arith::MaximumFOp>(arg0.getLoc(), arg0, arg1);
-      return builder.create<arith::MaxSIOp>(arg0.getLoc(), arg0, arg1);
+        return arith::MaximumFOp::create(builder, arg0.getLoc(), arg0, arg1);
+      return arith::MaxSIOp::create(builder, arg0.getLoc(), arg0, arg1);
     case BinaryFn::min_signed:
       assert(!allComplex);
       if (allFloatingPoint)
-        return builder.create<arith::MinimumFOp>(arg0.getLoc(), arg0, arg1);
-      return builder.create<arith::MinSIOp>(arg0.getLoc(), arg0, arg1);
+        return arith::MinimumFOp::create(builder, arg0.getLoc(), arg0, arg1);
+      return arith::MinSIOp::create(builder, arg0.getLoc(), arg0, arg1);
     case BinaryFn::max_unsigned:
       assert(!allComplex);
       if (allFloatingPoint)
-        return builder.create<arith::MaximumFOp>(arg0.getLoc(), arg0, arg1);
-      return builder.create<arith::MaxUIOp>(arg0.getLoc(), arg0, arg1);
+        return arith::MaximumFOp::create(builder, arg0.getLoc(), arg0, arg1);
+      return arith::MaxUIOp::create(builder, arg0.getLoc(), arg0, arg1);
     case BinaryFn::min_unsigned:
       assert(!allComplex);
       if (allFloatingPoint)
-        return builder.create<arith::MinimumFOp>(arg0.getLoc(), arg0, arg1);
-      return builder.create<arith::MinUIOp>(arg0.getLoc(), arg0, arg1);
+        return arith::MinimumFOp::create(builder, arg0.getLoc(), arg0, arg1);
+      return arith::MinUIOp::create(builder, arg0.getLoc(), arg0, arg1);
     case BinaryFn::powf:
       assert(allFloatingPoint);
-      return builder.create<math::PowFOp>(arg0.getLoc(), arg0, arg1);
+      return math::PowFOp::create(builder, arg0.getLoc(), arg0, arg1);
     }
     if (emitError) {
       emitError() << "unsupported binary function";
@@ -610,7 +610,7 @@ class RegionBuilderHelper {
     case TernaryFn::select:
       if (!headBool && !(tailFloatingPoint || tailInteger))
         llvm_unreachable("unsupported non numeric type");
-      return builder.create<arith::SelectOp>(arg0.getLoc(), arg0, arg1, arg2);
+      return arith::SelectOp::create(builder, arg0.getLoc(), arg0, arg1, arg2);
     }
     if (emitError) {
       emitError() << "unsupported ternary function";
@@ -639,7 +639,7 @@ class RegionBuilderHelper {
     OpBuilder::InsertionGuard g(builder);
     builder.setInsertionPointToEnd(&block);
     Location loc = builder.getUnknownLoc();
-    builder.create<YieldOp>(loc, values);
+    YieldOp::create(builder, loc, values);
   }
 
   Value constant(const std::string &value) {
@@ -647,13 +647,14 @@ class RegionBuilderHelper {
     builder.setInsertionPointToEnd(&block);
     Location loc = builder.getUnknownLoc();
     Attribute valueAttr = parseAttribute(value, builder.getContext());
-    return builder.create<arith::ConstantOp>(loc, ::cast<TypedAttr>(valueAttr));
+    return arith::ConstantOp::create(builder, loc,
+                                     ::cast<TypedAttr>(valueAttr));
   }
 
   Value index(int64_t dim) {
     OpBuilder::InsertionGuard g(builder);
     builder.setInsertionPointToEnd(&block);
-    return builder.create<IndexOp>(builder.getUnknownLoc(), dim);
+    return IndexOp::create(builder, builder.getUnknownLoc(), dim);
   }
 
   Type getIntegerType(unsigned width) {
@@ -749,14 +750,14 @@ struct FoldFillWithTensorReshape : OpRewritePattern<TensorReshapeOp> {
     TensorReshapeOp newInit;
     if constexpr (std::is_same<TensorReshapeOp, tensor::ExpandShapeOp>::value) {
 
-      newInit = rewriter.create<TensorReshapeOp>(
-          loc, reshapeOp.getResultType(), oldFill.output(),
+      newInit = TensorReshapeOp::create(
+          rewriter, loc, reshapeOp.getResultType(), oldFill.output(),
           reshapeOp.getReassociation(), reshapeOp.getOutputShape(),
           reshapeOp.getStaticOutputShape());
     } else {
-      newInit = rewriter.create<TensorReshapeOp>(loc, reshapeOp.getResultType(),
-                                                 oldFill.output(),
-                                                 reshapeOp.getReassociation());
+      newInit = TensorReshapeOp::create(
+          rewriter, loc, reshapeOp.getResultType(), oldFill.output(),
+          reshapeOp.getReassociation());
     }
     rewriter.replaceOpWithNewOp<FillOp>(reshapeOp, ValueRange{oldFill.value()},
                                         ValueRange{newInit});
@@ -786,17 +787,17 @@ struct FoldFillWithPad final : public OpRewritePattern<tensor::PadOp> {
       return rewriter.notifyMatchFailure(
           padOp, "failed to reify tensor.pad op result shape");
 
-    auto emptyTensor = rewriter.create<tensor::EmptyOp>(
-        padOp.getLoc(), reifiedShape.front(),
-        padOp.getResultType().getElementType());
+    auto emptyTensor =
+        tensor::EmptyOp::create(rewriter, padOp.getLoc(), reifiedShape.front(),
+                                padOp.getResultType().getElementType());
     Value replacement =
         rewriter
             .create<FillOp>(fillOp.getLoc(), ValueRange{padValue},
                             ValueRange{emptyTensor})
             .getResult(0);
     if (replacement.getType() != padOp.getResultType()) {
-      replacement = rewriter.create<tensor::CastOp>(
-          fillOp.getLoc(), padOp.getResultType(), replacement);
+      replacement = tensor::CastOp::create(rewriter, fillOp.getLoc(),
+                                           padOp.getResultType(), replacement);
     }
     rewriter.replaceOp(padOp, replacement);
     return success();
@@ -889,7 +890,7 @@ struct FoldInsertPadIntoFill : public OpRewritePattern<tensor::InsertSliceOp> {
     for (int i = 0, e = srcPadType.getRank(); i < e; ++i) {
       if (srcPadType.isDynamicDim(i)) {
         newSizes.push_back(
-            rewriter.create<tensor::DimOp>(loc, srcPadOp.getSource(), i)
+            tensor::DimOp::create(rewriter, loc, srcPadOp.getSource(), i)
                 .getResult());
       } else {
         newSizes.push_back(rewriter.getIndexAttr(srcPadType.getDimSize(i)));
@@ -942,8 +943,8 @@ static FailureOr<FillOp> foldFillPackIntoFillOp(RewriterBase &rewriter,
   if (!packOpDest.hasOneUse())
     return failure();
 
-  return rewriter.create<linalg::FillOp>(packOp.getLoc(), fillOp.getInputs(),
-                                         packOp.getDest());
+  return linalg::FillOp::create(rewriter, packOp.getLoc(), fillOp.getInputs(),
+                                packOp.getDest());
 }
 
 /// Wrapper pattern that applies foldFillPackIntoFillOp method.
@@ -1042,8 +1043,8 @@ struct FoldConcatsOfFill : public OpRewritePattern<tensor::ConcatOp> {
           concatOp, "not all operands are defined by a compatible fill op");
     }
 
-    Value outsConcat = rewriter.create<tensor::ConcatOp>(
-        concatOp.getLoc(), concatOp.getDim(), allOuts);
+    Value outsConcat = tensor::ConcatOp::create(rewriter, concatOp.getLoc(),
+                                                concatOp.getDim(), allOuts);
     rewriter.replaceOpWithNewOp<linalg::FillOp>(
         concatOp, firstFillOp.getDpsInputOperand(0)->get(), outsConcat);
     return success();
@@ -1407,14 +1408,14 @@ struct EraseIdentityLinalgOp : public OpRewritePattern<OpTy> {
         // TODO: unify the two ops?
         if (sparse_tensor::getSparseTensorEncoding(returnType) ||
             sparse_tensor::getSparseTensorEncoding(resultType))
-          returnedArg = rewriter.create<sparse_tensor::ConvertOp>(
-              linalgOp.getLoc(), resultType, returnedArg);
+          returnedArg = sparse_tensor::ConvertOp::create(
+              rewriter, linalgOp.getLoc(), resultType, returnedArg);
         else {
           if (!tensor::CastOp::areCastCompatible(returnedArg.getType(),
                                                  resultType))
             return failure();
-          returnedArg = rewriter.create<tensor::CastOp>(
-              linalgOp.getLoc(), resultType, returnedArg);
+          returnedArg = tensor::CastOp::create(rewriter, linalgOp.getLoc(),
+                                               resultType, returnedArg);
         }
       }
       returnedArgs.push_back(returnedArg);
@@ -1528,7 +1529,7 @@ static void addBodyWithPayloadOp(OpAsmParser &parser, OperationState &result,
       TypeRange{llvm::cast<ShapedType>(result.operands.back().getType())
                     .getElementType()},
       payloadOpAttrs);
-  b.create<YieldOp>(result.location, payloadOp->getResults());
+  YieldOp::create(b, result.location, payloadOp->getResults());
 }
 
 ParseResult MapOp::parse(OpAsmParser &parser, OperationState &result) {
@@ -1945,7 +1946,7 @@ static void buildIdentityRegion(OpBuilder &builder, Location loc,
   buildGenericRegion(builder, loc, region, inputs, outputs,
                      [](OpBuilder &b, Location loc, ValueRange args) {
                        if (!args.empty())
-                         b.create<linalg::YieldOp>(loc, args[0]);
+                         linalg::YieldOp::create(b, loc, args[0]);
                      });
 }
 
@@ -2138,7 +2139,7 @@ struct SwapTransposeWithBroadcast : OpRewritePattern<linalg::TransposeOp> {
     unsigned inputRank = broadcastInputTy.getRank();
     for (unsigned i = 0; i < inputRank; ++i) {
       if (broadcastInputTy.isDynamicDim(i)) {
-        dims.push_back(rewriter.create<tensor::DimOp>(loc, broadcastInput, i)
+        dims.push_back(tensor::DimOp::create(rewriter, loc, broadcastInput, i)
                            ->getResult(0));
       } else {
         dims.push_back(IntegerAttr::get(IndexType::get(ctx),
@@ -2147,8 +2148,8 @@ struct SwapTransposeWithBroadcast : OpRewritePattern<linalg::TransposeOp> {
     }
     SmallVector<OpFoldResult> transposeResultShapes =
         applyPermutation(dims, resultPerms);
-    Value transposeInit = rewriter.create<tensor::EmptyOp>(
-        transposeOp.getLoc(), transposeResultShapes,
+    Value transposeInit = tensor::EmptyOp::create(
+        rewriter, transposeOp.getLoc(), transposeResultShapes,
         broadcastInputTy.getElementType());
 
     // Create broadcast(transpose(input)).
@@ -2547,7 +2548,7 @@ struct FoldTensorCastConsumerOp : public OpRewritePattern<tensor::CastOp> {
     // continue to propagate as far up the stack as it can go.
     OpOperand *outOperand = linalgOp.getDpsInitOperand(resultNumber);
     Value newOperand =
-        rewriter.create<tensor::CastOp>(loc, resultType, outOperand->get());
+        tensor::CastOp::create(rewriter, loc, resultType, outOperand->get());
     SmallVector<Value> newOperands = linalgOp.getDpsInputs();
     SmallVector<Value> outputOperands(linalgOp.getDpsInits().begin(),
                                       linalgOp.getDpsInits().end());
@@ -2560,8 +2561,8 @@ struct FoldTensorCastConsumerOp : public OpRewritePattern<tensor::CastOp> {
     Operation *newOp = clone(rewriter, linalgOp, resultTypes, newOperands);
 
     // Create a tensor.cast operation back to the original type.
-    Value castBack = rewriter.create<tensor::CastOp>(
-        loc, resultValue.getType(), newOp->getResult(resultNumber));
+    Value castBack = tensor::CastOp::create(
+        rewriter, loc, resultValue.getType(), newOp->getResult(resultNumber));
 
     SmallVector<Value> results(newOp->result_begin(), newOp->result_end());
     results[resultNumber] = castBack;
@@ -2653,7 +2654,7 @@ static void createNewOperandWithStaticSizes(
     changeNeeded = true;
     // Get the new operand value given its size and element type by
     // casting it.
-    Value newOperand = rewriter.create<tensor::CastOp>(loc, resultType, src);
+    Value newOperand = tensor::CastOp::create(rewriter, loc, resultType, src);
     unsigned index = opOperand->getOperandNumber();
     newOperands[index] = newOperand;
   }
@@ -2718,7 +2719,7 @@ struct InferStaticShapeOfOperands : public OpInterfaceRewritePattern<LinalgOp> {
       Type oldType = oldResult.getType();
       replacements.push_back(
           (newType != oldType)
-              ? rewriter.create<tensor::CastOp>(loc, oldType, newResult)
+              ? tensor::CastOp::create(rewriter, loc, oldType, newResult)
               : newResult);
     }
     rewriter.replaceOp(linalgOp, replacements);
@@ -2756,8 +2757,8 @@ SmallVector<Range> SoftmaxOp::getIterationDomain(OpBuilder &builder) {
   int64_t operandRank = getInputOperandRank();
   SmallVector<Range> loopBounds(operandRank);
   Location loc = getLoc();
-  Value zero = builder.create<arith::ConstantIndexOp>(loc, 0);
-  Value one = builder.create<arith::ConstantIndexOp>(loc, 1);
+  Value zero = arith::ConstantIndexOp::create(builder, loc, 0);
+  Value one = arith::ConstantIndexOp::create(builder, loc, 1);
   Value source = getInput();
   for (auto dim : llvm::seq<int64_t>(0, operandRank)) {
     loopBounds[dim].offset = zero;
@@ -2924,11 +2925,11 @@ static Value reduce(OpBuilder &builder, Location loc, Value input, Value output,
          "We should have two maps: 1 for the input, 1 for the output");
   assert(indexingMaps[0].isIdentity() && "input map should be identity");
 
-  auto genericOp = builder.create<linalg::GenericOp>(
-      loc, output.getType(), input, output, indexingMaps, iteratorTypes,
-      [&](OpBuilder &b, Location loc, ValueRange args) {
-        Value result = b.create<T>(loc, args[0], args[1]);
-        b.create<linalg::YieldOp>(loc, result);
+  auto genericOp = linalg::GenericOp::create(
+      builder, loc, output.getType(), input, output, indexingMaps,
+      iteratorTypes, [&](OpBuilder &b, Location loc, ValueRange args) {
+        Value result = T::create(b, loc, args[0], args[1]);
+        linalg::YieldOp::create(b, loc, result);
       });
   return genericOp.getResult(0);
 }
@@ -2947,12 +2948,13 @@ static Value buildSubAndExpOp(OpBuilder &builder, Location loc, Value input,
   assert(indexingMaps[0].isIdentity() && "input map should be identity");
   // Add the affine map for the output argument.
   indexingMaps.push_back(indexingMaps[0]);
-  auto genericOp = builder.create<linalg::GenericOp>(
-      loc, input.getType(), ValueRange{input, max}, output, indexingMaps,
-      iteratorTypes, [&](OpBuilder &b, Location loc, ValueRange args) {
-        Value diff = b.create<arith::SubFOp>(loc, args[0], args[1]);
-        Value result = b.create<math::ExpOp>(loc, diff);
-        b.create<linalg::YieldOp>(loc, result);
+  auto genericOp = linalg::GenericOp::create(
+      builder, loc, input.getType(), ValueRange{input, max}, output,
+      indexingMaps, iteratorTypes,
+      [&](OpBuilder &b, Location loc, ValueRange args) {
+        Value diff = arith::SubFOp::create(b, loc, args[0], args[1]);
+        Value result = math::ExpOp::create(b, loc, diff);
+        linalg::YieldOp::create(b, loc, result);
       });
   return genericOp.getResult(0);
 }
@@ -2974,12 +2976,12 @@ static Value buildDivOp(OpBuilder &builder, Location loc, Value numerator,
   assert(indexingMaps[0].isIdentity() && "Numerator map should be identity");
   // Add the affine map for the output tensor.
   indexingMaps.push_back(indexingMaps[0]);
-  auto genericOp = builder.create<linalg::GenericOp>(
-      loc, numerator.getType(), ValueRange{numerator, denominator}, output,
-      indexingMaps, iteratorTypes,
+  auto genericOp = linalg::GenericOp::create(
+      builder, loc, numerator.getType(), ValueRange{numerator, denominator},
+      output, indexingMaps, iteratorTypes,
       [&](OpBuilder &b, Location loc, ValueRange args) {
-        Value result = b.create<arith::DivFOp>(loc, args[0], args[1]);
-        b.create<linalg::YieldOp>(loc, result);
+        Value result = arith::DivFOp::create(b, loc, args[0], args[1]);
+        linalg::YieldOp::create(b, loc, result);
       });
   return genericOp.getResult(0);
 }
@@ -3015,12 +3017,12 @@ FailureOr<SmallVector<Value>> SoftmaxOp::decomposeOperation(OpBuilder &b) {
   Value output = getOutput();
   dims.erase(dims.begin() + reductionDim);
   // Step 1: Compute max along dim.
-  Value outputReduce = b.create<tensor::EmptyOp>(loc, dims, elementType);
+  Value outputReduce = tensor::EmptyOp::create(b, loc, dims, elementType);
   Value neutralForMaxF = arith::getIdentityValue(arith::AtomicRMWKind::maxnumf,
                                                  elementType, b, loc,
                                                  /*useOnlyFiniteValue=*/true);
   Value neutralForMaxFInit =
-      b.create<linalg::FillOp>(loc, Value{neutralForMaxF}, outputReduce)
+      linalg::FillOp::create(b, loc, Value{neutralForMaxF}, outputReduce)
           .result();
   Value max =
       reduce<arith::MaxNumFOp>(b, loc, input, neutralForMaxFInit, reductionDim);
@@ -3032,7 +3034,7 @@ FailureOr<SmallVector<Value>> SoftmaxOp::decomposeOperation(OpBuilder &b) {
   Value zero = arith::getIdentityValue(arith::AtomicRMWKind::addf, elementType,
                                        b, loc, /*useOnlyFiniteValue=*/true);
   Value zeroInit =
-      b.create<linalg::FillOp>(loc, Value{zero}, outputReduce).result();
+      linalg::FillOp::create(b, loc, Value{zero}, outputReduce).result();
   Value denominator =
       reduce<arith::AddFOp>(b, loc, numerator, zeroInit, reductionDim);
 
@@ -3153,8 +3155,8 @@ FailureOr<TilingResult> WinogradFilterTransformOp::getTiledImplementation(
   int64_t filterRank = getFilterOperandRank();
   SmallVector<OpFoldResult> filterStrides(filterRank, oneAttr);
   Location loc = getLoc();
-  auto filterSlice = builder.create<tensor::ExtractSliceOp>(
-      loc, getFilter(), sliceOffsets, sliceSizes, filterStrides);
+  auto filterSlice = tensor::ExtractSliceOp::create(
+      builder, loc, getFilter(), sliceOffsets, sliceSizes, filterStrides);
   tiledOperands.emplace_back(filterSlice);
 
   SmallVector<OpFoldResult> resultOffsets, resultSizes;
@@ -3164,8 +3166,8 @@ FailureOr<TilingResult> WinogradFilterTransformOp::getTiledImplementation(
 
   int64_t outputRank = getOutputOperandRank();
   SmallVector<OpFoldResult> outputStrides(outputRank, oneAttr);
-  auto outputSlice = builder.create<tensor::ExtractSliceOp>(
-      loc, getOutput(), resultOffsets, resultSizes, outputStrides);
+  auto outputSlice = tensor::ExtractSliceOp::create(
+      builder, loc, getOutput(), resultOffsets, resultSizes, outputStrides);
   tiledOperands.emplace_back(outputSlice);
 
   SmallVector<Type> resultTypes;
@@ -3333,8 +3335,8 @@ WinogradInputTransformOp::getTiledImplementation(OpBuilder &builder,
       {sizes[getOutputNDim()], sizeH, sizeW, sizes[getOutputCDim()]});
   int64_t inputRank = getInputOperandRank();
   SmallVector<OpFoldResult> inputStrides(inputRank, oneAttr);
-  auto inputSlice = builder.create<tensor::ExtractSliceOp>(
-      loc, getInput(), sliceOffsets, sliceSizes, inputStrides);
+  auto inputSlice = tensor::ExtractSliceOp::create(
+      builder, loc, getInput(), sliceOffsets, sliceSizes, inputStrides);
   tiledOperands.emplace_back(inputSlice);
 
   SmallVector<OpFoldResult> resultOffsets, resultSizes;
@@ -3344,8 +3346,8 @@ WinogradInputTransformOp::getTiledImplementation(OpBuilder &builder,
 
   int64_t outputRank = getOutputOperandRank();
   SmallVector<OpFoldResult> outputStrides(outputRank, oneAttr);
-  auto outputSlice = builder.create<tensor::ExtractSliceOp>(
-      loc, getOutput(), resultOffsets, resultSizes, outputStrides);
+  auto outputSlice = tensor::ExtractSliceOp::create(
+      builder, loc, getOutput(), resultOffsets, resultSizes, outputStrides);
   tiledOperands.emplace_back(outputSlice);
 
   SmallVector<Type> resultTypes;
@@ -3504,8 +3506,8 @@ FailureOr<TilingResult> WinogradOutputTransformOp::getTiledImplementation(
                      sizes[getValueFDim()]});
   int64_t valueRank = getValueOperandRank();
   SmallVector<OpFoldResult> sliceStrides(valueRank, oneAttr);
-  auto valueSlice = builder.create<tensor::ExtractSliceOp>(
-      loc, getValue(), sliceOffsets, sliceSizes, sliceStrides);
+  auto valueSlice = tensor::ExtractSliceOp::create(
+      builder, loc, getValue(), sliceOffsets, sliceSizes, sliceStrides);
   tiledOperands.emplace_back(valueSlice);
 
   SmallVector<OpFoldResult> resultOffsets, resultSizes;
@@ -3515,8 +3517,8 @@ FailureOr<TilingResult> WinogradOutputTransformOp::getTiledImplementation(
 
   int64_t outputRank = getOutputOperandRank();
   SmallVector<OpFoldResult> strides(outputRank, oneAttr);
-  auto outputSlice = builder.create<tensor::ExtractSliceOp>(
-      loc, getOutput(), resultOffsets, resultSizes, strides);
+  auto outputSlice = tensor::ExtractSliceOp::create(
+      builder, loc, getOutput(), resultOffsets, resultSizes, strides);
   tiledOperands.emplace_back(outputSlice);
 
   SmallVector<Type> resultTypes;
@@ -4971,7 +4973,7 @@ Value PackOp::createDestinationTensor(OpBuilder &b, Location loc, Value source,
            llvm::cast<RankedTensorType>(source.getType()).getShape())) {
     if (ShapedType::isDynamic(value))
       mixedSizes.push_back(
-          b.create<tensor::DimOp>(loc, source, index).getResult());
+          tensor::DimOp::create(b, loc, source, index).getResult());
     else
       mixedSizes.push_back(b.getIndexAttr(value));
   }
@@ -4985,7 +4987,7 @@ Value PackOp::createDestinationTensor(OpBuilder &b, Location loc, Value source,
 
   mixedSizes.append(innerTileSizes.begin(), innerTileSizes.end());
   auto elemType = llvm::cast<ShapedType>(source.getType()).getElementType();
-  return b.create<tensor::EmptyOp>(loc, mixedSizes, elemType);
+  return tensor::EmptyOp::create(b, loc, mixedSizes, elemType);
 }
 
 PackOp PackOp::createTransposedClone(OpBuilder &b, Location loc,
@@ -4996,9 +4998,9 @@ PackOp PackOp::createTransposedClone(OpBuilder &b, Location loc,
   Value transposedDest =
       createDestinationTensor(b, loc, getSource(), metadata.innerTiles,
                               metadata.innerDimsPos, metadata.outerDimsPerm);
-  return b.create<PackOp>(loc, getSource(), transposedDest,
-                          metadata.innerDimsPos, metadata.innerTiles,
-                          getPaddingValue(), metadata.outerDimsPerm);
+  return PackOp::create(b, loc, getSource(), transposedDest,
+                        metadata.innerDimsPos, metadata.innerTiles,
+                        getPaddingValue(), metadata.outerDimsPerm);
 }
 
 /// Returns true if the tiles and the tiled dims are constant.
@@ -5138,7 +5140,7 @@ LogicalResult PackOp::canonicalize(PackOp packOp, PatternRewriter &rewriter) {
     if (srcShape != packOp.getSourceType().getShape()) {
       auto newSrcType = packOp.getSourceType().clone(srcShape);
       source =
-          rewriter.create<tensor::CastOp>(loc, newSrcType, packOp.getSource());
+          tensor::CastOp::create(rewriter, loc, newSrcType, packOp.getSource());
     }
     Value dest = packOp.getDest();
     RankedTensorType originalResultType = packOp.getDestType();
@@ -5146,7 +5148,7 @@ LogicalResult PackOp::canonicalize(PackOp packOp, PatternRewriter &rewriter) {
     if (needUpdateDestType) {
       auto newDestType = packOp.getDestType().clone(destShape);
       dest =
-          rewriter.create<tensor::CastOp>(loc, newDestType, packOp.getDest());
+          tensor::CastOp::create(rewriter, loc, newDestType, packOp.getDest());
     }
     rewriter.modifyOpInPlace(packOp, [&] {
       packOp.getSourceMutable().assign(source);
@@ -5157,7 +5159,7 @@ LogicalResult PackOp::canonicalize(PackOp packOp, PatternRewriter &rewriter) {
     if (needUpdateDestType) {
       rewriter.setInsertionPointAfter(packOp);
       auto castOp =
-          rewriter.create<tensor::CastOp>(loc, originalResultType, packOp);
+          tensor::CastOp::create(rewriter, loc, originalResultType, packOp);
       rewriter.replaceAllUsesExcept(packOp, castOp, castOp);
     }
     return success();
@@ -5250,18 +5252,20 @@ struct FoldTensorCastPackOp : public OpRewritePattern<PackOp> {
     // TODO: Strictly speaking, discardable attributes should be _discarded_ at
     // this point. However, in practice, we use them for things that we'd like
     // to preserve. Implement a better abstraction.
-    PackOp newOp = rewriter.create<PackOp>(
-        op.getLoc(), newOperands[0], newOperands[1], op.getInnerDimsPos(),
-        newMixedTileSizes, op.getPaddingValue(), op.getOuterDimsPerm());
+    PackOp newOp =
+        PackOp::create(rewriter, op.getLoc(), newOperands[0], newOperands[1],
+                       op.getInnerDimsPos(), newMixedTileSizes,
+                       op.getPaddingValue(), op.getOuterDimsPerm());
     newOp->setDiscardableAttrs(op->getDiscardableAttrDictionary());
 
     // Replace op.
     Value oldResult = op.getResult();
     Value newResult = newOp.getResult();
-    Value replacement = (newResult.getType() != oldResult.getType())
-                            ? rewriter.create<tensor::CastOp>(
-                                  op->getLoc(), oldResult.getType(), newResult)
-                            : newResult;
+    Value replacement =
+        (newResult.getType() != oldResult.getType())
+            ? tensor::CastOp::create(rewriter, op->getLoc(),
+                                     oldResult.getType(), newResult)
+            : newResult;
 
     rewriter.replaceOp(op, {replacement});
 
@@ -5358,7 +5362,8 @@ Value UnPackOp::createDestinationTensor(OpBuilder &b, Location loc,
   for (auto i :
        llvm::seq<unsigned>(0, srcType.getRank() - innerTileSizes.size())) {
     if (srcType.isDynamicDim(i))
-      mixedSizes.push_back(b.create<tensor::DimOp>(loc, source, i).getResult());
+      mixedSizes.push_back(
+          tensor::DimOp::create(b, loc, source, i).getResult());
     else
       mixedSizes.push_back(b.getIndexAttr(srcType.getDimSize(i)));
   }
@@ -5371,7 +5376,7 @@ Value UnPackOp::createDestinationTensor(OpBuilder &b, Location loc,
     mixedSizes[dimPos] = dimMul(mixedSizes[dimPos], tileSize);
 
   auto elemType = srcType.getElementType();
-  return b.create<tensor::EmptyOp>(loc, mixedSizes, elemType);
+  return tensor::EmptyOp::create(b, loc, mixedSizes, elemType);
 }
 
 UnPackOp UnPackOp::createTransposedClone(OpBuilder &b, Location loc,
@@ -5380,9 +5385,9 @@ UnPackOp UnPackOp::createTransposedClone(OpBuilder &b, Location loc,
                                          ArrayRef<int64_t> outerPermutation) {
   PackOrUnPackTransposeResult metadata = commonPermutationOfPackAndUnPackOp(
       *this, innerPermutation, outerPermutation);
-  return b.create<UnPackOp>(loc, transposedSource, getDest(),
-                            metadata.innerDimsPos, metadata.innerTiles,
-                            metadata.outerDimsPerm);
+  return UnPackOp::create(b, loc, transposedSource, getDest(),
+                          metadata.innerDimsPos, metadata.innerTiles,
+                          metadata.outerDimsPerm);
 }
 
 /// Returns true if the `srcShape` or `destShape` is different from the one in
@@ -5454,8 +5459,8 @@ LogicalResult UnPackOp::canonicalize(UnPackOp unPackOp,
             extractSliceUser.getResultType().getRank()) {
       OpBuilder::InsertionGuard g(rewriter);
       rewriter.setInsertionPoint(unPackOp);
-      auto newDest = rewriter.create<tensor::ExtractSliceOp>(
-          unPackOp->getLoc(), unPackOp.getDest(),
+      auto newDest = tensor::ExtractSliceOp::create(
+          rewriter, unPackOp->getLoc(), unPackOp.getDest(),
           extractSliceUser.getMixedOffsets(), extractSliceUser.getMixedSizes(),
           extractSliceUser.getMixedStrides());
       rewriter.modifyOpInPlace(unPackOp, [&]() {
@@ -5474,18 +5479,18 @@ LogicalResult UnPackOp::canonicalize(UnPackOp unPackOp,
     Value source = unPackOp.getSource();
     if (srcShape != unPackOp.getSourceType().getShape()) {
       auto newSrcType = unPackOp.getSourceType().clone(srcShape);
-      source = rewriter.create<tensor::CastOp>(loc, newSrcType,
-                                               unPackOp.getSource());
+      source = tensor::CastOp::create(rewriter, loc, newSrcType,
+                                      unPackOp.getSource());
     }
     Value dest = unPackOp.getDest();
     if (destShape != unPackOp.getDestType().getShape()) {
       auto newDestType = unPackOp.getDestType().clone(destShape);
-      dest =
-          rewriter.create<tensor::CastOp>(loc, newDestType, unPackOp.getDest());
+      dest = tensor::CastOp::create(rewriter, loc, newDestType,
+                                    unPackOp.getDest());
     }
-    Value newOp = rewriter.create<UnPackOp>(
-        loc, source, dest, unPackOp.getInnerDimsPos(), unPackOp.getMixedTiles(),
-        unPackOp.getOuterDimsPerm());
+    Value newOp = UnPackOp::create(
+        rewriter, loc, source, dest, unPackOp.getInnerDimsPos(),
+        unPackOp.getMixedTiles(), unPackOp.getOuterDimsPerm());
     rewriter.replaceOpWithNewOp<tensor::CastOp>(
         unPackOp, unPackOp.getResult().getType(), newOp);
     return success();
@@ -5542,18 +5547,19 @@ struct FoldTensorCastUnPackOp : public OpRewritePattern<UnPackOp> {
     // TODO: Strictly speaking, discardable attributes should be _discarded_ at
     // this point. However, in practice, we use them for things that we'd like
     // to preserve. Implement a better abstraction.
-    UnPackOp newOp = rewriter.create<UnPackOp>(
-        op.getLoc(), sourceTensor, newOperands[1], op.getInnerDimsPos(),
-        newMixedTileSizes, op.getOuterDimsPerm());
+    UnPackOp newOp = UnPackOp::create(rewriter, op.getLoc(), sourceTensor,
+                                      newOperands[1], op.getInnerDimsPos(),
+                                      newMixedTileSizes, op.getOuterDimsPerm());
     newOp->setDiscardableAttrs(op->getDiscardableAttrDictionary());
 
     // Replace op.
     Value oldResult = op.getResult();
     Value newResult = newOp.getResult();
-    Value replacement = (newResult.getType() != oldResult.getType())
-                            ? rewriter.create<tensor::CastOp>(
-                                  op->getLoc(), oldResult.getType(), newResult)
-                            : newResult;
+    Value replacement =
+        (newResult.getType() != oldResult.getType())
+            ? tensor::CastOp::create(rewriter, op->getLoc(),
+                                     oldResult.getType(), newResult)
+            : newResult;
 
     rewriter.replaceOp(op, {replacement});
 
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 109e5b7f95ec0..e3ce0e1e48376 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -672,9 +672,10 @@ static Operation *replaceForAllWithNewSignature(
   newOuts.push_back(outputs[resultNumber]);
 
   // Create new scf.forall op
-  auto newforallOp = rewriter.create<scf::ForallOp>(
-      loc, forallOp.getMixedLowerBound(), forallOp.getMixedUpperBound(),
-      forallOp.getMixedStep(), newOuts, forallOp.getMapping());
+  auto newforallOp = scf::ForallOp::create(
+      rewriter, loc, forallOp.getMixedLowerBound(),
+      forallOp.getMixedUpperBound(), forallOp.getMixedStep(), newOuts,
+      forallOp.getMapping());
   rewriter.eraseBlock(newforallOp.getBody());
   newforallOp.getRegion().takeBody(forallOp.getRegion());
 
@@ -699,8 +700,8 @@ static Operation *replaceForAllWithNewSignature(
   Value src = tileAndFuseResult.tiledValues[0];
   Value dst = newforallOp.getRegionIterArgs().back();
   SmallVector<OpFoldResult> strides(offsets.size(), rewriter.getIndexAttr(1));
-  rewriter.create<tensor::ParallelInsertSliceOp>(firstYieldOp->getLoc(), src,
-                                                 dst, offsets, sizes, strides);
+  tensor::ParallelInsertSliceOp::create(rewriter, firstYieldOp->getLoc(), src,
+                                        dst, offsets, sizes, strides);
 
   for (auto result : llvm::enumerate(forallOp.getResults())) {
     rewriter.replaceAllUsesWith(result.value(),
@@ -3410,12 +3411,12 @@ transform::TileUsingForOp::apply(transform::TransformRewriter &rewriter,
         for (auto [ofrIdx, ofr] : llvm::enumerate(getMixedSizes())) {
           if (auto attr = llvm::dyn_cast_if_present<Attribute>(ofr)) {
             if (scalableSizes[ofrIdx]) {
-              auto val = b.create<arith::ConstantIndexOp>(
-                  getLoc(), cast<IntegerAttr>(attr).getInt());
+              auto val = arith::ConstantIndexOp::create(
+                  b, getLoc(), cast<IntegerAttr>(attr).getInt());
               Value vscale =
-                  b.create<vector::VectorScaleOp>(getLoc(), b.getIndexType());
+                  vector::VectorScaleOp::create(b, getLoc(), b.getIndexType());
               sizes.push_back(
-                  b.create<arith::MulIOp>(getLoc(), val, vscale).getResult());
+                  arith::MulIOp::create(b, getLoc(), val, vscale).getResult());
             } else {
               sizes.push_back(attr);
             }
@@ -3626,9 +3627,10 @@ static scf::ForallOp normalizeForallLoopOp(RewriterBase &rewriter,
   SmallVector<OpFoldResult> normalizedSteps(normalizedUbs.size(),
                                             rewriter.getIndexAttr(1));
 
-  auto normalizedForallOp = rewriter.create<scf::ForallOp>(
-      loc, normalizedLbs, normalizedUbs, normalizedSteps, loop.getOutputs(),
-      loop.getMapping(), [](OpBuilder &, Location, ValueRange) {});
+  auto normalizedForallOp = scf::ForallOp::create(
+      rewriter, loc, normalizedLbs, normalizedUbs, normalizedSteps,
+      loop.getOutputs(), loop.getMapping(),
+      [](OpBuilder &, Location, ValueRange) {});
 
   auto normalizedLoopIvs = normalizedForallOp.getInductionVars();
   OpBuilder::InsertionGuard g(rewriter);
@@ -4131,8 +4133,8 @@ DiagnosedSilenceableFailure doit(RewriterBase &rewriter, OpTy target,
         target->template getParentOfType<scf::InParallelOp>());
   }
 
-  Value extracted = rewriter.create<tensor::ExtractSliceOp>(
-      target.getLoc(), target.getDest(), target.getMixedOffsets(),
+  Value extracted = tensor::ExtractSliceOp::create(
+      rewriter, target.getLoc(), target.getDest(), target.getMixedOffsets(),
       target.getMixedSizes(), target.getMixedStrides());
   Value copied = rewriter
                      .create<linalg::CopyOp>(target.getLoc(),
diff --git a/mlir/lib/Dialect/Linalg/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/BufferizableOpInterfaceImpl.cpp
index 1f6d96ca0f81f..3512ecd9d2eb2 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -184,9 +184,9 @@ struct SoftmaxOpInterface
         getBuffer(rewriter, softmaxOp.getOutput(), options, state);
     if (failed(outputBuffer))
       return failure();
-    rewriter.create<linalg::SoftmaxOp>(softmaxOp.getLoc(),
-                                       /*result=*/TypeRange(), *inputBuffer,
-                                       *outputBuffer, softmaxOp.getDimension());
+    linalg::SoftmaxOp::create(rewriter, softmaxOp.getLoc(),
+                              /*result=*/TypeRange(), *inputBuffer,
+                              *outputBuffer, softmaxOp.getDimension());
     replaceOpWithBufferizedValues(rewriter, op, *outputBuffer);
     return success();
   }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp
index a7732b939e70d..d1eb27099db61 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp
@@ -30,10 +30,10 @@ static bool hasAllOneValues(DenseIntElementsAttr attr) {
 
 static Value createAdd(Location loc, Value x, Value y, OpBuilder &builder) {
   if (isa<IntegerType>(x.getType()))
-    return builder.create<arith::AddIOp>(loc, x, y);
+    return arith::AddIOp::create(builder, loc, x, y);
   if (isa<ComplexType>(x.getType()))
-    return builder.create<complex::AddOp>(loc, x, y);
-  return builder.create<arith::AddFOp>(loc, x, y);
+    return complex::AddOp::create(builder, loc, x, y);
+  return arith::AddFOp::create(builder, loc, x, y);
 }
 
 static Value createMul(Location loc, Value x, Value y, Type accType,
@@ -44,10 +44,10 @@ static Value createMul(Location loc, Value x, Value y, Type accType,
   Value yConvert =
       convertScalarToDtype(builder, loc, y, accType, /*isUnsignedCast=*/false);
   if (isa<ComplexType>(accType))
-    return builder.create<complex::MulOp>(loc, xConvert, yConvert);
+    return complex::MulOp::create(builder, loc, xConvert, yConvert);
   if (isa<IntegerType>(accType))
-    return builder.create<arith::MulIOp>(loc, xConvert, yConvert);
-  return builder.create<arith::MulFOp>(loc, xConvert, yConvert);
+    return arith::MulIOp::create(builder, loc, xConvert, yConvert);
+  return arith::MulFOp::create(builder, loc, xConvert, yConvert);
 }
 
 // Delinearizes the given composite `index` by the basis specified in `factors`.
@@ -56,7 +56,7 @@ static SmallVector<Value> unrollIndex(OpBuilder &b, Location loc, Value index,
   assert(!factors.empty() && "empty factor list");
   SmallVector<Value> basis;
   for (int64_t f : factors)
-    basis.push_back(b.create<arith::ConstantOp>(loc, b.getIndexAttr(f)));
+    basis.push_back(arith::ConstantOp::create(b, loc, b.getIndexAttr(f)));
   FailureOr<SmallVector<Value>> multiIndex =
       affine::delinearizeIndex(b, loc, index, basis);
   assert(!failed(multiIndex) && "Failed to linearize img2col index");
@@ -115,18 +115,18 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcHwcfOp convOp) {
   SmallVector<ReassociationIndices> filterReassocIndices = {{0, 1, 2}, {3}};
   auto reshapedFilterType =
       RankedTensorType::get({fh * fw * ic, oc}, filterType.getElementType());
-  Value reshapedFilter = rewriter.create<tensor::CollapseShapeOp>(
-      loc, reshapedFilterType, filter, filterReassocIndices);
+  Value reshapedFilter = tensor::CollapseShapeOp::create(
+      rewriter, loc, reshapedFilterType, filter, filterReassocIndices);
 
   SmallVector<ReassociationIndices> outputReassocIndices = {{0}, {1, 2}, {3}};
   RankedTensorType reshapedOutputType =
       RankedTensorType::get({n, oh * ow, oc}, outputType.getElementType());
-  Value reshapedOutput = rewriter.create<tensor::CollapseShapeOp>(
-      loc, reshapedOutputType, output, outputReassocIndices);
+  Value reshapedOutput = tensor::CollapseShapeOp::create(
+      rewriter, loc, reshapedOutputType, output, outputReassocIndices);
 
   SmallVector<int64_t> colTensorShape = {n, oh * ow, fh * fw * ic};
-  Value colTensor = rewriter.create<tensor::EmptyOp>(
-      loc, colTensorShape, inputType.getElementType());
+  Value colTensor = tensor::EmptyOp::create(rewriter, loc, colTensorShape,
+                                            inputType.getElementType());
 
   // Convert the input to a (BMK) column tensor.
   auto nloops = colTensorShape.size();
@@ -138,15 +138,15 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcHwcfOp convOp) {
   SmallVector<AffineMap> img2colIndexingMaps = {
       AffineMap::getMultiDimIdentityMap(nloops, context)};
 
-  auto img2ColTensor = rewriter.create<linalg::GenericOp>(
-      loc, colTensor.getType(),
+  auto img2ColTensor = linalg::GenericOp::create(
+      rewriter, loc, colTensor.getType(),
       /*inputs=*/ValueRange{}, /*outputs=*/colTensor, img2colIndexingMaps,
       img2colIterators,
       [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
         // Get the iterators named based on the matmul (batch, m, k).
-        Value bIndex = nestedBuilder.create<linalg::IndexOp>(loc, 0);
-        Value mIndex = nestedBuilder.create<linalg::IndexOp>(loc, 1);
-        Value kIndex = nestedBuilder.create<linalg::IndexOp>(loc, 2);
+        Value bIndex = linalg::IndexOp::create(nestedBuilder, loc, 0);
+        Value mIndex = linalg::IndexOp::create(nestedBuilder, loc, 1);
+        Value kIndex = linalg::IndexOp::create(nestedBuilder, loc, 2);
 
         // Recover the original iteration indices from the problem/input sizes.
         SmallVector<Value> mIndices = unrollIndex(
@@ -170,9 +170,9 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcHwcfOp convOp) {
 
         // im2col[n, oh*ow, fh*fw*ic] = input[n, sh*oh + fh, sw*ow + fw, ic]
         SmallVector<Value> extractionIndices{bIndex, hIndex, wIndex, icIndex};
-        Value inputVal = nestedBuilder.create<tensor::ExtractOp>(
-            loc, input, extractionIndices);
-        nestedBuilder.create<linalg::YieldOp>(nestedLoc, inputVal);
+        Value inputVal = tensor::ExtractOp::create(nestedBuilder, loc, input,
+                                                   extractionIndices);
+        linalg::YieldOp::create(nestedBuilder, nestedLoc, inputVal);
       });
 
   // Because the filter does not share the same batch dimension,
@@ -187,8 +187,8 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcHwcfOp convOp) {
   SmallVector<utils::IteratorType> genericIterators = {parallel, parallel,
                                                        parallel, reduction};
 
-  auto genericOp = rewriter.create<linalg::GenericOp>(
-      loc, reshapedOutputType,
+  auto genericOp = linalg::GenericOp::create(
+      rewriter, loc, reshapedOutputType,
       /*inputs=*/ValueRange{img2ColTensor.getResult(0), reshapedFilter},
       /*outputs=*/ValueRange{reshapedOutput},
       ArrayRef<AffineMap>{lhsMap, rhsMap, resultMap}, genericIterators,
@@ -196,12 +196,12 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcHwcfOp convOp) {
         Value mul =
             createMul(loc, args[0], args[1], args[2].getType(), nestedBuilder);
         Value add = createAdd(loc, mul, args[2], nestedBuilder);
-        nestedBuilder.create<linalg::YieldOp>(nestedLoc, add);
+        linalg::YieldOp::create(nestedBuilder, nestedLoc, add);
       });
   Value result = genericOp.getResults().front();
 
-  auto reshapedResult = rewriter.create<tensor::ExpandShapeOp>(
-      loc, outputType, result, outputReassocIndices);
+  auto reshapedResult = tensor::ExpandShapeOp::create(
+      rewriter, loc, outputType, result, outputReassocIndices);
 
   rewriter.replaceOp(convOp, ArrayRef<Value>{reshapedResult});
 
@@ -244,8 +244,8 @@ rewriteInIm2Col(RewriterBase &rewriter,
     SmallVector<int64_t> targetShape = llvm::to_vector<4>(llvm::map_range(
         indices, [&](int64_t index) -> int64_t { return inputShape[index]; }));
 
-    Value outputTensor = rewriter.create<tensor::EmptyOp>(
-        loc, targetShape, operandTensorType.getElementType());
+    Value outputTensor = tensor::EmptyOp::create(
+        rewriter, loc, targetShape, operandTensorType.getElementType());
 
     SmallVector<utils::IteratorType> loopAttributeTypes(
         nloops, utils::IteratorType::parallel);
@@ -255,12 +255,12 @@ rewriteInIm2Col(RewriterBase &rewriter,
             AffineMap::get(nloops, 0, exprs, rewriter.getContext())),
         AffineMap::getMultiDimIdentityMap(nloops, rewriter.getContext())};
 
-    auto transposedOp = rewriter.create<linalg::GenericOp>(
-        loc, outputTensor.getType(),
+    auto transposedOp = linalg::GenericOp::create(
+        rewriter, loc, outputTensor.getType(),
         /*inputs=*/operand, /*outputs=*/outputTensor, indexingMaps,
         loopAttributeTypes,
         [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
-          nestedBuilder.create<linalg::YieldOp>(nestedLoc, args[0]);
+          linalg::YieldOp::create(nestedBuilder, nestedLoc, args[0]);
         });
 
     return transposedOp.getResult(0);
@@ -307,15 +307,15 @@ rewriteInIm2Col(RewriterBase &rewriter,
       AffineMap::get(nloops, 0, inputExprs, rewriter.getContext()),
       AffineMap::getMultiDimIdentityMap(nloops, rewriter.getContext())};
 
-  Value colTensor = rewriter.create<tensor::EmptyOp>(
-      loc, colTensorShape, inputType.getElementType());
+  Value colTensor = tensor::EmptyOp::create(rewriter, loc, colTensorShape,
+                                            inputType.getElementType());
 
-  auto img2ColTensor = rewriter.create<linalg::GenericOp>(
-      loc, colTensor.getType(),
+  auto img2ColTensor = linalg::GenericOp::create(
+      rewriter, loc, colTensor.getType(),
       /*inputs=*/inputT, /*outputs=*/colTensor, indexingMaps,
       loopAttributeTypes,
       [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
-        nestedBuilder.create<linalg::YieldOp>(nestedLoc, args[0]);
+        linalg::YieldOp::create(nestedBuilder, nestedLoc, args[0]);
       });
 
   SmallVector<ReassociationIndices> img2ColTensorReassocIndices = {
@@ -331,26 +331,27 @@ rewriteInIm2Col(RewriterBase &rewriter,
   auto reshapedOutputTensorType =
       RankedTensorType::get({n * c, oh * ow}, outputType.getElementType());
 
-  Value reshapedImg2ColTensor = rewriter.create<tensor::CollapseShapeOp>(
-      loc, reshapedImg2ColTensorType, img2ColTensor.getResult(0),
+  Value reshapedImg2ColTensor = tensor::CollapseShapeOp::create(
+      rewriter, loc, reshapedImg2ColTensorType, img2ColTensor.getResult(0),
       img2ColTensorReassocIndices);
-  Value reshapedFilterTensor = rewriter.create<tensor::CollapseShapeOp>(
-      loc, reshapedFilterTensorType, filterT, filterReassociationIndice);
-  Value reshapedoutputTensor = rewriter.create<tensor::CollapseShapeOp>(
-      loc, reshapedOutputTensorType, transposedOutputTensor,
+  Value reshapedFilterTensor =
+      tensor::CollapseShapeOp::create(rewriter, loc, reshapedFilterTensorType,
+                                      filterT, filterReassociationIndice);
+  Value reshapedoutputTensor = tensor::CollapseShapeOp::create(
+      rewriter, loc, reshapedOutputTensorType, transposedOutputTensor,
       outputReassociationIndice);
 
-  auto batchMatVecResult = rewriter.create<linalg::BatchMatvecOp>(
-      loc, TypeRange{reshapedoutputTensor.getType()},
+  auto batchMatVecResult = linalg::BatchMatvecOp::create(
+      rewriter, loc, TypeRange{reshapedoutputTensor.getType()},
       ValueRange{reshapedImg2ColTensor, reshapedFilterTensor},
       ValueRange{reshapedoutputTensor});
 
   SmallVector<ReassociationIndices> batchMatVecReassociationIndice = {{0, 1},
                                                                       {2, 3}};
 
-  auto batchMatVecResultReshaped = rewriter.create<tensor::ExpandShapeOp>(
-      loc, transposedOutputTensor.getType(), batchMatVecResult.getResult(0),
-      batchMatVecReassociationIndice);
+  auto batchMatVecResultReshaped = tensor::ExpandShapeOp::create(
+      rewriter, loc, transposedOutputTensor.getType(),
+      batchMatVecResult.getResult(0), batchMatVecReassociationIndice);
 
   Value transposedResult =
       transposeOperand(batchMatVecResultReshaped, {0, 2, 3, 1});
@@ -400,19 +401,19 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNchwFchwOp convOp) {
   SmallVector<ReassociationIndices> filterReassocIndices = {{0}, {1, 2, 3}};
   auto reshapedFilterType =
       RankedTensorType::get({oc, ic * fh * fw}, inputType.getElementType());
-  Value reshapedFilter = rewriter.create<tensor::CollapseShapeOp>(
-      loc, reshapedFilterType, filter, filterReassocIndices);
+  Value reshapedFilter = tensor::CollapseShapeOp::create(
+      rewriter, loc, reshapedFilterType, filter, filterReassocIndices);
 
   SmallVector<ReassociationIndices> outputReassocIndices = {{0}, {1}, {2, 3}};
   auto reshapedOutputType =
       RankedTensorType::get({n, oc, oh * ow}, outputType.getElementType());
-  Value reshapedOutput = rewriter.create<tensor::CollapseShapeOp>(
-      loc, reshapedOutputType, output, outputReassocIndices);
+  Value reshapedOutput = tensor::CollapseShapeOp::create(
+      rewriter, loc, reshapedOutputType, output, outputReassocIndices);
 
   // Convert the input to a (BKN) tensor.
   SmallVector<int64_t, 4> colTensorShape = {n, ic * fh * fw, oh * ow};
-  Value colTensor = rewriter.create<tensor::EmptyOp>(
-      loc, colTensorShape, inputType.getElementType());
+  Value colTensor = tensor::EmptyOp::create(rewriter, loc, colTensorShape,
+                                            inputType.getElementType());
 
   auto nloops = colTensorShape.size();
 
@@ -423,15 +424,15 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNchwFchwOp convOp) {
   SmallVector<AffineMap, 4> img2colIndexingMaps = {
       AffineMap::getMultiDimIdentityMap(nloops, context)};
 
-  auto img2ColTensor = rewriter.create<linalg::GenericOp>(
-      loc, colTensor.getType(),
+  auto img2ColTensor = linalg::GenericOp::create(
+      rewriter, loc, colTensor.getType(),
       /*inputs=*/ValueRange{}, /*outputs=*/colTensor, img2colIndexingMaps,
       img2colIterators,
       [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
         // Get the iterators named based on the matmul (batch, m, k).
-        Value bIndex = nestedBuilder.create<linalg::IndexOp>(loc, 0);
-        Value kIndex = nestedBuilder.create<linalg::IndexOp>(loc, 1);
-        Value nIndex = nestedBuilder.create<linalg::IndexOp>(loc, 2);
+        Value bIndex = linalg::IndexOp::create(nestedBuilder, loc, 0);
+        Value kIndex = linalg::IndexOp::create(nestedBuilder, loc, 1);
+        Value nIndex = linalg::IndexOp::create(nestedBuilder, loc, 2);
 
         // Recover the original iteration indices from the problem/input sizes.
         SmallVector<Value> kIndices = unrollIndex(
@@ -455,9 +456,9 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNchwFchwOp convOp) {
 
         // im2col[n, ic*fh*fw, oh*ow] = input[n, ic, sh*oh + fh, sw*ow + fw]
         SmallVector<Value> extractionIndices{bIndex, icIndex, hIndex, wIndex};
-        Value inputVal = nestedBuilder.create<tensor::ExtractOp>(
-            loc, input, extractionIndices);
-        nestedBuilder.create<linalg::YieldOp>(nestedLoc, inputVal);
+        Value inputVal = tensor::ExtractOp::create(nestedBuilder, loc, input,
+                                                   extractionIndices);
+        linalg::YieldOp::create(nestedBuilder, nestedLoc, inputVal);
       });
 
   // Because the filter does not share the same batch dimension,
@@ -471,8 +472,8 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNchwFchwOp convOp) {
   auto resultMap = AffineMap::get(4, 0, {bDim, mDim, nDim}, context);
   SmallVector<utils::IteratorType> genericIterators = {parallel, parallel,
                                                        parallel, reduction};
-  auto genericOp = rewriter.create<linalg::GenericOp>(
-      loc, reshapedOutputType,
+  auto genericOp = linalg::GenericOp::create(
+      rewriter, loc, reshapedOutputType,
       /*inputs=*/ValueRange{reshapedFilter, img2ColTensor.getResult(0)},
       /*outputs=*/ValueRange{reshapedOutput},
       ArrayRef<AffineMap>{lhsMap, rhsMap, resultMap}, genericIterators,
@@ -480,12 +481,12 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNchwFchwOp convOp) {
         Value mul =
             createMul(loc, args[0], args[1], args[2].getType(), nestedBuilder);
         Value add = createAdd(loc, mul, args[2], nestedBuilder);
-        nestedBuilder.create<linalg::YieldOp>(nestedLoc, add);
+        linalg::YieldOp::create(nestedBuilder, nestedLoc, add);
       });
   Value result = genericOp.getResults().front();
 
-  auto reshapedResult = rewriter.create<tensor::ExpandShapeOp>(
-      loc, outputType, result, outputReassocIndices);
+  auto reshapedResult = tensor::ExpandShapeOp::create(
+      rewriter, loc, outputType, result, outputReassocIndices);
 
   rewriter.replaceOp(convOp, ArrayRef<Value>{reshapedResult});
 
@@ -535,18 +536,18 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp) {
   SmallVector<ReassociationIndices> filterReassocIndices = {{0}, {1, 2, 3}};
   auto reshapedFilterType =
       RankedTensorType::get({oc, fh * fw * ic}, filterType.getElementType());
-  Value reshapedFilter = rewriter.create<tensor::CollapseShapeOp>(
-      loc, reshapedFilterType, filter, filterReassocIndices);
+  Value reshapedFilter = tensor::CollapseShapeOp::create(
+      rewriter, loc, reshapedFilterType, filter, filterReassocIndices);
 
   SmallVector<ReassociationIndices> outputReassocIndices = {{0}, {1, 2}, {3}};
   RankedTensorType reshapedOutputType =
       RankedTensorType::get({n, oh * ow, oc}, outputType.getElementType());
-  Value reshapedOutput = rewriter.create<tensor::CollapseShapeOp>(
-      loc, reshapedOutputType, output, outputReassocIndices);
+  Value reshapedOutput = tensor::CollapseShapeOp::create(
+      rewriter, loc, reshapedOutputType, output, outputReassocIndices);
 
   SmallVector<int64_t> colTensorShape = {n, oh * ow, fh * fw * ic};
-  Value colTensor = rewriter.create<tensor::EmptyOp>(
-      loc, colTensorShape, inputType.getElementType());
+  Value colTensor = tensor::EmptyOp::create(rewriter, loc, colTensorShape,
+                                            inputType.getElementType());
 
   // Convert the input to a (BMK) column tensor.
   auto nloops = colTensorShape.size();
@@ -558,15 +559,15 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp) {
   SmallVector<AffineMap> img2colIndexingMaps = {
       AffineMap::getMultiDimIdentityMap(nloops, context)};
 
-  auto img2ColTensor = rewriter.create<linalg::GenericOp>(
-      loc, colTensor.getType(),
+  auto img2ColTensor = linalg::GenericOp::create(
+      rewriter, loc, colTensor.getType(),
       /*inputs=*/ValueRange{}, /*outputs=*/colTensor, img2colIndexingMaps,
       img2colIterators,
       [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
         // Get the iterators named based on the matmul (batch, m, k).
-        Value bIndex = nestedBuilder.create<linalg::IndexOp>(loc, 0);
-        Value mIndex = nestedBuilder.create<linalg::IndexOp>(loc, 1);
-        Value kIndex = nestedBuilder.create<linalg::IndexOp>(loc, 2);
+        Value bIndex = linalg::IndexOp::create(nestedBuilder, loc, 0);
+        Value mIndex = linalg::IndexOp::create(nestedBuilder, loc, 1);
+        Value kIndex = linalg::IndexOp::create(nestedBuilder, loc, 2);
 
         // Recover the original iteration indices from the problem/input sizes.
         SmallVector<Value> mIndices = unrollIndex(
@@ -590,9 +591,9 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp) {
 
         // im2col[n, oh*ow, fh*fw*ic] = input[n, sh*oh + fh, sw*ow + fw, ic]
         SmallVector<Value> extractionIndices{bIndex, hIndex, wIndex, icIndex};
-        Value inputVal = nestedBuilder.create<tensor::ExtractOp>(
-            loc, input, extractionIndices);
-        nestedBuilder.create<linalg::YieldOp>(nestedLoc, inputVal);
+        Value inputVal = tensor::ExtractOp::create(nestedBuilder, loc, input,
+                                                   extractionIndices);
+        linalg::YieldOp::create(nestedBuilder, nestedLoc, inputVal);
       });
 
   // Because we didn't transpose the filters we don't actually have a batched
@@ -606,8 +607,8 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp) {
   SmallVector<utils::IteratorType> genericIterators = {parallel, parallel,
                                                        parallel, reduction};
 
-  auto genericOp = rewriter.create<linalg::GenericOp>(
-      loc, reshapedOutputType,
+  auto genericOp = linalg::GenericOp::create(
+      rewriter, loc, reshapedOutputType,
       /*inputs=*/ValueRange{img2ColTensor.getResult(0), reshapedFilter},
       /*outputs=*/ValueRange{reshapedOutput},
       ArrayRef<AffineMap>{lhsMap, rhsMap, resultMap}, genericIterators,
@@ -615,12 +616,12 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp) {
         Value mul =
             createMul(loc, args[0], args[1], args[2].getType(), nestedBuilder);
         Value add = createAdd(loc, mul, args[2], nestedBuilder);
-        nestedBuilder.create<linalg::YieldOp>(nestedLoc, add);
+        linalg::YieldOp::create(nestedBuilder, nestedLoc, add);
       });
   Value result = genericOp.getResults().front();
 
-  auto reshapedResult = rewriter.create<tensor::ExpandShapeOp>(
-      loc, outputType, result, outputReassocIndices);
+  auto reshapedResult = tensor::ExpandShapeOp::create(
+      rewriter, loc, outputType, result, outputReassocIndices);
 
   rewriter.replaceOp(convOp, ArrayRef<Value>{reshapedResult});
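
Note: every change in ConvertConv2DToImg2Col.cpp above is the same mechanical
substitution, including inside region-builder callbacks, where the nested
builder is now passed explicitly as the first argument. A minimal sketch of the
pattern, with illustrative operand names that are not taken from the patch:

  // Old form: the op class is a template argument of OpBuilder::create.
  //   auto op = rewriter.create<linalg::GenericOp>(loc, resultTy, ins, outs,
  //                                                maps, iters, bodyFn);
  // New form: the builder is the first argument of the op's static create.
  auto op = linalg::GenericOp::create(
      rewriter, loc, resultTy, /*inputs=*/ins, /*outputs=*/outs, maps, iters,
      [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
        // The same rule applies to ops built inside the body region.
        linalg::YieldOp::create(nestedBuilder, nestedLoc, args[0]);
      });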
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
index 39e2aac27e213..76ddee4f0e9cf 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
@@ -37,8 +37,8 @@ static Value createInserts(RewriterBase &rewriter, Location loc, int dim,
   if (dim == static_cast<int>(shape.size()) - 1) {
     for (int i = 0; i < shape.back(); ++i) {
       indices.back() = constants[i];
-      destination = rewriter.create<tensor::InsertOp>(loc, *elementIt,
-                                                      destination, indices);
+      destination = tensor::InsertOp::create(rewriter, loc, *elementIt,
+                                             destination, indices);
       ++elementIt;
     }
     return destination;
@@ -65,27 +65,27 @@ static void createMemcpy(OpBuilder &b, Location loc, Value tensorSource,
       MaterializeInDestination: {
     // Note: This is the preferred way of memcpy'ing because no layout map
     // and/or memory space must be specified for the source.
-    auto materializeOp = b.create<bufferization::MaterializeInDestinationOp>(
-        loc, tensorSource, memrefDest);
+    auto materializeOp = bufferization::MaterializeInDestinationOp::create(
+        b, loc, tensorSource, memrefDest);
     materializeOp.setWritable(true);
   } break;
   case linalg::BufferizeToAllocationOptions::MemcpyOp::MemrefCopy: {
     // TODO: Support custom memory space on source.
     // We do not know the layout map of the source yet, so use a fully dynamic
     // layout for best compatibility.
-    Value toBuffer = b.create<bufferization::ToBufferOp>(
-        loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType),
+    Value toBuffer = bufferization::ToBufferOp::create(
+        b, loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType),
         tensorSource, /*readOnly=*/true);
-    b.create<memref::CopyOp>(loc, toBuffer, memrefDest);
+    memref::CopyOp::create(b, loc, toBuffer, memrefDest);
   } break;
   case linalg::BufferizeToAllocationOptions::MemcpyOp::LinalgCopy: {
     // TODO: Support custom memory space on source.
     // We do not know the layout map of the source yet, so use a fully dynamic
     // layout for best compatibility.
-    Value toBuffer = b.create<bufferization::ToBufferOp>(
-        loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType),
+    Value toBuffer = bufferization::ToBufferOp::create(
+        b, loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType),
         tensorSource, /*readOnly=*/true);
-    b.create<linalg::CopyOp>(loc, toBuffer, memrefDest);
+    linalg::CopyOp::create(b, loc, toBuffer, memrefDest);
   } break;
   };
 }
@@ -120,15 +120,15 @@ static Operation *movePaddingToFillOrGenericOp(RewriterBase &rewriter,
             ->materializeConstant(rewriter, constYieldedValue,
                                   yieldedValue.getType(), yieldedValue.getLoc())
             ->getResult(0);
-    auto fillOp = rewriter.create<linalg::FillOp>(loc, ValueRange(fillValue),
-                                                  ValueRange(dest));
+    auto fillOp = linalg::FillOp::create(rewriter, loc, ValueRange(fillValue),
+                                         ValueRange(dest));
     return fillOp;
   }
 
   if (invariantYieldedValue) {
     // Padding with an invariant value.
-    auto fillOp = rewriter.create<linalg::FillOp>(loc, ValueRange(yieldedValue),
-                                                  ValueRange(dest));
+    auto fillOp = linalg::FillOp::create(
+        rewriter, loc, ValueRange(yieldedValue), ValueRange(dest));
     return fillOp;
   }
 
@@ -137,8 +137,8 @@ static Operation *movePaddingToFillOrGenericOp(RewriterBase &rewriter,
                                                  utils::IteratorType::parallel);
   SmallVector<AffineMap> indexingMaps(
       1, rewriter.getMultiDimIdentityMap(resultType.getRank()));
-  auto genericOp = rewriter.create<linalg::GenericOp>(
-      loc, resultType, /*inputs=*/ValueRange(),
+  auto genericOp = linalg::GenericOp::create(
+      rewriter, loc, resultType, /*inputs=*/ValueRange(),
       /*outputs=*/ValueRange{dest}, /*indexingMaps=*/
       indexingMaps, iteratorTypes);
   Block *body = rewriter.createBlock(&genericOp->getRegion(0), {},
@@ -146,7 +146,7 @@ static Operation *movePaddingToFillOrGenericOp(RewriterBase &rewriter,
   rewriter.setInsertionPointToStart(body);
   SmallVector<Value> bbArgReplacements;
   for (int64_t i = 0; i < resultType.getRank(); ++i)
-    bbArgReplacements.push_back(rewriter.create<linalg::IndexOp>(loc, i));
+    bbArgReplacements.push_back(linalg::IndexOp::create(rewriter, loc, i));
   rewriter.mergeBlocks(padOp.getBody(), body, bbArgReplacements);
 
   // Update terminator.
@@ -179,8 +179,8 @@ static SmallVector<Value> reifyOrComputeDynamicSizes(OpBuilder &b,
   for (int64_t i = 0; i < tensorType.getRank(); ++i) {
     if (tensorType.isDynamicDim(i))
       dynSizes.push_back(
-          b.create<DimOp>(value.getLoc(), value,
-                          b.create<arith::ConstantIndexOp>(value.getLoc(), i)));
+          DimOp::create(b, value.getLoc(), value,
+                        arith::ConstantIndexOp::create(b, value.getLoc(), i)));
   }
   return dynSizes;
 }
@@ -201,15 +201,15 @@ createAllocationForTensor(RewriterBase &rewriter, Location loc, Value value,
   Value alloc;
   if (options.allocOp ==
       linalg::BufferizeToAllocationOptions::AllocOp::MemrefAlloc) {
-    alloc = rewriter.create<memref::AllocOp>(loc, memrefType, dynamicSizes);
+    alloc = memref::AllocOp::create(rewriter, loc, memrefType, dynamicSizes);
     if (options.emitDealloc) {
       // Place deallocation at the end of the block.
       rewriter.setInsertionPoint(rewriter.getInsertionBlock()->getTerminator());
-      rewriter.create<memref::DeallocOp>(loc, alloc);
+      memref::DeallocOp::create(rewriter, loc, alloc);
     }
   } else if (options.allocOp ==
              linalg::BufferizeToAllocationOptions::AllocOp::MemrefAlloca) {
-    alloc = rewriter.create<memref::AllocaOp>(loc, memrefType, dynamicSizes);
+    alloc = memref::AllocaOp::create(rewriter, loc, memrefType, dynamicSizes);
     // No dealloc is needed.
   }
 
@@ -243,14 +243,14 @@ Value linalg::bufferizeToAllocation(
       getMixedSizes(rewriter, loc, padOp.getSource());
   SmallVector<OpFoldResult> strides(padOp.getResultType().getRank(),
                                     rewriter.getIndexAttr(1));
-  Value subview = rewriter.create<memref::SubViewOp>(
-      loc, alloc, /*offsets=*/padOp.getMixedLowPad(), sizes, strides);
+  Value subview = memref::SubViewOp::create(
+      rewriter, loc, alloc, /*offsets=*/padOp.getMixedLowPad(), sizes, strides);
   createMemcpy(rewriter, loc, padOp.getSource(), subview, options);
 
   // Create bufferization.to_tensor with "restrict" and "writable". The returned
   // tensor is a new buffer allocation, so it does not alias with any buffer.
-  Value toTensorOp = rewriter.create<bufferization::ToTensorOp>(
-      loc, padOp.getResult().getType(), alloc, /*restrict=*/true,
+  Value toTensorOp = bufferization::ToTensorOp::create(
+      rewriter, loc, padOp.getResult().getType(), alloc, /*restrict=*/true,
       /*writable=*/true);
   rewriter.replaceOp(padOp, toTensorOp);
   return alloc;
@@ -338,8 +338,9 @@ Value linalg::bufferizeToAllocation(
 
   // Create bufferization.to_tensor with "restrict" and "writable". The returned
   // tensor is a new buffer allocation, so it does not alias with any buffer.
-  Value toTensorOp = rewriter.create<bufferization::ToTensorOp>(
-      loc, allocTensorOp.getResult().getType(), alloc, /*restrict=*/true,
+  Value toTensorOp = bufferization::ToTensorOp::create(
+      rewriter, loc, allocTensorOp.getResult().getType(), alloc,
+      /*restrict=*/true,
       /*writable=*/true);
   rewriter.replaceOp(allocTensorOp, toTensorOp);
   return alloc;
@@ -354,7 +355,7 @@ FailureOr<Operation *> mlir::linalg::rewriteInDestinationPassingStyle(
   auto shape = tensorType.getShape();
 
   // Create tensor.empty.
-  auto emptyOp = rewriter.create<EmptyOp>(loc, tensorType, ValueRange());
+  auto emptyOp = EmptyOp::create(rewriter, loc, tensorType, ValueRange());
 
   // Case: tensor<elem_type>.
   if (shape.empty()) {
@@ -369,7 +370,7 @@ FailureOr<Operation *> mlir::linalg::rewriteInDestinationPassingStyle(
   SmallVector<Value, 2> constants;
   constants.reserve(maxDim);
   for (int i = 0; i < maxDim; ++i)
-    constants.push_back(rewriter.create<arith::ConstantIndexOp>(loc, i));
+    constants.push_back(arith::ConstantIndexOp::create(rewriter, loc, i));
 
   // Traverse all elements and create tensor.insert ops.
   auto elementIt = fromElementsOp.getElements().begin();
@@ -394,16 +395,16 @@ mlir::linalg::rewriteInDestinationPassingStyle(RewriterBase &rewriter,
   RankedTensorType tensorType = cast<RankedTensorType>(generateOp.getType());
 
   // Create tensor.empty.
-  auto emptyOp =
-      rewriter.create<EmptyOp>(loc, tensorType, generateOp.getDynamicExtents());
+  auto emptyOp = EmptyOp::create(rewriter, loc, tensorType,
+                                 generateOp.getDynamicExtents());
 
   // Create linalg.generic.
   SmallVector<utils::IteratorType> iteratorTypes(tensorType.getRank(),
                                                  utils::IteratorType::parallel);
   SmallVector<AffineMap> indexingMaps(
       1, rewriter.getMultiDimIdentityMap(tensorType.getRank()));
-  auto genericOp = rewriter.create<linalg::GenericOp>(
-      loc, tensorType, /*inputs=*/ValueRange(),
+  auto genericOp = linalg::GenericOp::create(
+      rewriter, loc, tensorType, /*inputs=*/ValueRange(),
       /*outputs=*/ValueRange{emptyOp.getResult()}, /*indexingMaps=*/
       indexingMaps, iteratorTypes);
   Block *body = rewriter.createBlock(&genericOp->getRegion(0), {},
@@ -411,7 +412,7 @@ mlir::linalg::rewriteInDestinationPassingStyle(RewriterBase &rewriter,
   rewriter.setInsertionPointToStart(body);
   SmallVector<Value> bbArgReplacements;
   for (int64_t i = 0; i < tensorType.getRank(); ++i)
-    bbArgReplacements.push_back(rewriter.create<linalg::IndexOp>(loc, i));
+    bbArgReplacements.push_back(linalg::IndexOp::create(rewriter, loc, i));
   rewriter.mergeBlocks(&generateOp.getBody().front(), body, bbArgReplacements);
 
   // Update terminator.
@@ -450,13 +451,13 @@ mlir::linalg::rewriteInDestinationPassingStyle(RewriterBase &rewriter,
       llvm::all_of(padOp.getMixedHighPad(), isZeroInteger)) {
     using bufferization::AllocTensorOp;
     Value allocated =
-        rewriter.create<AllocTensorOp>(loc, resultType, dynamicSizes);
+        AllocTensorOp::create(rewriter, loc, resultType, dynamicSizes);
     auto copyOp = rewriter.replaceOpWithNewOp<linalg::CopyOp>(
         padOp, padOp.getSource(), allocated);
     return copyOp.getOperation();
   }
 
-  Value empty = rewriter.create<EmptyOp>(loc, resultType, dynamicSizes);
+  Value empty = EmptyOp::create(rewriter, loc, resultType, dynamicSizes);
   // Create linalg.fill or linalg.generic.
   Operation *fillOp = movePaddingToFillOrGenericOp(rewriter, loc, padOp, empty);
   rewriter.setInsertionPointAfter(fillOp);
@@ -567,8 +568,8 @@ Value linalg::bufferizeToAllocation(
       createMemcpy(rewriter, op->getLoc(), operand->get(), alloc, options);
     }
     rewriter.modifyOpInPlace(op, [&]() {
-      auto toTensorOp = rewriter.create<ToTensorOp>(
-          op->getLoc(), operand->get().getType(), alloc);
+      auto toTensorOp = ToTensorOp::create(rewriter, op->getLoc(),
+                                           operand->get().getType(), alloc);
       operand->set(toTensorOp);
       if (options.bufferizeDestinationOnly) {
         rewriter.modifyOpInPlace(toTensorOp, [&]() {
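
Note: the static create returns the same typed op handle that
OpBuilder::create<OpTy> did, so chained accessors and setters keep working, as
in the createMemcpy hunk above:

  auto materializeOp = bufferization::MaterializeInDestinationOp::create(
      b, loc, tensorSource, memrefDest);
  materializeOp.setWritable(true);   // typed-op API is untouched
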
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
index 70574903f7111..91a297f7b9db7 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
@@ -287,8 +287,8 @@ getOrCreatePackedViewOfOperand(OpBuilder &b, Location loc, PackInfo packInfo,
 
   auto empty = linalg::PackOp::createDestinationTensor(
       b, loc, opOperand->get(), innerTileSizes, innerDimsPos, outerDimsPerm);
-  auto packedOperand = b.create<linalg::PackOp>(
-      loc, opOperand->get(), empty, innerDimsPos, innerTileSizes,
+  auto packedOperand = linalg::PackOp::create(
+      b, loc, opOperand->get(), empty, innerDimsPos, innerTileSizes,
       /*padding=*/std::nullopt, outerDimsPerm);
   return std::make_tuple(packedOperand, indexingMap);
 }
@@ -345,8 +345,9 @@ static GenericOp packGenericOp(RewriterBase &rewriter, GenericOp genericOp,
 
   indexingMaps.push_back(packedOutIndexingMap);
 
-  auto newGenericOp = rewriter.create<linalg::GenericOp>(
-      loc, dest.getType(), inputOperands, dest, indexingMaps, iterTypes,
+  auto newGenericOp = linalg::GenericOp::create(
+      rewriter, loc, dest.getType(), inputOperands, dest, indexingMaps,
+      iterTypes,
       /*bodyBuild=*/nullptr, linalg::getPrunedAttributeList(genericOp));
   rewriter.cloneRegionBefore(genericOp.getRegion(), newGenericOp.getRegion(),
                              newGenericOp.getRegion().begin());
@@ -457,9 +458,9 @@ bubbleUpPackOpThroughGenericOp(RewriterBase &rewriter, linalg::PackOp packOp,
   if (!packOpDest.hasOneUse())
     return failure();
   if (auto emptyOp = packOpDest.getDefiningOp<tensor::EmptyOp>()) {
-    packOpDest = rewriter.create<tensor::EmptyOp>(
-        genericOp->getLoc(), emptyOp.getMixedSizes(),
-        emptyOp.getType().getElementType());
+    packOpDest = tensor::EmptyOp::create(rewriter, genericOp->getLoc(),
+                                         emptyOp.getMixedSizes(),
+                                         emptyOp.getType().getElementType());
   } else {
     DominanceInfo dom(genericOp);
     if (!dom.properlyDominates(packOpDest, genericOp))
@@ -562,8 +563,8 @@ class BubbleUpPackThroughPadOp final : public OpRewritePattern<linalg::PackOp> {
     auto empty = linalg::PackOp::createDestinationTensor(
         rewriter, loc, padOp.getSource(), mixedTiles, innerDimsPos,
         outerDimsPerm);
-    auto sourcePack = rewriter.create<linalg::PackOp>(
-        loc, padOp.getSource(), empty, innerDimsPos, mixedTiles,
+    auto sourcePack = linalg::PackOp::create(
+        rewriter, loc, padOp.getSource(), empty, innerDimsPos, mixedTiles,
         /*padding=*/std::nullopt, outerDimsPerm);
 
     // If we have `outer_dims_perms` we need to adjust the padded dimensions.
@@ -579,17 +580,18 @@ class BubbleUpPackThroughPadOp final : public OpRewritePattern<linalg::PackOp> {
     lowPad.append(pointLoopsSize, rewriter.getIndexAttr(0));
     highPad.append(pointLoopsSize, rewriter.getIndexAttr(0));
 
-    auto newPadOp = rewriter.create<tensor::PadOp>(
-        loc, /*result=*/Type(), sourcePack, lowPad, highPad, paddingVal,
-        padOp.getNofold());
+    auto newPadOp =
+        tensor::PadOp::create(rewriter, loc, /*result=*/Type(), sourcePack,
+                              lowPad, highPad, paddingVal, padOp.getNofold());
 
     // If the pad has more than one user, create an unpack on the new pad to
     // replace the other uses.
     if (!padOp->hasOneUse()) {
       auto unpackEmpty = linalg::UnPackOp::createDestinationTensor(
           rewriter, loc, newPadOp, mixedTiles, innerDimsPos, outerDimsPerm);
-      Value unpackedPad = rewriter.create<linalg::UnPackOp>(
-          loc, newPadOp, unpackEmpty, innerDimsPos, mixedTiles, outerDimsPerm);
+      Value unpackedPad =
+          linalg::UnPackOp::create(rewriter, loc, newPadOp, unpackEmpty,
+                                   innerDimsPos, mixedTiles, outerDimsPerm);
       rewriter.replaceAllUsesExcept(padOp, unpackedPad, sourcePack);
     }
 
@@ -719,9 +721,10 @@ bubbleUpPackOpThroughCollapseShape(tensor::CollapseShapeOp collapseOp,
   auto emptyOp = linalg::PackOp::createDestinationTensor(
       rewriter, packOp.getLoc(), collapseOp.getSrc(), packOp.getMixedTiles(),
       projectedInnerDimsPos, newOuterDimsPerm);
-  auto newPackOp = rewriter.create<linalg::PackOp>(
-      packOp.getLoc(), collapseOp.getSrc(), emptyOp, projectedInnerDimsPos,
-      packOp.getMixedTiles(), packOp.getPaddingValue(), newOuterDimsPerm);
+  auto newPackOp = linalg::PackOp::create(
+      rewriter, packOp.getLoc(), collapseOp.getSrc(), emptyOp,
+      projectedInnerDimsPos, packOp.getMixedTiles(), packOp.getPaddingValue(),
+      newOuterDimsPerm);
 
   SmallVector<ReassociationIndices> newReassocIndices = reassocIndices;
   // First apply the permutation on the reassociations of the outer dims.
@@ -735,8 +738,9 @@ bubbleUpPackOpThroughCollapseShape(tensor::CollapseShapeOp collapseOp,
     nextPos += 1;
   }
 
-  auto newCollapseOp = rewriter.create<tensor::CollapseShapeOp>(
-      collapseOp.getLoc(), packOp.getType(), newPackOp, newReassocIndices);
+  auto newCollapseOp = tensor::CollapseShapeOp::create(
+      rewriter, collapseOp.getLoc(), packOp.getType(), newPackOp,
+      newReassocIndices);
   rewriter.replaceOp(packOp, newCollapseOp);
 
   return success();
@@ -853,13 +857,14 @@ bubbleUpPackOpThroughExpandShape(tensor::ExpandShapeOp expandOp,
   Value destTensor = linalg::PackOp::createDestinationTensor(
       rewriter, packOp.getLoc(), expandOp.getSrc(), packOp.getMixedTiles(),
       projectedInnerDimsPos, /*outerDimsPerm=*/SmallVector<int64_t>{});
-  Value packedVal = rewriter.create<linalg::PackOp>(
-      packOp.getLoc(), expandOp.getSrc(), destTensor, projectedInnerDimsPos,
-      packOp.getMixedTiles(), packOp.getPaddingValue(),
+  Value packedVal = linalg::PackOp::create(
+      rewriter, packOp.getLoc(), expandOp.getSrc(), destTensor,
+      projectedInnerDimsPos, packOp.getMixedTiles(), packOp.getPaddingValue(),
       /*outerDimsPerm=*/SmallVector<int64_t>{});
 
-  Value newExpandOp = rewriter.create<tensor::ExpandShapeOp>(
-      packOp.getLoc(), packOp.getDestType(), packedVal, *reassocExpand);
+  Value newExpandOp = tensor::ExpandShapeOp::create(rewriter, packOp.getLoc(),
+                                                    packOp.getDestType(),
+                                                    packedVal, *reassocExpand);
   rewriter.replaceOp(packOp, newExpandOp);
 
   return success();
@@ -972,15 +977,15 @@ static LogicalResult pushDownUnPackOpThroughExpandShape(
 
   RankedTensorType newExpandType = linalg::PackOp::inferPackedType(
       expandTy, innerTileSizes, projectedInnerDimsPos, newOuterDimsPerm);
-  auto newExpandOp = rewriter.create<tensor::ExpandShapeOp>(
-      expandOp.getLoc(), newExpandType, unPackOp.getSource(),
-      newReassocIndices);
+  auto newExpandOp =
+      tensor::ExpandShapeOp::create(rewriter, expandOp.getLoc(), newExpandType,
+                                    unPackOp.getSource(), newReassocIndices);
 
   auto emptyOp = linalg::UnPackOp::createDestinationTensor(
       rewriter, unPackOp.getLoc(), newExpandOp, unPackOp.getMixedTiles(),
       projectedInnerDimsPos, newOuterDimsPerm);
-  auto newUnPackOp = rewriter.create<linalg::UnPackOp>(
-      unPackOp.getLoc(), newExpandOp.getResult(), emptyOp,
+  auto newUnPackOp = linalg::UnPackOp::create(
+      rewriter, unPackOp.getLoc(), newExpandOp.getResult(), emptyOp,
       projectedInnerDimsPos, unPackOp.getMixedTiles(), newOuterDimsPerm);
   rewriter.replaceOp(expandOp, newUnPackOp);
 
@@ -1212,17 +1217,17 @@ struct PushDownUnPackThroughPadOp : public OpRewritePattern<tensor::PadOp> {
     lowPad.append(pointLoopsSize, rewriter.getIndexAttr(0));
     highPad.append(pointLoopsSize, rewriter.getIndexAttr(0));
 
-    auto newPadOp = rewriter.create<tensor::PadOp>(
-        loc, /*result=*/Type(), unpackOp.getSource(), lowPad, highPad,
-        paddingVal, padOp.getNofold());
+    auto newPadOp = tensor::PadOp::create(rewriter, loc, /*result=*/Type(),
+                                          unpackOp.getSource(), lowPad, highPad,
+                                          paddingVal, padOp.getNofold());
 
     // Inject the linalg.unpack right after the packed padOp.
-    Value outputUnPack = rewriter.create<tensor::EmptyOp>(
-        loc, padOp.getResultType().getShape(),
-        padOp.getResultType().getElementType());
+    Value outputUnPack =
+        tensor::EmptyOp::create(rewriter, loc, padOp.getResultType().getShape(),
+                                padOp.getResultType().getElementType());
 
-    Value replacement = rewriter.create<linalg::UnPackOp>(
-        loc, newPadOp.getResult(), outputUnPack, innerDimsPos,
+    Value replacement = linalg::UnPackOp::create(
+        rewriter, loc, newPadOp.getResult(), outputUnPack, innerDimsPos,
         unpackOp.getMixedTiles(), outerDimsPerm);
     rewriter.replaceOp(padOp, replacement);
     return success();
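
Note: helpers such as linalg::PackOp::createDestinationTensor already took the
builder as their first parameter, so in DataLayoutPropagation.cpp only the
adjacent create calls change. The typical pairing after this patch looks like
this (operand names are illustrative):

  auto empty = linalg::PackOp::createDestinationTensor(
      rewriter, loc, source, innerTileSizes, innerDimsPos, outerDimsPerm);
  auto packed = linalg::PackOp::create(
      rewriter, loc, source, empty, innerDimsPos, innerTileSizes,
      /*padding=*/std::nullopt, outerDimsPerm);
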
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DecomposeGenericByUnfoldingPermutation.cpp b/mlir/lib/Dialect/Linalg/Transforms/DecomposeGenericByUnfoldingPermutation.cpp
index 692bf595267d4..b7da20c108f3e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DecomposeGenericByUnfoldingPermutation.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DecomposeGenericByUnfoldingPermutation.cpp
@@ -198,10 +198,10 @@ LogicalResult DecomposeProjectedPermutation::matchAndRewrite(
         transposedShape[i] = inputRTType.getShape()[permutation[i]];
 
       Value emptyTensor =
-          rewriter.create<tensor::EmptyOp>(loc, transposedShape, elType);
+          tensor::EmptyOp::create(rewriter, loc, transposedShape, elType);
 
-      auto transposeOp = rewriter.create<TransposeOp>(loc, newInitValues[i],
-                                                      emptyTensor, permutation);
+      auto transposeOp = TransposeOp::create(rewriter, loc, newInitValues[i],
+                                             emptyTensor, permutation);
       newInitValues[i] = transposeOp->getResult(0);
       isChanged = true;
     }
@@ -209,11 +209,11 @@ LogicalResult DecomposeProjectedPermutation::matchAndRewrite(
     // Does it require broadcast?
     if (!broadcastedDims.empty()) {
       assert(broadcastedDims.size() && "should have non size broadcast");
-      Value emptyTensor = rewriter.create<tensor::EmptyOp>(
-          loc, outputShape, inputRTType.getElementType());
+      Value emptyTensor = tensor::EmptyOp::create(rewriter, loc, outputShape,
+                                                  inputRTType.getElementType());
 
-      auto broadcastOp = rewriter.create<linalg::BroadcastOp>(
-          loc, newInitValues[i], emptyTensor, broadcastedDims);
+      auto broadcastOp = linalg::BroadcastOp::create(
+          rewriter, loc, newInitValues[i], emptyTensor, broadcastedDims);
 
       newInitValues[i] = broadcastOp->getResult(0);
       isChanged = true;
@@ -227,7 +227,8 @@ LogicalResult DecomposeProjectedPermutation::matchAndRewrite(
   SmallVector<Value> operands = op->getOperands();
   ValueRange operandsRef(operands);
 
-  auto newOp = rewriter.create<linalg::GenericOp>(
+  auto newOp = linalg::GenericOp::create(
+      rewriter,
       /*location=*/op.getLoc(),
       /*resultTensorTypes=*/op->getResultTypes(),
       /*inputs=*/newInitValues,
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp b/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
index 1419175304899..c92a27f0af858 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
@@ -133,13 +133,13 @@ static Value getZero(OpBuilder &b, Location loc, Type elementType) {
   assert(elementType.isIntOrIndexOrFloat() &&
          "expected scalar type while computing zero value");
   if (isa<IntegerType>(elementType))
-    return b.create<arith::ConstantIntOp>(loc, elementType, 0);
+    return arith::ConstantIntOp::create(b, loc, elementType, 0);
   if (elementType.isIndex())
-    return b.create<arith::ConstantIndexOp>(loc, 0);
+    return arith::ConstantIndexOp::create(b, loc, 0);
   // Assume float.
   auto floatType = cast<FloatType>(elementType);
-  return b.create<arith::ConstantFloatOp>(
-      loc, floatType, APFloat::getZero(floatType.getFloatSemantics()));
+  return arith::ConstantFloatOp::create(
+      b, loc, floatType, APFloat::getZero(floatType.getFloatSemantics()));
 }
 
 GenericOp
@@ -188,8 +188,8 @@ DecomposeLinalgOp::createPeeledGenericOp(GenericOp genericOp,
 
     // Fall back path, use an `init_tensor` and identity indexing map.
     AffineMap indexingMap = rewriter.getMultiDimIdentityMap(domain.size());
-    Value emptyTensor =
-        rewriter.create<tensor::EmptyOp>(loc, domain, scalarOpResult.getType());
+    Value emptyTensor = tensor::EmptyOp::create(rewriter, loc, domain,
+                                                scalarOpResult.getType());
     newInitValues.push_back(emptyTensor);
     newResultTypes.push_back(emptyTensor.getType());
     peeledGenericOpIndexingMaps.push_back(indexingMap);
@@ -202,10 +202,10 @@ DecomposeLinalgOp::createPeeledGenericOp(GenericOp genericOp,
   resultTypes.append(newResultTypes.begin(), newResultTypes.end());
   auto indexingMapAttr =
       rewriter.getAffineMapArrayAttr(peeledGenericOpIndexingMaps);
-  return rewriter.create<GenericOp>(
-      loc, resultTypes, genericOp.getInputs(), outsOperands, indexingMapAttr,
-      genericOp.getIteratorTypes(), /*doc=*/nullptr, /*libraryCall=*/nullptr,
-      [](OpBuilder, Location, ValueRange) {});
+  return GenericOp::create(
+      rewriter, loc, resultTypes, genericOp.getInputs(), outsOperands,
+      indexingMapAttr, genericOp.getIteratorTypes(), /*doc=*/nullptr,
+      /*libraryCall=*/nullptr, [](OpBuilder, Location, ValueRange) {});
 }
 
 GenericOp
@@ -239,8 +239,8 @@ DecomposeLinalgOp::createResidualGenericOp(GenericOp genericOp,
     indexingMaps.push_back(genericOp.getMatchingIndexingMap(&outOperand));
 
   auto indexingMapAttr = rewriter.getAffineMapArrayAttr(indexingMaps);
-  return rewriter.create<GenericOp>(
-      genericOp->getLoc(), genericOp->getResultTypes(),
+  return GenericOp::create(
+      rewriter, genericOp->getLoc(), genericOp->getResultTypes(),
       residualGenericOpOperands, genericOp.getOutputs(), indexingMapAttr,
       genericOp.getIteratorTypes(), /*doc=*/nullptr, /*libraryCall=*/nullptr,
       [](OpBuilder, Location, ValueRange) {});
@@ -324,7 +324,7 @@ DecomposeLinalgOp::matchAndRewrite(GenericOp genericOp,
     yieldedVals.append(llvm::to_vector(
         llvm::map_range(peeledScalarOperation->getResults(),
                         [](OpResult opr) -> Value { return opr; })));
-    rewriter.create<YieldOp>(genericOp.getLoc(), yieldedVals);
+    YieldOp::create(rewriter, genericOp.getLoc(), yieldedVals);
   }
 
   /// In the split operations, replace block arguments uses that refer to
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
index ef24eb881d68b..830905495e759 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
@@ -34,8 +34,8 @@ static Value sourceMaterializationCallback(OpBuilder &builder, Type type,
 
   // A detensored value is converted back by creating a new tensor from its
   // element(s).
-  return builder.create<tensor::FromElementsOp>(
-      loc, RankedTensorType::get({}, inputType), inputs[0]);
+  return tensor::FromElementsOp::create(
+      builder, loc, RankedTensorType::get({}, inputType), inputs[0]);
 }
 
 namespace {
@@ -147,7 +147,7 @@ class DetensorizeTypeConverter : public TypeConverter {
     // A tensor value is detensoried by extracting its element(s).
     addTargetMaterialization([](OpBuilder &builder, Type type,
                                 ValueRange inputs, Location loc) -> Value {
-      return builder.create<tensor::ExtractOp>(loc, inputs[0], ValueRange{});
+      return tensor::ExtractOp::create(builder, loc, inputs[0], ValueRange{});
     });
 
     addSourceMaterialization(sourceMaterializationCallback);
@@ -480,8 +480,8 @@ struct LinalgDetensorize
     Block *postEntryBlock =
         rewriter.splitBlock(entryBlock, entryBlock->begin());
     rewriter.setInsertionPointToStart(entryBlock);
-    auto branch =
-        rewriter.create<cf::BranchOp>(rewriter.getUnknownLoc(), postEntryBlock);
+    auto branch = cf::BranchOp::create(rewriter, rewriter.getUnknownLoc(),
+                                       postEntryBlock);
 
     if (aggressiveMode.getValue()) {
       AggressiveDetensoringModel costModel;
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
index 6c59cd65c1b99..745a40dbc4eea 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -118,16 +118,17 @@ struct MoveInitOperandsToInput : public OpRewritePattern<GenericOp> {
       OpBuilder::InsertionGuard guard(rewriter);
       rewriter.setInsertionPointAfterValue(op->get());
       auto elemType = cast<ShapedType>(op->get().getType()).getElementType();
-      auto empty = rewriter.create<tensor::EmptyOp>(
-          loc, tensor::getMixedSizes(rewriter, loc, op->get()), elemType);
+      auto empty = tensor::EmptyOp::create(
+          rewriter, loc, tensor::getMixedSizes(rewriter, loc, op->get()),
+          elemType);
 
       unsigned start = genericOp.getDpsInits().getBeginOperandIndex();
       newOutputOperands[op->getOperandNumber() - start] = empty.getResult();
     }
 
-    auto newOp = rewriter.create<GenericOp>(
-        loc, genericOp.getResultTypes(), newInputOperands, newOutputOperands,
-        newIndexingMaps, genericOp.getIteratorTypesArray(),
+    auto newOp = GenericOp::create(
+        rewriter, loc, genericOp.getResultTypes(), newInputOperands,
+        newOutputOperands, newIndexingMaps, genericOp.getIteratorTypesArray(),
         /*bodyBuild=*/nullptr, linalg::getPrunedAttributeList(genericOp));
 
     OpBuilder::InsertionGuard guard(rewriter);
@@ -295,8 +296,8 @@ static Value collapseValue(
     MemRefLayoutAttrInterface layout;
     auto targetType = MemRefType::get(targetShape, memrefType.getElementType(),
                                       layout, memrefType.getMemorySpace());
-    return rewriter.create<memref::CollapseShapeOp>(loc, targetType, operand,
-                                                    reassociation);
+    return memref::CollapseShapeOp::create(rewriter, loc, targetType, operand,
+                                           reassociation);
   }
   if (auto tensorType = dyn_cast<RankedTensorType>(operand.getType())) {
     if (rankReductionStrategy ==
@@ -314,8 +315,8 @@ static Value collapseValue(
         "unknown rank reduction strategy");
     auto targetType =
         RankedTensorType::get(targetShape, tensorType.getElementType());
-    return rewriter.create<tensor::CollapseShapeOp>(loc, targetType, operand,
-                                                    reassociation);
+    return tensor::CollapseShapeOp::create(rewriter, loc, targetType, operand,
+                                           reassociation);
   }
   llvm_unreachable("unsupported operand type");
 }
@@ -564,8 +565,8 @@ linalg::dropUnitDims(RewriterBase &rewriter, GenericOp genericOp,
     for (unsigned i : llvm::seq<unsigned>(0, genericOp.getNumResults()))
       resultTypes.push_back(newOutputs[i].getType());
     GenericOp replacementOp =
-        b.create<GenericOp>(loc, resultTypes, newInputs, newOutputs,
-                            newIndexingMaps, newIteratorTypes);
+        GenericOp::create(b, loc, resultTypes, newInputs, newOutputs,
+                          newIndexingMaps, newIteratorTypes);
     b.cloneRegionBefore(genericOp.getRegion(), replacementOp.getRegion(),
                         replacementOp.getRegion().begin());
     // 5a. Replace `linalg.index` operations that refer to the dropped unit
@@ -685,8 +686,8 @@ struct DropPadUnitDims : public OpRewritePattern<tensor::PadOp> {
         collapseValue(rewriter, padOp.getLoc(), padOp.getSource(), newShape,
                       reassociationMap, options.rankReductionStrategy);
 
-    auto newPadOp = rewriter.create<tensor::PadOp>(
-        padOp.getLoc(), /*result=*/Type(), collapsedSource, newLowPad,
+    auto newPadOp = tensor::PadOp::create(
+        rewriter, padOp.getLoc(), /*result=*/Type(), collapsedSource, newLowPad,
         newHighPad, paddingVal, padOp.getNofold());
 
     Value dest = padOp.getResult();
@@ -703,9 +704,8 @@ struct DropPadUnitDims : public OpRewritePattern<tensor::PadOp> {
         expandedSizes.push_back(tensor::getMixedSize(
             rewriter, padOp.getLoc(), newPadOp, dim - numUnitDims));
       }
-      dest = rewriter.create<tensor::EmptyOp>(
-          padOp.getLoc(), expandedSizes,
-          padOp.getResultType().getElementType());
+      dest = tensor::EmptyOp::create(rewriter, padOp.getLoc(), expandedSizes,
+                                     padOp.getResultType().getElementType());
     }
 
     Value expandedValue =
@@ -746,8 +746,9 @@ struct RankReducedExtractSliceOp
             strides));
 
     Location loc = sliceOp.getLoc();
-    Value newSlice = rewriter.create<tensor::ExtractSliceOp>(
-        loc, rankReducedType, sliceOp.getSource(), offsets, sizes, strides);
+    Value newSlice = tensor::ExtractSliceOp::create(
+        rewriter, loc, rankReducedType, sliceOp.getSource(), offsets, sizes,
+        strides);
     rewriter.replaceOpWithNewOp<tensor::ExpandShapeOp>(
         sliceOp, resultType, newSlice, *reassociation);
     return success();
@@ -780,8 +781,8 @@ struct RankReducedInsertSliceOp : public OpRewritePattern<InsertOpTy> {
       // parallel case.
       if (std::is_same<InsertOpTy, tensor::ParallelInsertSliceOp>::value)
         rewriter.setInsertionPoint(insertSliceOp->getParentOp());
-      reshapedSource = rewriter.create<tensor::CollapseShapeOp>(
-          loc, insertSliceOp.getSource(), *reassociation);
+      reshapedSource = tensor::CollapseShapeOp::create(
+          rewriter, loc, insertSliceOp.getSource(), *reassociation);
     }
     rewriter.replaceOpWithNewOp<InsertOpTy>(
         insertSliceOp, reshapedSource, insertSliceOp.getDest(),
@@ -931,8 +932,8 @@ struct RankReduceContractionOps : OpRewritePattern<FromOpTy> {
   /// Expand result tensor.
   Value expandResult(PatternRewriter &rewriter, Value result,
                      RankedTensorType expandedType, int64_t dim) const {
-    return rewriter.create<tensor::ExpandShapeOp>(
-        result.getLoc(), expandedType, result,
+    return tensor::ExpandShapeOp::create(
+        rewriter, result.getLoc(), expandedType, result,
         getReassociationForReshapeAtDim(expandedType.getRank(), dim));
   }
 
@@ -967,9 +968,9 @@ struct RankReduceContractionOps : OpRewritePattern<FromOpTy> {
     SmallVector<Type, 1> collapsedResultTy;
     if (isa<RankedTensorType>(collapsedInit.getType()))
       collapsedResultTy.push_back(collapsedInit.getType());
-    auto collapsedOp = rewriter.create<ToOpTy>(
-        loc, collapsedResultTy, ValueRange{collapsedLhs, collapsedRhs},
-        ValueRange{collapsedInit});
+    auto collapsedOp = ToOpTy::create(rewriter, loc, collapsedResultTy,
+                                      ValueRange{collapsedLhs, collapsedRhs},
+                                      ValueRange{collapsedInit});
     for (auto attr : contractionOp->getAttrs()) {
       if (attr.getName() == LinalgDialect::kMemoizedIndexingMapsAttrName ||
           attr.getName() == "indexing_maps")
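
Note: the DropUnitDims.cpp hunks above do not change operand order or values;
the only churn beyond the call form itself is clang-format re-wrapping the
argument list once the builder becomes the first argument. A representative
before/after, from the collapseValue hunk:

  // before
  return rewriter.create<tensor::CollapseShapeOp>(loc, targetType, operand,
                                                  reassociation);
  // after
  return tensor::CollapseShapeOp::create(rewriter, loc, targetType, operand,
                                         reassociation);
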
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 8a5c138304d5b..4a66b8b9619f4 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -237,12 +237,12 @@ static void generateFusedElementwiseOpRegion(
     fusedIndices.reserve(numFusedOpLoops);
     llvm::transform(llvm::seq<uint64_t>(0, numFusedOpLoops),
                     std::back_inserter(fusedIndices), [&](uint64_t dim) {
-                      return rewriter.create<IndexOp>(producer.getLoc(), dim);
+                      return IndexOp::create(rewriter, producer.getLoc(), dim);
                     });
     for (IndexOp indexOp :
          llvm::make_early_inc_range(producerBlock.getOps<IndexOp>())) {
-      Value newIndex = rewriter.create<affine::AffineApplyOp>(
-          producer.getLoc(),
+      Value newIndex = affine::AffineApplyOp::create(
+          rewriter, producer.getLoc(),
           consumerToProducerLoopsMap.getSubMap(indexOp.getDim()), fusedIndices);
       mapper.map(indexOp.getResult(), newIndex);
     }
@@ -328,7 +328,7 @@ static void generateFusedElementwiseOpRegion(
   }
   for (auto consumerYieldVal : consumerYieldOp.getOperands())
     fusedYieldValues.push_back(mapper.lookupOrDefault(consumerYieldVal));
-  rewriter.create<YieldOp>(fusedOp.getLoc(), fusedYieldValues);
+  YieldOp::create(rewriter, fusedOp.getLoc(), fusedYieldValues);
 
   // Sanity checks.
   assert(fusedBlock->getNumArguments() == fusedOp.getNumOperands() &&
@@ -417,8 +417,8 @@ mlir::linalg::fuseElementwiseOps(RewriterBase &rewriter,
   }
 
   // Generate the fused op.
-  auto fusedOp = rewriter.create<GenericOp>(
-      consumer.getLoc(), fusedResultTypes, fusedInputOperands,
+  auto fusedOp = GenericOp::create(
+      rewriter, consumer.getLoc(), fusedResultTypes, fusedInputOperands,
       fusedOutputOperands, rewriter.getAffineMapArrayAttr(fusedIndexMaps),
       consumer.getIteratorTypes(),
       /*doc=*/nullptr,
@@ -751,9 +751,9 @@ static void updateExpandedGenericOpRegion(PatternRewriter &rewriter,
     expandedIndices.reserve(expandedDims.size() - 1);
     llvm::transform(
         expandedDims.drop_front(), std::back_inserter(expandedIndices),
-        [&](int64_t dim) { return rewriter.create<IndexOp>(loc, dim); });
+        [&](int64_t dim) { return IndexOp::create(rewriter, loc, dim); });
     OpFoldResult newIndex =
-        rewriter.create<IndexOp>(loc, expandedDims.front()).getResult();
+        IndexOp::create(rewriter, loc, expandedDims.front()).getResult();
     for (auto [expandedShape, expandedIndex] :
          llvm::zip(expandedDimsShape, expandedIndices)) {
       AffineExpr idx, acc, shape;
@@ -797,8 +797,8 @@ static Operation *createExpandedTransposeOp(PatternRewriter &rewriter,
       newPerm.push_back(dim);
     }
   }
-  return rewriter.create<TransposeOp>(transposeOp.getLoc(), expandedInput,
-                                      output, invertPermutationVector(newPerm));
+  return TransposeOp::create(rewriter, transposeOp.getLoc(), expandedInput,
+                             output, invertPermutationVector(newPerm));
 }
 
 // Create an expanded generic op.
@@ -814,9 +814,9 @@ static Operation *createExpandedGenericOp(
     for (auto j : expansionInfo.getExpandedDims(i))
       iteratorTypes[j] = type;
 
-  Operation *fused = rewriter.create<GenericOp>(
-      linalgOp.getLoc(), resultTypes, expandedOpOperands, outputs,
-      expandedOpIndexingMaps, iteratorTypes);
+  Operation *fused = GenericOp::create(rewriter, linalgOp.getLoc(), resultTypes,
+                                       expandedOpOperands, outputs,
+                                       expandedOpIndexingMaps, iteratorTypes);
 
   Region &fusedRegion = fused->getRegion(0);
   Region &originalRegion = linalgOp->getRegion(0);
@@ -934,8 +934,8 @@ fuseWithReshapeByExpansion(LinalgOp linalgOp, Operation *reshapeOp,
                 reassociation,
                 /*isExpandingReshape=*/true)))
           return std::nullopt;
-        expandedOpOperands.push_back(rewriter.create<tensor::ExpandShapeOp>(
-            loc, expandedOperandType, opOperand->get(), reassociation,
+        expandedOpOperands.push_back(tensor::ExpandShapeOp::create(
+            rewriter, loc, expandedOperandType, opOperand->get(), reassociation,
             expandedOperandShape));
         continue;
       }
@@ -962,8 +962,8 @@ fuseWithReshapeByExpansion(LinalgOp linalgOp, Operation *reshapeOp,
               reassociation,
               /*isExpandingReshape=*/true)))
         return std::nullopt;
-      outputs.push_back(rewriter.create<tensor::ExpandShapeOp>(
-          loc, expandedOutputType, opOperand.get(), reassociation,
+      outputs.push_back(tensor::ExpandShapeOp::create(
+          rewriter, loc, expandedOutputType, opOperand.get(), reassociation,
           expandedOutputShape));
     } else {
       outputs.push_back(opOperand.get());
@@ -985,8 +985,8 @@ fuseWithReshapeByExpansion(LinalgOp linalgOp, Operation *reshapeOp,
               linalgOp.getMatchingIndexingMap(
                   linalgOp.getDpsInitOperand(resultNumber)),
               expansionInfo);
-      resultVals.push_back(rewriter.create<tensor::CollapseShapeOp>(
-          linalgOp.getLoc(), opResult.getType(),
+      resultVals.push_back(tensor::CollapseShapeOp::create(
+          rewriter, linalgOp.getLoc(), opResult.getType(),
           fusedOp->getResult(resultNumber), reassociation));
     } else {
       resultVals.push_back(fusedOp->getResult(resultNumber));
@@ -1087,8 +1087,8 @@ class FoldPadWithProducerReshapeOpByExpansion
 
     Location loc = padOp->getLoc();
     RankedTensorType expandedPaddedType = paddedType.clone(expandedPaddedShape);
-    auto newPadOp = rewriter.create<tensor::PadOp>(
-        loc, expandedPaddedType, reshapeOp.getSrc(), newLow, newHigh,
+    auto newPadOp = tensor::PadOp::create(
+        rewriter, loc, expandedPaddedType, reshapeOp.getSrc(), newLow, newHigh,
         padOp.getConstantPaddingValue(), padOp.getNofold());
 
     rewriter.replaceOpWithNewOp<tensor::CollapseShapeOp>(
@@ -1604,7 +1604,7 @@ static void generateCollapsedIndexingRegion(
        enumerate(collapsingInfo.getCollapsedOpToOrigOpMapping())) {
     ReassociationIndicesRef foldedDimsRef(foldedDims.value());
     Value newIndexVal =
-        rewriter.create<linalg::IndexOp>(loc, foldedDims.index());
+        linalg::IndexOp::create(rewriter, loc, foldedDims.index());
     for (auto dim : llvm::reverse(foldedDimsRef.drop_front())) {
       Value loopDim =
           getValueOrCreateConstantIndexOp(rewriter, loc, loopRange[dim]);
@@ -1688,9 +1688,10 @@ GenericOp cloneToCollapsedOp<GenericOp>(RewriterBase &rewriter,
   SmallVector<utils::IteratorType> iteratorTypes(getCollapsedOpIteratorTypes(
       origOp.getIteratorTypesArray(), collapsingInfo));
 
-  GenericOp collapsedOp = rewriter.create<linalg::GenericOp>(
-      origOp.getLoc(), resultTypes, inputOperands, outputOperands, indexingMaps,
-      iteratorTypes, [](OpBuilder &builder, Location loc, ValueRange args) {});
+  GenericOp collapsedOp = linalg::GenericOp::create(
+      rewriter, origOp.getLoc(), resultTypes, inputOperands, outputOperands,
+      indexingMaps, iteratorTypes,
+      [](OpBuilder &builder, Location loc, ValueRange args) {});
   Block *origOpBlock = &origOp->getRegion(0).front();
   Block *collapsedOpBlock = &collapsedOp->getRegion(0).front();
   rewriter.mergeBlocks(origOpBlock, collapsedOpBlock,
@@ -1795,12 +1796,12 @@ FailureOr<CollapseResult> mlir::linalg::collapseOpIterationDims(
       if (isa<MemRefType>(collapsedOpResult.getType())) {
         MemRefType expandShapeResultType = MemRefType::get(
             originalResultType.getShape(), originalResultType.getElementType());
-        result = rewriter.create<memref::ExpandShapeOp>(
-            loc, expandShapeResultType, collapsedOpResult, reassociation,
-            resultShape);
+        result = memref::ExpandShapeOp::create(
+            rewriter, loc, expandShapeResultType, collapsedOpResult,
+            reassociation, resultShape);
       } else {
-        result = rewriter.create<tensor::ExpandShapeOp>(
-            loc, originalResultType, collapsedOpResult, reassociation,
+        result = tensor::ExpandShapeOp::create(
+            rewriter, loc, originalResultType, collapsedOpResult, reassociation,
             resultShape);
       }
       results.push_back(result);
@@ -1983,8 +1984,8 @@ class FoldPadWithProducerReshapeOpByCollapsing
 
     RankedTensorType collapsedPaddedType =
         paddedType.clone(collapsedPaddedShape);
-    auto newPadOp = rewriter.create<tensor::PadOp>(
-        loc, collapsedPaddedType, reshapeOp.getSrc(), newLow, newHigh,
+    auto newPadOp = tensor::PadOp::create(
+        rewriter, loc, collapsedPaddedType, reshapeOp.getSrc(), newLow, newHigh,
         padOp.getConstantPaddingValue(), padOp.getNofold());
 
     rewriter.replaceOpWithNewOp<tensor::ExpandShapeOp>(
@@ -2118,17 +2119,18 @@ class FoldScalarOrSplatConstant : public OpRewritePattern<GenericOp> {
 
       // Create a constant scalar value from the splat constant.
       Value scalarConstant =
-          rewriter.create<arith::ConstantOp>(def->getLoc(), constantAttr);
+          arith::ConstantOp::create(rewriter, def->getLoc(), constantAttr);
 
       SmallVector<Value> outputOperands = genericOp.getOutputs();
-      auto fusedOp = rewriter.create<GenericOp>(
-          rewriter.getFusedLoc(fusedLocs), genericOp->getResultTypes(),
-          /*inputs=*/fusedOperands,
-          /*outputs=*/outputOperands,
-          rewriter.getAffineMapArrayAttr(fusedIndexMaps),
-          genericOp.getIteratorTypes(),
-          /*doc=*/nullptr,
-          /*library_call=*/nullptr);
+      auto fusedOp =
+          GenericOp::create(rewriter, rewriter.getFusedLoc(fusedLocs),
+                            genericOp->getResultTypes(),
+                            /*inputs=*/fusedOperands,
+                            /*outputs=*/outputOperands,
+                            rewriter.getAffineMapArrayAttr(fusedIndexMaps),
+                            genericOp.getIteratorTypes(),
+                            /*doc=*/nullptr,
+                            /*library_call=*/nullptr);
 
       // Map the block argument corresponding to the replaced argument with the
       // scalar constant.
@@ -2184,8 +2186,8 @@ struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
         modifiedOutput = true;
         SmallVector<OpFoldResult> mixedSizes =
             tensor::getMixedSizes(rewriter, loc, operandVal);
-        Value emptyTensor = rewriter.create<tensor::EmptyOp>(
-            loc, mixedSizes, operandType.getElementType());
+        Value emptyTensor = tensor::EmptyOp::create(
+            rewriter, loc, mixedSizes, operandType.getElementType());
         op->setOperand(opOperand.getOperandNumber(), emptyTensor);
       }
     }
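
For reviewers skimming only this patch: every hunk applies the same mechanical rewrite, so here is a minimal standalone sketch of the pattern. The helper names below are illustrative only and not taken from the patch; it assumes an MLIR tree where the static per-op `create(builder, ...)` overloads used throughout this diff already exist.

  #include "mlir/Dialect/Arith/IR/Arith.h"
  #include "mlir/IR/Builders.h"
  #include "mlir/IR/ImplicitLocOpBuilder.h"

  using namespace mlir;

  // Old style: the templated OpBuilder method names the op type.
  //   Value zero = b.create<arith::ConstantIndexOp>(loc, 0);
  // New style: the op's static `create` takes the builder first, then the
  // location and the usual operands/attributes.
  static Value makeZeroIndex(OpBuilder &b, Location loc) {
    return arith::ConstantIndexOp::create(b, loc, 0);
  }

  // With ImplicitLocOpBuilder the location stays implicit, so only the
  // builder argument moves to the front (compare the Promotion.cpp hunks).
  static Value makeOneIndex(ImplicitLocOpBuilder &b) {
    return arith::ConstantIndexOp::create(b, 1);
  }
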
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseToLinalg.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseToLinalg.cpp
index c4af09ca01421..c52315333c5b3 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseToLinalg.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseToLinalg.cpp
@@ -64,8 +64,8 @@ getOrCreateOperandsMatchingResultTypes(OpBuilder &b, Operation *op) {
       continue;
 
     // Extract static / dynamic shape mix from the first operand.
-    res.push_back(b.create<tensor::EmptyOp>(
-        loc, tensor::getMixedSizes(b, loc, operands.front()),
+    res.push_back(tensor::EmptyOp::create(
+        b, loc, tensor::getMixedSizes(b, loc, operands.front()),
         cast<RankedTensorType>(t).getElementType()));
   }
   return res;
@@ -104,7 +104,7 @@ struct ConvertAnyElementwiseMappableOpOnRankedTensors : public RewritePattern {
               builder.create(loc, op->getName().getIdentifier(),
                              regionArgs.take_front(op->getNumOperands()),
                              resultTypes, op->getAttrs());
-          builder.create<linalg::YieldOp>(loc, scalarOp->getResults());
+          linalg::YieldOp::create(builder, loc, scalarOp->getResults());
         });
     return success();
   }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp b/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp
index d375878fb2c91..9974ccd8ae79e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp
@@ -259,8 +259,8 @@ mlir::linalg::deduplicateOperandsAndRemoveDeadResults(
   for (Value v : newOutputOperands)
     if (isa<TensorType>(v.getType()))
       newResultTypes.push_back(v.getType());
-  auto newOp = rewriter.create<GenericOp>(
-      loc, newResultTypes, newInputOperands, newOutputOperands,
+  auto newOp = GenericOp::create(
+      rewriter, loc, newResultTypes, newInputOperands, newOutputOperands,
       rewriter.getAffineMapArrayAttr(newIndexingMaps),
       genericOp.getIteratorTypes(), genericOp.getDocAttr(),
       genericOp.getLibraryCallAttr(),
diff --git a/mlir/lib/Dialect/Linalg/Transforms/FusePadOpWithLinalgProducer.cpp b/mlir/lib/Dialect/Linalg/Transforms/FusePadOpWithLinalgProducer.cpp
index 44469bc404a7c..0ca89040f3453 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/FusePadOpWithLinalgProducer.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/FusePadOpWithLinalgProducer.cpp
@@ -72,14 +72,14 @@ struct FusePadOp : OpRewritePattern<tensor::PadOp> {
     // Create the tensor of same size as output of the pad op.
     RankedTensorType padResultType = padOp.getResultType();
     auto resultSizes = resultShape[0];
-    auto emptyTensor = rewriter.create<tensor::EmptyOp>(
-        loc, resultSizes, padResultType.getElementType());
+    auto emptyTensor = tensor::EmptyOp::create(rewriter, loc, resultSizes,
+                                               padResultType.getElementType());
 
     // Fill the tensor with the pad value.
     // TODO: There is an option to fill only the boundaries. For now just
     // filling the whole tensor.
-    auto fillTensor =
-        rewriter.create<linalg::FillOp>(loc, padValue, emptyTensor.getResult());
+    auto fillTensor = linalg::FillOp::create(rewriter, loc, padValue,
+                                             emptyTensor.getResult());
 
     // Construct a slice of the fill result that is to be replaced with the
     // result of the generic op. The low pad values are the offsets, the size of
@@ -93,15 +93,15 @@ struct FusePadOp : OpRewritePattern<tensor::PadOp> {
          llvm::enumerate(cast<RankedTensorType>(source.getType()).getShape())) {
       if (ShapedType::isDynamic(shape.value())) {
         sizes.push_back(
-            rewriter.create<tensor::DimOp>(loc, source, shape.index())
+            tensor::DimOp::create(rewriter, loc, source, shape.index())
                 .getResult());
       } else {
         sizes.push_back(rewriter.getIndexAttr(shape.value()));
       }
     }
     SmallVector<OpFoldResult> strides(offsets.size(), rewriter.getIndexAttr(1));
-    auto slice = rewriter.create<tensor::ExtractSliceOp>(
-        loc, fillTensor.getResult(0), offsets, sizes, strides);
+    auto slice = tensor::ExtractSliceOp::create(
+        rewriter, loc, fillTensor.getResult(0), offsets, sizes, strides);
 
     // Clone the generic op.
     auto clonedOp =
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 9bc7be2623849..41252c68ffda9 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -277,7 +277,7 @@ mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpResult producerOpResult,
   // mismatches. Insert a `tensor.cast` op to propagate the transformation
   // invariant that types are compatible.
   if (consumerType != def.getType())
-    def = b.create<tensor::CastOp>(fusedProducer.getLoc(), consumerType, def);
+    def = tensor::CastOp::create(b, fusedProducer.getLoc(), consumerType, def);
   consumerOpOperand.set(def);
   return FusionInfo{cast<LinalgOp>(producerOpResult.getOwner()), fusedProducer};
 }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
index 05f2157b77aeb..3e31393fd51ed 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
@@ -61,8 +61,9 @@ FailureOr<GenericOp> mlir::linalg::generalizeNamedOp(RewriterBase &rewriter,
   // All named ops have a region attached that can be inlined.
   assert(linalgOp->getNumRegions() == 1 &&
          "expect named op to have one region attached");
-  GenericOp genericOp = rewriter.create<GenericOp>(
-      linalgOp.getLoc(), resultTypes, inputs, outputs, indexingMaps, iterators);
+  GenericOp genericOp =
+      GenericOp::create(rewriter, linalgOp.getLoc(), resultTypes, inputs,
+                        outputs, indexingMaps, iterators);
   rewriter.inlineRegionBefore(linalgOp->getRegion(0), genericOp.getRegion(),
                               genericOp.getRegion().begin());
   rewriter.replaceOp(linalgOp, genericOp->getResults());
diff --git a/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp b/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
index 94ed46442180c..19729af1bc9fa 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
@@ -591,8 +591,8 @@ static FailureOr<PackingResult> buildPackingLoopNestImpl(
 
     // Create a packing loop that takes `hoistedPackedTensor` as iteration
     // argument.
-    auto clonedForOp = rewriter.create<scf::ForOp>(
-        loc, bvm.lookupOrDefault(forOp.getLowerBound()),
+    auto clonedForOp = scf::ForOp::create(
+        rewriter, loc, bvm.lookupOrDefault(forOp.getLowerBound()),
         bvm.lookupOrDefault(forOp.getUpperBound()),
         bvm.lookupOrDefault(forOp.getStep()), hoistedPackedTensor);
 
@@ -640,11 +640,11 @@ static FailureOr<PackingResult> buildPackingLoopNestImpl(
   TransposeOp maybeTransposeOp;
   Value paddedTensor = bvm.lookup(opToHoist.getResult());
   if (!transposeVector.empty()) {
-    Value outputTensor = rewriter.create<tensor::ExtractSliceOp>(
-        loc, transposedTensorType, hoistedPackedTensor, offsets, sizes,
-        strides);
-    maybeTransposeOp = rewriter.create<linalg::TransposeOp>(
-        loc, paddedTensor, outputTensor, transposeVector);
+    Value outputTensor = tensor::ExtractSliceOp::create(
+        rewriter, loc, transposedTensorType, hoistedPackedTensor, offsets,
+        sizes, strides);
+    maybeTransposeOp = linalg::TransposeOp::create(
+        rewriter, loc, paddedTensor, outputTensor, transposeVector);
     paddedTensor = maybeTransposeOp.getResult()[0];
   }
 
@@ -652,15 +652,16 @@ static FailureOr<PackingResult> buildPackingLoopNestImpl(
   if (nPackedLoops > 0) {
     // Step 4. Create InsertSliceOp at the innermost loop level, inserting an
     // optionally transposed padded slice into the packed tensor.
-    Value inserted = rewriter.create<tensor::InsertSliceOp>(
-        loc, paddedTensor, hoistedPackedTensor, offsets, sizes, strides);
+    Value inserted = tensor::InsertSliceOp::create(rewriter, loc, paddedTensor,
+                                                   hoistedPackedTensor, offsets,
+                                                   sizes, strides);
 
     // Step 5. Iteratively pop the stack and propagate the yield.
     Value valueToYield = inserted;
     for (Value iv : llvm::reverse(clonedLoopIvs)) {
       auto forOp = scf::getForInductionVarOwner(iv);
       rewriter.setInsertionPointToEnd(&forOp.getRegion().front());
-      rewriter.create<scf::YieldOp>(loc, valueToYield);
+      scf::YieldOp::create(rewriter, loc, valueToYield);
       valueToYield = forOp.getResult(0);
     }
   }
@@ -712,8 +713,8 @@ static FailureOr<PackingResult> buildPackingLoopNestImpl(
   rewriter.setInsertionPoint(outerLoop);
   SmallVector<Value> dynamicTensorSizes =
       analysis.getHoistedPackedTensorSizes(rewriter, loc);
-  auto emptyOp = rewriter.create<tensor::EmptyOp>(
-      loc, hoistedPackedTensorType.getShape(),
+  auto emptyOp = tensor::EmptyOp::create(
+      rewriter, loc, hoistedPackedTensorType.getShape(),
       hoistedPackedTensorType.getElementType(), dynamicTensorSizes);
 
   return buildPackingLoopNestImpl(rewriter, bvm, opToHoist, transposeVector,
@@ -840,8 +841,8 @@ padThroughLoopIterArg(RewriterBase &rewriter, Value paddedValueBeforeHoisting,
   {
     OpBuilder::InsertionGuard g(rewriter);
     rewriter.setInsertionPointAfter(forOp);
-    extracted = rewriter.create<tensor::ExtractSliceOp>(
-        hoistedPackedTensor.getLoc(), hoistedPackedTensor,
+    extracted = tensor::ExtractSliceOp::create(
+        rewriter, hoistedPackedTensor.getLoc(), hoistedPackedTensor,
         outerSliceOp.getMixedOffsets(), outerSliceOp.getMixedSizes(),
         outerSliceOp.getMixedStrides());
     rewriter.replaceAllUsesWith(forOp.getResult(iterArgNumber), extracted);
@@ -934,8 +935,8 @@ static Value replaceByPackingResult(RewriterBase &rewriter,
   // offsets = [maybe_leading_ivs, 0 .. 0].
   // sizes = [1 .. 1, transposedShape] (defined above).
   // strides = [1 .. 1] (defined above)
-  return rewriter.create<tensor::ExtractSliceOp>(
-      loc, transposedTensorType, hoistedPackedTensor, offsets,
+  return tensor::ExtractSliceOp::create(
+      rewriter, loc, transposedTensorType, hoistedPackedTensor, offsets,
       packingResult.sizes, packingResult.strides);
 }
 
@@ -982,10 +983,11 @@ FailureOr<Value> mlir::linalg::hoistPaddingOnTensors(
     OpBuilder::InsertionGuard g(rewriter);
     rewriter.setInsertionPointAfter(newResult.getDefiningOp());
     // Transpose the packed tensor back to the original storage order.
-    Value emptyTensor = rewriter.create<tensor::EmptyOp>(
-        loc, paddedTensorType.getShape(), paddedTensorType.getElementType());
-    TransposeOp unTransposeOp = rewriter.create<linalg::TransposeOp>(
-        loc, newResult, emptyTensor, transposeVector);
+    Value emptyTensor =
+        tensor::EmptyOp::create(rewriter, loc, paddedTensorType.getShape(),
+                                paddedTensorType.getElementType());
+    TransposeOp unTransposeOp = linalg::TransposeOp::create(
+        rewriter, loc, newResult, emptyTensor, transposeVector);
     newResult = unTransposeOp.getResult()[0];
     transposeOps.push_back(unTransposeOp);
   }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
index f2e51c29f3241..70bc7b6360ceb 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
@@ -53,9 +53,9 @@ static scf::ForOp replaceWithDifferentYield(RewriterBase &rewriter,
   assert(index < inits.size());
   inits[index] = newInitOperand;
 
-  scf::ForOp newLoop = rewriter.create<scf::ForOp>(
-      loop.getLoc(), loop.getLowerBound(), loop.getUpperBound(), loop.getStep(),
-      inits, [](OpBuilder &, Location, Value, ValueRange) {});
+  scf::ForOp newLoop = scf::ForOp::create(
+      rewriter, loop.getLoc(), loop.getLowerBound(), loop.getUpperBound(),
+      loop.getStep(), inits, [](OpBuilder &, Location, Value, ValueRange) {});
 
   // Generate the new yield with the replaced operand.
   auto yieldOp = cast<scf::YieldOp>(loop.getBody()->getTerminator());
diff --git a/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp b/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
index 1f3336d2bfbb9..39cc21df40583 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
@@ -60,9 +60,9 @@ struct InlineScalarOperands : public OpRewritePattern<GenericOp> {
 
     Location loc = genericOp->getLoc();
     SmallVector<Value> outputOperands = genericOp.getOutputs();
-    auto newOp = rewriter.create<GenericOp>(
-        loc, genericOp->getResultTypes(), newOperands, outputOperands,
-        newIndexingMaps, genericOp.getIteratorTypesArray());
+    auto newOp = GenericOp::create(rewriter, loc, genericOp->getResultTypes(),
+                                   newOperands, outputOperands, newIndexingMaps,
+                                   genericOp.getIteratorTypesArray());
     rewriter.cloneRegionBefore(genericOp.getRegion(), newOp.getRegion(),
                                newOp.getRegion().begin());
 
@@ -77,11 +77,11 @@ struct InlineScalarOperands : public OpRewritePattern<GenericOp> {
       SmallVector<Value> indicesValues;
       for (auto idx : indices)
         indicesValues.emplace_back(
-            rewriter.create<arith::ConstantIndexOp>(loc, idx));
+            arith::ConstantIndexOp::create(rewriter, loc, idx));
       Value scalarValue = opOperand->get();
       if (isa<RankedTensorType>(scalarValue.getType())) {
-        scalarValue =
-            rewriter.create<tensor::ExtractOp>(loc, scalarValue, indicesValues);
+        scalarValue = tensor::ExtractOp::create(rewriter, loc, scalarValue,
+                                                indicesValues);
       }
       body->getArgument(idx).replaceAllUsesWith(scalarValue);
       body->eraseArgument(idx);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp b/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
index a92a0c83e0316..96e6eee6d7a0d 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
@@ -88,7 +88,8 @@ mlir::linalg::interchangeGenericOp(RewriterBase &rewriter, GenericOp genericOp,
       allIndices.reserve(genericOp.getNumLoops());
       llvm::transform(llvm::seq<uint64_t>(0, genericOp.getNumLoops()),
                       std::back_inserter(allIndices), [&](uint64_t dim) {
-                        return rewriter.create<IndexOp>(indexOp->getLoc(), dim);
+                        return IndexOp::create(rewriter, indexOp->getLoc(),
+                                               dim);
                       });
       rewriter.replaceOpWithNewOp<affine::AffineApplyOp>(
           indexOp, permutationMap.getSubMap(indexOp.getDim()), allIndices);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
index 488041a43a2ef..38f1a8b7247eb 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -49,7 +49,7 @@ static SmallVector<Value> makeCanonicalAffineApplies(OpBuilder &b, Location loc,
     auto exprMap = AffineMap::get(dims, map.getNumSymbols(), e);
     SmallVector<Value> operands(vals);
     affine::canonicalizeMapAndOperands(&exprMap, &operands);
-    res.push_back(b.create<affine::AffineApplyOp>(loc, exprMap, operands));
+    res.push_back(affine::AffineApplyOp::create(b, loc, exprMap, operands));
   }
   return res;
 }
@@ -70,8 +70,9 @@ static void inlineRegionAndEmitStore(OpBuilder &b, Location loc, OpType op,
   Operation *terminator = block.getTerminator();
   for (OpOperand &operand : terminator->getOpOperands()) {
     Value toStore = map.lookupOrDefault(operand.get());
-    b.create<StoreOpTy>(loc, toStore, outputBuffers[operand.getOperandNumber()],
-                        indexing[operand.getOperandNumber()]);
+    StoreOpTy::create(b, loc, toStore,
+                      outputBuffers[operand.getOperandNumber()],
+                      indexing[operand.getOperandNumber()]);
   }
 }
 
@@ -145,7 +146,7 @@ static void emitScalarImplementation(OpBuilder &b, Location loc,
     auto indexing = makeCanonicalAffineApplies(
         b, loc, linalgOp.getMatchingIndexingMap(inputOperand), allIvsPlusDims);
     indexedValues.push_back(
-        b.create<LoadOpTy>(loc, inputOperand->get(), indexing));
+        LoadOpTy::create(b, loc, inputOperand->get(), indexing));
   }
   // 1.b. Emit load from output views.
   for (OpOperand &outputOperand : linalgOp.getDpsInitsMutable()) {
@@ -153,7 +154,7 @@ static void emitScalarImplementation(OpBuilder &b, Location loc,
         b, loc, linalgOp.getMatchingIndexingMap(&outputOperand),
         allIvsPlusDims);
     indexedValues.push_back(
-        b.create<LoadOpTy>(loc, outputOperand.get(), indexing));
+        LoadOpTy::create(b, loc, outputOperand.get(), indexing));
   }
 
   // TODO: When a region inliner exists, use it.
diff --git a/mlir/lib/Dialect/Linalg/Transforms/MeshShardingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/MeshShardingInterfaceImpl.cpp
index 24b8765284fa5..83d12e314a36f 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/MeshShardingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/MeshShardingInterfaceImpl.cpp
@@ -129,17 +129,17 @@ static Value createDestinationPassingStyleInitOperand(
     ImplicitLocOpBuilder &builder) {
   Value processLinearIndexInReductionGroup = mesh::createProcessLinearIndex(
       meshOp.getSymName(), reductionMeshAxes, builder);
-  Value zero = builder.create<arith::ConstantIndexOp>(0);
-  Value isLeadProcess = builder.create<arith::CmpIOp>(
-      builder.getI1Type(), arith::CmpIPredicate::eq,
+  Value zero = arith::ConstantIndexOp::create(builder, 0);
+  Value isLeadProcess = arith::CmpIOp::create(
+      builder, builder.getI1Type(), arith::CmpIPredicate::eq,
       processLinearIndexInReductionGroup, zero);
-  scf::IfOp ifOp = builder.create<scf::IfOp>(spmdizedOperand.getType(),
-                                             isLeadProcess, true, true);
+  scf::IfOp ifOp = scf::IfOp::create(builder, spmdizedOperand.getType(),
+                                     isLeadProcess, true, true);
   // Then block.
   {
     OpBuilder::InsertionGuard insertionGuard(builder);
     builder.setInsertionPointToEnd(&ifOp.getThenRegion().front());
-    builder.create<scf::YieldOp>(spmdizedOperand);
+    scf::YieldOp::create(builder, spmdizedOperand);
   }
 
   // Else block.
@@ -155,14 +155,14 @@ static Value createDestinationPassingStyleInitOperand(
     std::optional<TypedAttr> neutralEl =
         arith::getNeutralElement(combinerOps[0]);
 
-    Value init = builder.create<tensor::EmptyOp>(op.getLoc(), shape,
-                                                 neutralEl.value().getType());
+    Value init = tensor::EmptyOp::create(builder, op.getLoc(), shape,
+                                         neutralEl.value().getType());
     Value constant =
-        builder.create<arith::ConstantOp>(op.getLoc(), neutralEl.value());
-    Value fill = builder.create<linalg::FillOp>(op.getLoc(), constant, init)
+        arith::ConstantOp::create(builder, op.getLoc(), neutralEl.value());
+    Value fill = linalg::FillOp::create(builder, op.getLoc(), constant, init)
                      .getResult(0);
 
-    builder.create<scf::YieldOp>(fill);
+    scf::YieldOp::create(builder, fill);
   }
   return ifOp.getResult(0);
 }
@@ -194,9 +194,9 @@ static void createAllReduceForResultsWithoutPartialShardings(
        llvm::zip_equal(unshardedOp->getResults(), resultShardings)) {
     Value spmdizedLinalgOpResult =
         spmdizationMap.lookup(unshardedLinalgOpResult);
-    Value reducedValue = builder.create<mesh::AllReduceOp>(
-        spmdizedLinalgOpResult, resultSharding.getMesh(), opReductionMeshAxes,
-        reductionKind);
+    Value reducedValue = mesh::AllReduceOp::create(
+        builder, spmdizedLinalgOpResult, resultSharding.getMesh(),
+        opReductionMeshAxes, reductionKind);
     spmdizationMap.map(unshardedLinalgOpResult, reducedValue);
   }
 }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/NamedOpConversions.cpp b/mlir/lib/Dialect/Linalg/Transforms/NamedOpConversions.cpp
index bb1e974391878..a2bd9d92815a0 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/NamedOpConversions.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/NamedOpConversions.cpp
@@ -59,8 +59,8 @@ matchAndReplaceDepthwiseConv(Operation *operation, Value input, Value kernel,
   auto newKernelTy = RankedTensorType::get(
       {kernelTy.getDimSize(0), kernelTy.getDimSize(1), kernelTy.getDimSize(2)},
       kernelTy.getElementType());
-  auto collapsedKernel = rewriter.create<tensor::CollapseShapeOp>(
-      loc, newKernelTy, kernel, collapsedKernelDims);
+  auto collapsedKernel = tensor::CollapseShapeOp::create(
+      rewriter, loc, newKernelTy, kernel, collapsedKernelDims);
 
   // Collapse init dims.
   SmallVector<ReassociationIndices, 4> collapsedInitDims = {
@@ -70,22 +70,23 @@ matchAndReplaceDepthwiseConv(Operation *operation, Value input, Value kernel,
       RankedTensorType::get({initTy.getDimSize(0), initTy.getDimSize(1),
                              initTy.getDimSize(2), initTy.getDimSize(3)},
                             initTy.getElementType());
-  auto collapsedInit = rewriter.create<tensor::CollapseShapeOp>(
-      loc, newInitTy, init, collapsedInitDims);
+  auto collapsedInit = tensor::CollapseShapeOp::create(rewriter, loc, newInitTy,
+                                                       init, collapsedInitDims);
 
   SmallVector<NamedAttribute> preservedAttrs;
   Operation *newConv =
       TypeSwitch<Operation *, Operation *>(operation)
           .Case<DepthwiseConv2DNhwcHwcmOp>([&](auto op) {
             preservedAttrs = getPrunedAttributeList(op);
-            return rewriter.create<DepthwiseConv2DNhwcHwcOp>(
-                loc, newInitTy, ValueRange{input, collapsedKernel},
+            return DepthwiseConv2DNhwcHwcOp::create(
+                rewriter, loc, newInitTy, ValueRange{input, collapsedKernel},
                 ValueRange{collapsedInit}, stride, dilation);
           })
           .Case<DepthwiseConv2DNhwcHwcmQOp>([&](auto op) {
             preservedAttrs = getPrunedAttributeList(op);
-            return rewriter.create<DepthwiseConv2DNhwcHwcQOp>(
-                loc, newInitTy, ValueRange{input, collapsedKernel, iZp, kZp},
+            return DepthwiseConv2DNhwcHwcQOp::create(
+                rewriter, loc, newInitTy,
+                ValueRange{input, collapsedKernel, iZp, kZp},
                 ValueRange{collapsedInit}, stride, dilation);
           })
           .Default([](Operation *op) { return nullptr; });
diff --git a/mlir/lib/Dialect/Linalg/Transforms/PackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Linalg/Transforms/PackAndUnpackPatterns.cpp
index 2afa2f9b71c2a..0415057eda86b 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/PackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/PackAndUnpackPatterns.cpp
@@ -143,8 +143,8 @@ struct SimplifyUnPackToCollapseShape : public OpRewritePattern<UnPackOp> {
                        Type newOperandType, ArrayAttr reassociation) const {
     if (operand.getType() == newOperandType)
       return operand;
-    return rewriter.create<tensor::CollapseShapeOp>(loc, newOperandType,
-                                                    operand, reassociation);
+    return tensor::CollapseShapeOp::create(rewriter, loc, newOperandType,
+                                           operand, reassociation);
   }
 
   /// Returns success() if it is unpacking on the innermost dimension.
@@ -265,8 +265,8 @@ struct FoldUnpackWithExtractSliceOp
 
     // Create a new empty output tensor.
     Type elementType = unpackOp.getDestType().getElementType();
-    Value output = rewriter.create<tensor::EmptyOp>(
-        sliceOp.getLoc(), sliceOp.getMixedSizes(), elementType);
+    Value output = tensor::EmptyOp::create(
+        rewriter, sliceOp.getLoc(), sliceOp.getMixedSizes(), elementType);
     rewriter.replaceOpWithNewOp<UnPackOp>(
         sliceOp, unpackOp.getSource(), output, unpackOp.getInnerDimsPos(),
         unpackOp.getMixedTiles(), unpackOp.getOuterDimsPerm());
@@ -529,8 +529,8 @@ struct FoldConsumerUnPackWithProducerLinalgTransposeOp
 
     auto elemType =
         cast<ShapedType>(unPackOp->getResultTypes()[0]).getElementType();
-    Value output = rewriter.create<tensor::EmptyOp>(
-        unPackOp->getLoc(), unpackOpResultDims[0], elemType);
+    Value output = tensor::EmptyOp::create(rewriter, unPackOp->getLoc(),
+                                           unpackOpResultDims[0], elemType);
 
     rewriter.replaceOpWithNewOp<UnPackOp>(
         unPackOp, linalgOp->getOperand(0), output, newInnerDimsPosVec,
diff --git a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
index 5eb3761f7aca1..2c62cb628a7dd 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
@@ -192,11 +192,11 @@ static Value padOperand(RewriterBase &rewriter, TilingInterface opToPad,
   if (auto complexTy =
           dyn_cast<ComplexType>(getElementTypeOrSelf(v.getType()))) {
     auto complexAttr = cast<ArrayAttr>(paddingValueAttr);
-    paddingValue = rewriter.create<complex::ConstantOp>(opToPad.getLoc(),
-                                                        complexTy, complexAttr);
+    paddingValue = complex::ConstantOp::create(rewriter, opToPad.getLoc(),
+                                               complexTy, complexAttr);
   } else {
-    paddingValue = rewriter.create<arith::ConstantOp>(
-        opToPad.getLoc(), cast<TypedAttr>(paddingValueAttr));
+    paddingValue = arith::ConstantOp::create(rewriter, opToPad.getLoc(),
+                                             cast<TypedAttr>(paddingValueAttr));
   }
 
   // Pad the operand to the bounding box defined by `paddedShape`.
@@ -323,8 +323,8 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, TilingInterface opToPad,
     int64_t rank = cast<RankedTensorType>(paddedResult.getType()).getRank();
     SmallVector<OpFoldResult> offsets(rank, rewriter.getIndexAttr(0));
     SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
-    paddedSubtensorResults.push_back(rewriter.create<tensor::ExtractSliceOp>(
-        loc, paddedResult, offsets, reifiedResultShapes[resultNumber],
+    paddedSubtensorResults.push_back(tensor::ExtractSliceOp::create(
+        rewriter, loc, paddedResult, offsets, reifiedResultShapes[resultNumber],
         strides));
   }
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp b/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
index dc9e11eccac4d..b5c5aea56a998 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
@@ -219,11 +219,11 @@ static FailureOr<Value> padOperandToSmallestStaticBoundingBox(
   if (auto complexTy = dyn_cast<ComplexType>(
           getElementTypeOrSelf(opOperand->get().getType()))) {
     auto complexAttr = cast<ArrayAttr>(paddingAttr);
-    paddingValue = rewriter.create<complex::ConstantOp>(opToPad.getLoc(),
-                                                        complexTy, complexAttr);
+    paddingValue = complex::ConstantOp::create(rewriter, opToPad.getLoc(),
+                                               complexTy, complexAttr);
   } else {
-    paddingValue = rewriter.create<arith::ConstantOp>(
-        opToPad.getLoc(), cast<TypedAttr>(paddingAttr));
+    paddingValue = arith::ConstantOp::create(rewriter, opToPad.getLoc(),
+                                             cast<TypedAttr>(paddingAttr));
   }
 
   // Computes the padded shape.
@@ -313,8 +313,8 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
     int64_t rank = cast<RankedTensorType>(paddedResult.getType()).getRank();
     SmallVector<OpFoldResult> offsets(rank, rewriter.getIndexAttr(0));
     SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
-    paddedSubtensorResults.push_back(rewriter.create<tensor::ExtractSliceOp>(
-        loc, paddedResult, offsets, reifiedResultShapes[resultNumber],
+    paddedSubtensorResults.push_back(tensor::ExtractSliceOp::create(
+        rewriter, loc, paddedResult, offsets, reifiedResultShapes[resultNumber],
         strides));
   }
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index 0433016b3f5cc..f05ffa8334d9c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -62,11 +62,11 @@ static Value allocBuffer(ImplicitLocOpBuilder &b,
     staticBufferType =
         MemRefType::Builder(staticBufferType).setMemorySpace(memorySpaceAttr);
     if (options.useAlloca) {
-      return b.create<memref::AllocaOp>(staticBufferType, ValueRange{},
-                                        alignmentAttr);
+      return memref::AllocaOp::create(b, staticBufferType, ValueRange{},
+                                      alignmentAttr);
     }
-    return b.create<memref::AllocOp>(staticBufferType, ValueRange{},
-                                     alignmentAttr);
+    return memref::AllocOp::create(b, staticBufferType, ValueRange{},
+                                   alignmentAttr);
   }
 
   // Fallback dynamic buffer.
@@ -75,10 +75,10 @@ static Value allocBuffer(ImplicitLocOpBuilder &b,
   dynamicBufferType =
       MemRefType::Builder(dynamicBufferType).setMemorySpace(memorySpaceAttr);
   Value mul = b.createOrFold<arith::MulIOp>(
-      b.create<arith::ConstantIndexOp>(width), allocSize);
+      arith::ConstantIndexOp::create(b, width), allocSize);
   if (options.useAlloca)
-    return b.create<memref::AllocaOp>(dynamicBufferType, mul, alignmentAttr);
-  return b.create<memref::AllocOp>(dynamicBufferType, mul, alignmentAttr);
+    return memref::AllocaOp::create(b, dynamicBufferType, mul, alignmentAttr);
+  return memref::AllocOp::create(b, dynamicBufferType, mul, alignmentAttr);
 }
 
 /// Default allocation callback function. This allocates a promoted buffer when
@@ -91,8 +91,8 @@ static std::optional<Value> defaultAllocBufferCallBack(
     std::optional<unsigned> alignment, DataLayout &layout) {
   ShapedType viewType = subView.getType();
   ImplicitLocOpBuilder b(subView.getLoc(), builder);
-  auto zero = b.create<arith::ConstantIndexOp>(0);
-  auto one = b.create<arith::ConstantIndexOp>(1);
+  auto zero = arith::ConstantIndexOp::create(b, 0);
+  auto one = arith::ConstantIndexOp::create(b, 1);
 
   Attribute memorySpaceAttr;
   if (options.memorySpace.has_value())
@@ -122,8 +122,8 @@ defaultDeallocBufferCallBack(const LinalgPromotionOptions &options,
                              OpBuilder &b, Value fullLocalView) {
   if (!options.useAlloca) {
     auto viewOp = cast<memref::ViewOp>(fullLocalView.getDefiningOp());
-    b.create<memref::DeallocOp>(viewOp.getSource().getLoc(),
-                                viewOp.getSource());
+    memref::DeallocOp::create(b, viewOp.getSource().getLoc(),
+                              viewOp.getSource());
   }
   return success();
 }
@@ -210,7 +210,7 @@ LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
   Location loc = linalgOp.getLoc();
   auto defaultCopyCallBack = [loc](OpBuilder &b, Value src,
                                    Value dst) -> LogicalResult {
-    b.create<linalg::CopyOp>(loc, src, dst);
+    linalg::CopyOp::create(b, loc, src, dst);
     return success();
   };
   copyInFn = (options.copyInFn ? *(options.copyInFn) : defaultCopyCallBack);
@@ -264,7 +264,7 @@ FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
               /*stopCondition=*/nullptr, /*closedUB=*/true);
       size = failed(upperBound)
                  ? getValueOrCreateConstantIndexOp(b, loc, rangeValue.size)
-                 : b.create<arith::ConstantIndexOp>(loc, *upperBound);
+                 : arith::ConstantIndexOp::create(b, loc, *upperBound);
     }
     LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
     fullSizes.push_back(size);
@@ -309,23 +309,23 @@ promoteSubViews(ImplicitLocOpBuilder &b,
     Value fillVal =
         llvm::TypeSwitch<Type, Value>(subviewEltType)
             .Case([&](FloatType t) {
-              return b.create<arith::ConstantOp>(FloatAttr::get(t, 0.0));
+              return arith::ConstantOp::create(b, FloatAttr::get(t, 0.0));
             })
             .Case([&](IntegerType t) {
-              return b.create<arith::ConstantOp>(IntegerAttr::get(t, 0));
+              return arith::ConstantOp::create(b, IntegerAttr::get(t, 0));
             })
             .Case([&](ComplexType t) {
               Value tmp;
               if (auto et = dyn_cast<FloatType>(t.getElementType()))
-                tmp = b.create<arith::ConstantOp>(FloatAttr::get(et, 0.0));
+                tmp = arith::ConstantOp::create(b, FloatAttr::get(et, 0.0));
               else if (auto et = cast<IntegerType>(t.getElementType()))
-                tmp = b.create<arith::ConstantOp>(IntegerAttr::get(et, 0));
-              return b.create<complex::CreateOp>(t, tmp, tmp);
+                tmp = arith::ConstantOp::create(b, IntegerAttr::get(et, 0));
+              return complex::CreateOp::create(b, t, tmp, tmp);
             })
             .Default([](auto) { return Value(); });
     if (!fillVal)
       return failure();
-    b.create<linalg::FillOp>(fillVal, promotionInfo->fullLocalView);
+    linalg::FillOp::create(b, fillVal, promotionInfo->fullLocalView);
   }
 
   // Copy data into the promoted buffers. Use callback if provided.
@@ -458,9 +458,9 @@ static std::optional<Value> allocateSubviewGPUMemoryInAddressSpace(
       gpu::AddressSpaceAttr::get(builder.getContext(), addressSpace));
   Value buffer;
   if (addressSpace == gpu::GPUDialect::getWorkgroupAddressSpace()) {
-    buffer = builder.create<memref::AllocOp>(funcOp.getLoc(), type);
+    buffer = memref::AllocOp::create(builder, funcOp.getLoc(), type);
   } else if (addressSpace == gpu::GPUDialect::getPrivateAddressSpace()) {
-    buffer = builder.create<memref::AllocaOp>(funcOp.getLoc(), type);
+    buffer = memref::AllocaOp::create(builder, funcOp.getLoc(), type);
   } else {
     return std::nullopt;
   }
@@ -486,9 +486,9 @@ LogicalResult mlir::linalg::deallocateWorkgroupMemory(OpBuilder &,
 /// the copy operation to ensure data integrity.
 LogicalResult mlir::linalg::copyToWorkgroupMemory(OpBuilder &b, Value src,
                                                   Value dst) {
-  b.create<gpu::BarrierOp>(src.getLoc());
-  Operation *copyOp = b.create<memref::CopyOp>(src.getLoc(), src, dst);
-  b.create<gpu::BarrierOp>(copyOp->getLoc());
+  gpu::BarrierOp::create(b, src.getLoc());
+  Operation *copyOp = memref::CopyOp::create(b, src.getLoc(), src, dst);
+  gpu::BarrierOp::create(b, copyOp->getLoc());
   return success();
 }
 
@@ -503,7 +503,7 @@ std::optional<Value> mlir::linalg::allocateGPUPrivateMemory(
 /// Normal copy to between src and dst.
 LogicalResult mlir::linalg::copyToGPUPrivateMemory(OpBuilder &b, Value src,
                                                    Value dst) {
-  b.create<memref::CopyOp>(src.getLoc(), src, dst);
+  memref::CopyOp::create(b, src.getLoc(), src, dst);
   return success();
 }
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/RuntimeOpVerification.cpp b/mlir/lib/Dialect/Linalg/Transforms/RuntimeOpVerification.cpp
index b30182dc84079..eac0e47b18a7d 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/RuntimeOpVerification.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/RuntimeOpVerification.cpp
@@ -38,8 +38,8 @@ struct StructuredOpInterface
     SmallVector<Range> loopRanges = linalgOp.createLoopRanges(builder, loc);
     auto [starts, ends, _] = getOffsetsSizesAndStrides(loopRanges);
 
-    auto zero = builder.create<arith::ConstantIndexOp>(loc, 0);
-    auto one = builder.create<arith::ConstantIndexOp>(loc, 1);
+    auto zero = arith::ConstantIndexOp::create(builder, loc, 0);
+    auto one = arith::ConstantIndexOp::create(builder, loc, 1);
 
     // Subtract one from the loop ends before composing with the indexing map
     transform(ends, ends.begin(), [&](OpFoldResult end) {
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Split.cpp b/mlir/lib/Dialect/Linalg/Transforms/Split.cpp
index 671dea8bb415f..76d0ba97be4db 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Split.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Split.cpp
@@ -52,8 +52,8 @@ createSplitPart(RewriterBase &b, Location loc, TilingInterface op,
       return nullptr;
     SmallVector<OpFoldResult> resultStrides(resultOffsets.size(),
                                             b.getIndexAttr(1));
-    Value inserted = b.create<tensor::InsertSliceOp>(
-        loc, result, resultOperands[index], resultOffsets, resultSizes,
+    Value inserted = tensor::InsertSliceOp::create(
+        b, loc, result, resultOperands[index], resultOffsets, resultSizes,
         resultStrides);
     results.push_back(inserted);
   }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp b/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
index 5bfdbc6d0bb59..b8f862001bccf 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
@@ -115,8 +115,8 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReduction(
         newShape,
         cast<RankedTensorType>(operand->get().getType()).getElementType());
 
-    Value newInput = b.create<tensor::ExpandShapeOp>(
-        loc, newType, operand->get(), reassociation);
+    Value newInput = tensor::ExpandShapeOp::create(
+        b, loc, newType, operand->get(), reassociation);
     newInputs.push_back(newInput);
   }
 
@@ -140,18 +140,18 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReduction(
   }
   Value emptyOrAllocTensor;
   if (useAlloc) {
-    emptyOrAllocTensor = b.create<bufferization::AllocTensorOp>(
-        loc,
+    emptyOrAllocTensor = bufferization::AllocTensorOp::create(
+        b, loc,
         RankedTensorType::get(newOutputShape,
                               op.getRegionOutputArgs()[0].getType()),
         ValueRange{});
   } else {
-    emptyOrAllocTensor = b.create<tensor::EmptyOp>(
-        loc, newOutputShape, op.getRegionOutputArgs()[0].getType());
+    emptyOrAllocTensor = tensor::EmptyOp::create(
+        b, loc, newOutputShape, op.getRegionOutputArgs()[0].getType());
   }
-  Value constantOp = b.create<arith::ConstantOp>(loc, *identity);
+  Value constantOp = arith::ConstantOp::create(b, loc, *identity);
   Value identityTensor =
-      b.create<linalg::FillOp>(op->getLoc(), constantOp, emptyOrAllocTensor)
+      linalg::FillOp::create(b, op->getLoc(), constantOp, emptyOrAllocTensor)
           .getResult(0);
 
   newMaps.push_back(AffineMap::get(oldOutputMap.getNumDims() + 1, 0, outputExpr,
@@ -168,8 +168,8 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReduction(
   }
   // Create the new op matching the original op with an extra parallel
   // dimension.
-  GenericOp genericOp = b.create<GenericOp>(
-      loc, TypeRange({emptyOrAllocTensor.getType()}), newInputs,
+  GenericOp genericOp = GenericOp::create(
+      b, loc, TypeRange({emptyOrAllocTensor.getType()}), newInputs,
       ValueRange({identityTensor}), newMaps, newIteratorTypes);
   b.inlineRegionBefore(op->getRegion(0), genericOp.getRegion(),
                        genericOp.getRegion().begin());
@@ -191,14 +191,14 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReduction(
   AffineMap outputMap = AffineMap::get(intermRank, 0, exprs, op.getContext());
   SmallVector<AffineMap> reductionMaps = {inputMap, outputMap};
 
-  auto reduction = b.create<GenericOp>(
-      loc, op->getResultTypes(), ValueRange({genericOp.getResult(0)}),
+  auto reduction = GenericOp::create(
+      b, loc, op->getResultTypes(), ValueRange({genericOp.getResult(0)}),
       op.getDpsInits(), reductionMaps, reductionIteratorTypes,
       [reductionOp](OpBuilder &b, Location loc, ValueRange inputs) {
         Operation *clonedReductionOp = b.clone(*reductionOp);
         clonedReductionOp->setOperand(0, inputs[0]);
         clonedReductionOp->setOperand(1, inputs[1]);
-        b.create<linalg::YieldOp>(loc, clonedReductionOp->getResult(0));
+        linalg::YieldOp::create(b, loc, clonedReductionOp->getResult(0));
       });
   b.replaceOp(op, reduction.getResults());
 
@@ -318,14 +318,14 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
     Value emptyOrAllocTensor;
     if (useAlloc) {
       emptyOrAllocTensor =
-          b.create<bufferization::AllocTensorOp>(loc, newT, dims);
+          bufferization::AllocTensorOp::create(b, loc, newT, dims);
     } else {
-      emptyOrAllocTensor = b.create<tensor::EmptyOp>(loc, newT.getShape(),
-                                                     t.getElementType(), dims);
+      emptyOrAllocTensor = tensor::EmptyOp::create(b, loc, newT.getShape(),
+                                                   t.getElementType(), dims);
     }
-    Value constantOp = b.create<arith::ConstantOp>(loc, std::get<1>(it));
-    fillOps.push_back(
-        b.create<linalg::FillOp>(op->getLoc(), constantOp, emptyOrAllocTensor));
+    Value constantOp = arith::ConstantOp::create(b, loc, std::get<1>(it));
+    fillOps.push_back(linalg::FillOp::create(b, op->getLoc(), constantOp,
+                                             emptyOrAllocTensor));
     newOutputs.push_back(fillOps.back().getResult(0));
     emptyOrAllocTensorOps.push_back(emptyOrAllocTensor.getDefiningOp());
   }
@@ -354,8 +354,8 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
   SmallVector<Value> newInputs = op.getDpsInputs();
   // Add a single shape-only tensor to carry the dimensions without resorting to
   // more complex inversions.
-  newInputs.push_back(b.create<tensor::EmptyOp>(
-      loc, ArrayRef<int64_t>{reductionDimSize / splitFactor, splitFactor},
+  newInputs.push_back(tensor::EmptyOp::create(
+      b, loc, ArrayRef<int64_t>{reductionDimSize / splitFactor, splitFactor},
       b.getIntegerType(1)));
   // Output tensors are already good to go.
 
@@ -365,8 +365,8 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
   iteratorTypes.insert(iteratorTypes.begin() + reductionDimPos,
                        utils::IteratorType::parallel);
   GenericOp genericOp =
-      b.create<GenericOp>(loc, ValueRange(newOutputs).getTypes(), newInputs,
-                          newOutputs, newMaps, iteratorTypes);
+      GenericOp::create(b, loc, ValueRange(newOutputs).getTypes(), newInputs,
+                        newOutputs, newMaps, iteratorTypes);
   b.inlineRegionBefore(op->getRegion(0), genericOp.getRegion(),
                        genericOp.getRegion().begin());
   genericOp.getRegion().front().insertArgument(reductionDimPos,
@@ -396,7 +396,7 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
         utils::IteratorType::reduction;
 
     // clang-format off
-    auto reductionOp = b.create<GenericOp>(
+    auto reductionOp = GenericOp::create(b,
         loc,
         originalOutputType,
         reindexedOutput,
@@ -407,7 +407,7 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
           Operation *clonedReductionOp = b.clone(*combinerOp);
           clonedReductionOp->setOperand(0, bbArgs[0]);
           clonedReductionOp->setOperand(1, bbArgs[1]);
-          b.create<linalg::YieldOp>(loc, clonedReductionOp->getResult(0));
+          linalg::YieldOp::create(b, loc, clonedReductionOp->getResult(0));
         });
     // clang-format on
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/SwapExtractSliceWithFillPatterns.cpp b/mlir/lib/Dialect/Linalg/Transforms/SwapExtractSliceWithFillPatterns.cpp
index d35aad514e884..792ca3e5123ef 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/SwapExtractSliceWithFillPatterns.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/SwapExtractSliceWithFillPatterns.cpp
@@ -29,10 +29,10 @@ struct SwapExtractSliceOfFill final
     if (!fillOp || !fillOp->hasOneUse())
       return failure();
 
-    auto newExtractOp = rewriter.create<tensor::ExtractSliceOp>(
-        extractOp.getLoc(), extractOp.getType(), fillOp.getOutputs()[0],
-        extractOp.getMixedOffsets(), extractOp.getMixedSizes(),
-        extractOp.getMixedStrides());
+    auto newExtractOp = tensor::ExtractSliceOp::create(
+        rewriter, extractOp.getLoc(), extractOp.getType(),
+        fillOp.getOutputs()[0], extractOp.getMixedOffsets(),
+        extractOp.getMixedSizes(), extractOp.getMixedStrides());
     rewriter.replaceOpWithNewOp<FillOp>(extractOp, fillOp.getInputs(),
                                         ValueRange{newExtractOp.getResult()});
     return success();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 4741afe8a417d..705d6f26efd29 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -94,11 +94,11 @@ static void emitIsPositiveIndexAssertion(ImplicitLocOpBuilder &b,
     return;
   }
 
-  Value zero = b.create<arith::ConstantIndexOp>(0);
-  Value condition = b.create<arith::CmpIOp>(arith::CmpIPredicate::sgt,
-                                            cast<Value>(value), zero);
-  b.create<cf::AssertOp>(
-      condition,
+  Value zero = arith::ConstantIndexOp::create(b, 0);
+  Value condition = arith::CmpIOp::create(b, arith::CmpIPredicate::sgt,
+                                          cast<Value>(value), zero);
+  cf::AssertOp::create(
+      b, condition,
       b.getStringAttr("expected strictly positive tile size and divisor"));
 }
 
@@ -317,11 +317,12 @@ mlir::linalg::computeMultiTileSizes(OpBuilder &builder, LinalgOp op,
     Value coveredSize =
         apply(s0 * s1 + s2 * s3, {spec.lowTileSize, spec.lowTripCount,
                                   spec.highTileSize, spec.highTripCount});
-    Value equals = b.create<arith::CmpIOp>(arith::CmpIPredicate::eq,
-                                           coveredSize, tripCount);
-    b.create<cf::AssertOp>(
-        equals, builder.getStringAttr(
-                    "could not compute dynamic multi-size tile shapes"));
+    Value equals = arith::CmpIOp::create(b, arith::CmpIPredicate::eq,
+                                         coveredSize, tripCount);
+    cf::AssertOp::create(
+        b, equals,
+        builder.getStringAttr(
+            "could not compute dynamic multi-size tile shapes"));
   }
 
   return spec;
@@ -656,8 +657,8 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
       getValueOrCreateConstantIndexOp(b, loc, nonZeroNumThreads);
 
   // 2. Create the ForallOp with an empty region.
-  scf::ForallOp forallOp = b.create<scf::ForallOp>(
-      loc, getAsOpFoldResult(materializedNonZeroNumThreads), initTensors,
+  scf::ForallOp forallOp = scf::ForallOp::create(
+      b, loc, getAsOpFoldResult(materializedNonZeroNumThreads), initTensors,
       mapping);
 
   // 3. Calculate the tile offsets and sizes for the subsequent loop that will
@@ -689,8 +690,8 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
       sizes[reductionDim] = b.getIndexAttr(1);
       outOffsets[reductionDim] = forallOp.getInductionVars()[0];
       // TODO: use SubsetExtractOpInterface once it is available.
-      tiledDpsInitOperands.push_back(b.create<tensor::ExtractSliceOp>(
-          loc, cast<RankedTensorType>(initOperand.getType()),
+      tiledDpsInitOperands.push_back(tensor::ExtractSliceOp::create(
+          b, loc, cast<RankedTensorType>(initOperand.getType()),
           destBbArgs[destNum], outOffsets, sizes, strides));
     }
 
@@ -768,8 +769,8 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
     // 6.b. Parallel insertions are inserted at the end of the combining
     // terminator.
     b.setInsertionPointToEnd(forallOp.getTerminator().getBody());
-    b.create<tensor::ParallelInsertSliceOp>(
-        loc, result, bbArg, resultOffsetsRank, resultSizesRank, strides);
+    tensor::ParallelInsertSliceOp::create(
+        b, loc, result, bbArg, resultOffsetsRank, resultSizesRank, strides);
   }
 
   // 7. Merge the partial reductions.
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
index 28d99b130963a..dad352643abe3 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
@@ -45,7 +45,7 @@ static SmallVector<Value> getIndicesForAccess(OpBuilder &b, Location loc,
   for (auto result : indexingMap.getResults()) {
     AffineMap m = AffineMap::get(indexingMap.getNumDims(),
                                  indexingMap.getNumSymbols(), result);
-    Value v = b.create<affine::AffineApplyOp>(loc, m, ivs);
+    Value v = affine::AffineApplyOp::create(b, loc, m, ivs);
     indices.push_back(v);
   }
   return indices;
@@ -73,9 +73,9 @@ static LogicalResult inlinePayload(OpBuilder &b, LinalgOp linalgOp,
     OpOperand *storeInto = linalgOp.getDpsInitOperand(operand.index());
     auto indices = getIndicesForAccess(
         b, loc, linalgOp.getMatchingIndexingMap(storeInto), ivs);
-    b.create<memref::StoreOp>(
-        loc, toStore, linalgOp.getDpsInitOperand(operand.index())->get(),
-        indices);
+    memref::StoreOp::create(b, loc, toStore,
+                            linalgOp.getDpsInitOperand(operand.index())->get(),
+                            indices);
   }
   return success();
 }
@@ -352,7 +352,7 @@ struct LinalgOpTilingInterface
       SmallVector<Value> indices = getIndicesForAccess(
           builder, linalgOpLoc, linalgOp.getMatchingIndexingMap(&operand), ivs);
       Value load =
-          builder.create<memref::LoadOp>(linalgOpLoc, operand.get(), indices);
+          memref::LoadOp::create(builder, linalgOpLoc, operand.get(), indices);
       indexedValues.push_back(load);
     }
 
@@ -520,10 +520,10 @@ struct LinalgOpPartialReductionInterface
 
       Type elType = getElementTypeOrSelf(result.getType());
       Value emptyTensor =
-          b.create<tensor::EmptyOp>(loc, partialResultShape, elType);
-      Value constantOp = b.create<arith::ConstantOp>(loc, *identity);
+          tensor::EmptyOp::create(b, loc, partialResultShape, elType);
+      Value constantOp = arith::ConstantOp::create(b, loc, *identity);
       auto identityTensor =
-          b.create<linalg::FillOp>(loc, constantOp, emptyTensor);
+          linalg::FillOp::create(b, loc, constantOp, emptyTensor);
       inits.push_back(identityTensor.getResult(0));
     }
 
@@ -575,9 +575,9 @@ struct LinalgOpPartialReductionInterface
       RankedTensorType sliceResultType = RankedTensorType::get(
           sliceInfo.resultShape, valueToTileType.getElementType(),
           valueToTileType.getEncoding());
-      auto sliceOp = b.create<tensor::ExtractSliceOp>(
-          loc, sliceResultType, valueToTile, sliceInfo.offsets, sliceInfo.sizes,
-          sliceInfo.strides);
+      auto sliceOp = tensor::ExtractSliceOp::create(
+          b, loc, sliceResultType, valueToTile, sliceInfo.offsets,
+          sliceInfo.sizes, sliceInfo.strides);
       tiledInits.push_back(sliceOp.getResult());
       generatedSlices.push_back(sliceOp);
     }
@@ -604,8 +604,8 @@ struct LinalgOpPartialReductionInterface
     auto resultTypes = ValueRange(tiledInits).getTypes();
     if (tilingStrategy ==
         ReductionTilingStrategy::PartialReductionOuterReduction) {
-      auto genericOp = b.create<GenericOp>(
-          loc, resultTypes, tiledInputs, tiledInits, newMaps, newIteratorTypes);
+      auto genericOp = GenericOp::create(b, loc, resultTypes, tiledInputs,
+                                         tiledInits, newMaps, newIteratorTypes);
       IRMapping mapping;
       op->getRegion(0).cloneInto(&genericOp.getRegion(),
                                  genericOp.getRegion().begin(), mapping);
@@ -649,8 +649,8 @@ struct LinalgOpPartialReductionInterface
         }
       }
 
-      auto reduction = b.create<linalg::ReduceOp>(
-          loc, partialResult, init, partialReductionDims,
+      auto reduction = linalg::ReduceOp::create(
+          b, loc, partialResult, init, partialReductionDims,
           [&linalgOp, &initIdx](OpBuilder &b, Location loc, ValueRange inputs) {
             // Get the combiner op.
             SmallVector<Operation *, 4> combinerOps;
@@ -660,7 +660,7 @@ struct LinalgOpPartialReductionInterface
             // Combine the input at idx and output at numInits + idx.
             clonedReductionOp->setOperand(0, inputs[0]);
             clonedReductionOp->setOperand(1, inputs[1]);
-            b.create<linalg::YieldOp>(loc, clonedReductionOp->getResult(0));
+            linalg::YieldOp::create(b, loc, clonedReductionOp->getResult(0));
           });
 
       mergeOperations.push_back(reduction);
@@ -791,8 +791,8 @@ struct PackOpTiling
     SmallVector<OpFoldResult> strides(inputRank, oneAttr);
 
     SmallVector<Value> tiledOperands;
-    auto sourceSlice = b.create<tensor::ExtractSliceOp>(
-        loc, packOp.getSource(), inputIndices, inputSizes, strides);
+    auto sourceSlice = tensor::ExtractSliceOp::create(
+        b, loc, packOp.getSource(), inputIndices, inputSizes, strides);
     tiledOperands.push_back(sourceSlice);
 
     SmallVector<OpFoldResult> outputOffsets, outputSizes;
@@ -801,8 +801,8 @@ struct PackOpTiling
       return {};
 
     strides.append(packOp.getDestRank() - inputRank, oneAttr);
-    auto outSlice = b.create<tensor::ExtractSliceOp>(
-        loc, packOp.getDest(), outputOffsets, outputSizes, strides);
+    auto outSlice = tensor::ExtractSliceOp::create(
+        b, loc, packOp.getDest(), outputOffsets, outputSizes, strides);
     tiledOperands.push_back(outSlice);
 
     if (auto val = packOp.getPaddingValue())
@@ -810,8 +810,8 @@ struct PackOpTiling
     for (auto tile : packOp.getInnerTiles())
       tiledOperands.push_back(tile);
 
-    Operation *tiledPackOp = b.create<PackOp>(
-        loc, TypeRange{outSlice.getType()}, tiledOperands, op->getAttrs());
+    Operation *tiledPackOp = PackOp::create(
+        b, loc, TypeRange{outSlice.getType()}, tiledOperands, op->getAttrs());
 
     return TilingResult{
         {tiledPackOp},
@@ -1007,8 +1007,8 @@ struct PackOpTiling
     SmallVector<OpFoldResult> strides(inputRank, oneAttr);
 
     SmallVector<Value> tiledOperands;
-    auto sourceSlice = b.create<tensor::ExtractSliceOp>(
-        loc, packOp.getSource(), offsets, sizes, strides);
+    auto sourceSlice = tensor::ExtractSliceOp::create(
+        b, loc, packOp.getSource(), offsets, sizes, strides);
     tiledOperands.push_back(sourceSlice);
 
     SmallVector<OpFoldResult> outerDimOffsets, outerDimSizes;
@@ -1023,8 +1023,8 @@ struct PackOpTiling
       return failure();
 
     strides.append(packOp.getDestRank() - inputRank, oneAttr);
-    auto outSlice = b.create<tensor::ExtractSliceOp>(
-        loc, packOp.getDest(), outputOffsets, outputSizes, strides);
+    auto outSlice = tensor::ExtractSliceOp::create(
+        b, loc, packOp.getDest(), outputOffsets, outputSizes, strides);
     tiledOperands.push_back(outSlice);
 
     if (auto val = packOp.getPaddingValue())
@@ -1032,8 +1032,8 @@ struct PackOpTiling
     for (auto tile : packOp.getInnerTiles())
       tiledOperands.push_back(tile);
 
-    Operation *tiledPackOp = b.create<PackOp>(
-        loc, TypeRange{outSlice.getType()}, tiledOperands, op->getAttrs());
+    Operation *tiledPackOp = PackOp::create(
+        b, loc, TypeRange{outSlice.getType()}, tiledOperands, op->getAttrs());
 
     return TilingResult{
         {tiledPackOp},
@@ -1212,37 +1212,37 @@ struct UnPackOpTiling
     sliceSrcSizes.append(unpackOp.getMixedTiles());
     sliceSrcStrides.append(numInnerTiles, oneAttr);
     SmallVector<Operation *> generatedSlices;
-    tensor::ExtractSliceOp sliceSource = b.create<tensor::ExtractSliceOp>(
-        loc, unpackOp.getSource(), sliceSrcIndices, sliceSrcSizes,
+    tensor::ExtractSliceOp sliceSource = tensor::ExtractSliceOp::create(
+        b, loc, unpackOp.getSource(), sliceSrcIndices, sliceSrcSizes,
         sliceSrcStrides);
     generatedSlices.push_back(sliceSource);
 
     SmallVector<OpFoldResult> destStrides(destRank, oneAttr);
     Value sliceDest;
     if (isPerfectTilingCase) {
-      auto destSliceOp = b.create<tensor::ExtractSliceOp>(
-          loc, unpackOp.getDest(), offsets, sizes, destStrides);
+      auto destSliceOp = tensor::ExtractSliceOp::create(
+          b, loc, unpackOp.getDest(), offsets, sizes, destStrides);
       sliceDest = destSliceOp;
       generatedSlices.push_back(destSliceOp);
     } else {
-      sliceDest = b.create<tensor::EmptyOp>(
-          loc, destExpandedSizes, unpackOp.getDestType().getElementType());
+      sliceDest = tensor::EmptyOp::create(
+          b, loc, destExpandedSizes, unpackOp.getDestType().getElementType());
     }
 
     SmallVector<Value> tiledOperands = {sliceSource.getResult(), sliceDest};
     for (auto tile : unpackOp.getInnerTiles())
       tiledOperands.push_back(tile);
 
-    Operation *tiledUnpackOp = b.create<UnPackOp>(
-        loc, TypeRange{sliceDest.getType()}, tiledOperands, op->getAttrs());
+    Operation *tiledUnpackOp = UnPackOp::create(
+        b, loc, TypeRange{sliceDest.getType()}, tiledOperands, op->getAttrs());
 
     if (isPerfectTilingCase)
       return TilingResult{{tiledUnpackOp},
                           SmallVector<Value>(tiledUnpackOp->getResults()),
                           generatedSlices};
 
-    auto extractSlice = b.create<tensor::ExtractSliceOp>(
-        loc, tiledUnpackOp->getResult(0), resultOffsetsFromDest, sizes,
+    auto extractSlice = tensor::ExtractSliceOp::create(
+        b, loc, tiledUnpackOp->getResult(0), resultOffsetsFromDest, sizes,
         destStrides);
     return TilingResult{
         {tiledUnpackOp}, {extractSlice.getResult()}, generatedSlices};
@@ -1377,22 +1377,22 @@ struct UnPackOpTiling
 
     SmallVector<Value> tiledOperands;
     // Create slice of the dest operand.
-    auto extractDestSlice = b.create<tensor::ExtractSliceOp>(
-        loc, unPackOp.getDest(), outputOffsets, outputSizes, strides);
+    auto extractDestSlice = tensor::ExtractSliceOp::create(
+        b, loc, unPackOp.getDest(), outputOffsets, outputSizes, strides);
     tiledOperands.push_back(extractDestSlice);
 
     strides.append(unPackOp.getSourceRank() - outputRank, oneAttr);
     // Create slice of the source operand.
-    auto extractSourceSlice = b.create<tensor::ExtractSliceOp>(
-        loc, unPackOp.getSource(), offsets, sizes, strides);
+    auto extractSourceSlice = tensor::ExtractSliceOp::create(
+        b, loc, unPackOp.getSource(), offsets, sizes, strides);
     tiledOperands.insert(tiledOperands.begin(), extractSourceSlice);
     for (auto tile : unPackOp.getInnerTiles())
       tiledOperands.push_back(tile);
 
     // Create tiled unpack op.
     Operation *tiledUnPackOp =
-        b.create<UnPackOp>(loc, TypeRange{extractDestSlice.getType()},
-                           tiledOperands, op->getAttrs());
+        UnPackOp::create(b, loc, TypeRange{extractDestSlice.getType()},
+                         tiledOperands, op->getAttrs());
 
     return TilingResult{{tiledUnPackOp},
                         SmallVector<Value>(tiledUnPackOp->getResults()),
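
Every hunk above follows the same mechanical rewrite. As a minimal sketch
(illustration only, not part of the patch; `b`, `loc` and the slice operands
are assumed to be in scope), the two call-site shapes are:

  // Old form: `create` is a member template on the builder.
  auto slice = b.create<tensor::ExtractSliceOp>(loc, src, offsets, sizes, strides);
  // New form: a static `create` on the op that takes the builder first.
  auto slice = tensor::ExtractSliceOp::create(b, loc, src, offsets, sizes, strides);

Both shapes build the same operation, which is what keeps this change NFC.
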
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index eab74dab4eb75..1f1e617738981 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -269,12 +269,12 @@ FailureOr<LowerPackResult> linalg::lowerPack(RewriterBase &rewriter,
       packingMetadata.reassociations);
   Value paddingValue = packOp.getPaddingValue();
   if (!paddingValue) {
-    paddingValue = rewriter.create<arith::ConstantOp>(
-        loc, rewriter.getZeroAttr(getElementTypeOrSelf(collapsed)));
+    paddingValue = arith::ConstantOp::create(
+        rewriter, loc, rewriter.getZeroAttr(getElementTypeOrSelf(collapsed)));
   }
   auto padOp =
-      rewriter.create<tensor::PadOp>(loc, collapsed, packOp.getSource(), lows,
-                                     highs, paddingValue, /*nofold=*/false);
+      tensor::PadOp::create(rewriter, loc, collapsed, packOp.getSource(), lows,
+                            highs, paddingValue, /*nofold=*/false);
 
   LLVM_DEBUG(
       DBGSNL(); DBGSNL();
@@ -313,8 +313,8 @@ FailureOr<LowerPackResult> linalg::lowerPack(RewriterBase &rewriter,
       SmallVector<OpFoldResult> sizes =
           tensor::getMixedSizes(rewriter, loc, packOp.getDest());
 
-      auto insertSliceOp = rewriter.create<tensor::InsertSliceOp>(
-          loc, /*source=*/padOp, /*dest=*/packOp.getDest(),
+      auto insertSliceOp = tensor::InsertSliceOp::create(
+          rewriter, loc, /*source=*/padOp, /*dest=*/packOp.getDest(),
           /*offsets=*/zeros, sizes, /*strides=*/ones);
 
       LLVM_DEBUG(DBGS() << "insert_slice op: " << insertSliceOp; DBGSNL(););
@@ -329,15 +329,15 @@ FailureOr<LowerPackResult> linalg::lowerPack(RewriterBase &rewriter,
   // 5. Expand from the padded result to the stripMinedShape.
   auto expandShapeResultType =
       RankedTensorType::Builder(packedTensorType).setShape(stripMinedShape);
-  auto reshapeOp = rewriter.create<tensor::ExpandShapeOp>(
-      loc, expandShapeResultType, padOp.getResult(),
+  auto reshapeOp = tensor::ExpandShapeOp::create(
+      rewriter, loc, expandShapeResultType, padOp.getResult(),
       packingMetadata.reassociations);
 
   // 6. Transpose stripMinedShape to packedShape.
   SmallVector<int64_t> transpPerm =
       invertPermutationVector(packedToStripMinedShapePerm);
-  auto transposeOp = rewriter.create<linalg::TransposeOp>(
-      loc, reshapeOp.getResult(), packOp.getDest(), transpPerm);
+  auto transposeOp = linalg::TransposeOp::create(
+      rewriter, loc, reshapeOp.getResult(), packOp.getDest(), transpPerm);
 
   LLVM_DEBUG(DBGSNL(); DBGSNL(); DBGSNL();
              DBGS() << "reshape op: " << reshapeOp; DBGSNL();
@@ -371,8 +371,8 @@ linalg::lowerUnPack(RewriterBase &rewriter, linalg::UnPackOp unPackOp,
     SmallVector<OpFoldResult> sizes(packedRank - destShape.size(), one);
     sizes.append(tensor::getMixedSizes(rewriter, loc, unPackOp.getDest()));
 
-    auto extractSliceOp = rewriter.create<tensor::ExtractSliceOp>(
-        loc, destTensorType, unPackOp.getSource(),
+    auto extractSliceOp = tensor::ExtractSliceOp::create(
+        rewriter, loc, destTensorType, unPackOp.getSource(),
         SmallVector<OpFoldResult>(packedRank, zero), sizes,
         SmallVector<OpFoldResult>(packedRank, one));
 
@@ -404,10 +404,11 @@ linalg::lowerUnPack(RewriterBase &rewriter, linalg::UnPackOp unPackOp,
   SmallVector<OpFoldResult, 4> dims =
       tensor::getMixedSizes(rewriter, loc, unPackOp.getSource());
   applyPermutationToVector(dims, packedToStripMinedShapePerm);
-  auto emptyOp = rewriter.create<tensor::EmptyOp>(
-      loc, dims, stripMinedTensorType.getElementType());
-  auto transposeOp = rewriter.create<linalg::TransposeOp>(
-      loc, unPackOp.getSource(), emptyOp, packedToStripMinedShapePerm);
+  auto emptyOp = tensor::EmptyOp::create(rewriter, loc, dims,
+                                         stripMinedTensorType.getElementType());
+  auto transposeOp =
+      linalg::TransposeOp::create(rewriter, loc, unPackOp.getSource(), emptyOp,
+                                  packedToStripMinedShapePerm);
 
   LLVM_DEBUG(
       DBGSNL(); DBGSNL();
@@ -426,21 +427,21 @@ linalg::lowerUnPack(RewriterBase &rewriter, linalg::UnPackOp unPackOp,
       DBGSNL(); DBGS() << "collapsed type: " << collapsedType; DBGSNL(););
 
   // 4. Collapse from the stripMinedShape to the padded result.
-  auto reshapeOp = rewriter.create<tensor::CollapseShapeOp>(
-      loc, collapsedType, transposeOp->getResult(0),
+  auto reshapeOp = tensor::CollapseShapeOp::create(
+      rewriter, loc, collapsedType, transposeOp->getResult(0),
       packingMetadata.reassociations);
 
   // 5. ExtractSlice.
   int64_t destRank = destTensorType.getRank();
-  auto extractSliceOp = rewriter.create<tensor::ExtractSliceOp>(
-      loc, destTensorType, reshapeOp->getResult(0),
+  auto extractSliceOp = tensor::ExtractSliceOp::create(
+      rewriter, loc, destTensorType, reshapeOp->getResult(0),
       SmallVector<OpFoldResult>(destRank, zero),
       tensor::getMixedSizes(rewriter, loc, unPackOp.getDest()),
       SmallVector<OpFoldResult>(destRank, one));
 
   // 6. Inject a copy to preserve DPS.
-  auto copyOp = rewriter.create<linalg::CopyOp>(
-      loc, extractSliceOp->getResult(0), unPackOp.getDest());
+  auto copyOp = linalg::CopyOp::create(
+      rewriter, loc, extractSliceOp->getResult(0), unPackOp.getDest());
 
   // 7. Replace unPackOp by copyOp.
   rewriter.replaceOp(unPackOp, copyOp->getResults());
@@ -554,16 +555,16 @@ FailureOr<PackResult> linalg::pack(RewriterBase &rewriter,
               operandType.getShape(), innerPos,
               cast<ShapedType>(dest.getType()).getShape(), {},
               innerPackSizes)) {
-        packOps.push_back(rewriter.create<linalg::PackOp>(
-            loc, operand, dest, innerPos, innerPackSizes));
+        packOps.push_back(linalg::PackOp::create(rewriter, loc, operand, dest,
+                                                 innerPos, innerPackSizes));
       } else {
         // TODO: value of the padding attribute should be determined by
         // consumers.
         auto zeroAttr =
             rewriter.getZeroAttr(getElementTypeOrSelf(dest.getType()));
-        Value zero = rewriter.create<arith::ConstantOp>(loc, zeroAttr);
-        packOps.push_back(rewriter.create<linalg::PackOp>(
-            loc, operand, dest, innerPos, innerPackSizes, zero));
+        Value zero = arith::ConstantOp::create(rewriter, loc, zeroAttr);
+        packOps.push_back(linalg::PackOp::create(
+            rewriter, loc, operand, dest, innerPos, innerPackSizes, zero));
       }
       inputsAndInits.push_back(packOps.back());
     }
@@ -574,9 +575,9 @@ FailureOr<PackResult> linalg::pack(RewriterBase &rewriter,
       ValueRange{inputsAndInits}.take_front(linalgOp.getNumDpsInputs());
   ValueRange inits =
       ValueRange{inputsAndInits}.take_back(linalgOp.getNumDpsInits());
-  auto packedLinalgOp = rewriter.create<linalg::GenericOp>(
-      linalgOp.getLoc(), inits.getTypes(), inputs, inits, indexingMaps,
-      iteratorTypes);
+  auto packedLinalgOp =
+      linalg::GenericOp::create(rewriter, linalgOp.getLoc(), inits.getTypes(),
+                                inputs, inits, indexingMaps, iteratorTypes);
   packedLinalgOp.getRegion().takeBody(linalgOp->getRegion(0));
 
   // Step 4. Propagate packing to all the op results.
@@ -589,8 +590,8 @@ FailureOr<PackResult> linalg::pack(RewriterBase &rewriter,
       continue;
     }
     // Build the symmetrical UnPackOp to the existing PackOp.
-    unPackOps.push_back(rewriter.create<linalg::UnPackOp>(
-        packedLinalgOp->getLoc(), result, maybePackedInit.getSource(),
+    unPackOps.push_back(linalg::UnPackOp::create(
+        rewriter, packedLinalgOp->getLoc(), result, maybePackedInit.getSource(),
         maybePackedInit.getInnerDimsPos(), maybePackedInit.getMixedTiles()));
     results.push_back(unPackOps.back());
   }
@@ -655,7 +656,8 @@ static LinalgOp transposeOneLinalgOperandAndReplace(
   operands[opOperand.getOperandNumber()] = transposedValue;
 
   ValueRange operandsRef(operands);
-  auto transposedGenericOp = rewriter.create<linalg::GenericOp>(
+  auto transposedGenericOp = linalg::GenericOp::create(
+      rewriter,
       /*location=*/linalgOp->getLoc(),
       /*resultTensorTypes=*/
       operandsRef.drop_front(linalgOp.getNumDpsInputs()).getTypes(),
@@ -904,7 +906,7 @@ mlir::linalg::LinalgTilingOptions::setTileSizes(ArrayRef<int64_t> ts) {
     b.setInsertionPointToStart(
         &op->getParentOfType<func::FuncOp>().getBody().front());
     return llvm::to_vector<4>(map_range(tileSizes, [&](int64_t s) {
-      Value v = b.create<arith::ConstantIndexOp>(op->getLoc(), s);
+      Value v = arith::ConstantIndexOp::create(b, op->getLoc(), s);
       return v;
     }));
   };
@@ -926,12 +928,12 @@ Value DecomposePadOpPattern::createFillOrGenerateOp(
     // Move the padding value defined inside the PadOp block to outside.
     if (padValue.getParentBlock() == &padOp.getRegion().front())
       rewriter.moveOpBefore(padValue.getDefiningOp(), padOp);
-    return rewriter.create<FillOp>(padOp.getLoc(), padValue, dest).result();
+    return FillOp::create(rewriter, padOp.getLoc(), padValue, dest).result();
   }
 
   // Fill could not be optimized: Lower to tensor::GenerateOp with region.
-  auto generateOp = rewriter.create<tensor::GenerateOp>(
-      padOp.getLoc(), padOp.getResultType(), dynSizes);
+  auto generateOp = tensor::GenerateOp::create(rewriter, padOp.getLoc(),
+                                               padOp.getResultType(), dynSizes);
   // Copy region to new op.
   IRMapping bvm;
   padOp.getRegion().cloneInto(&generateOp.getRegion(), bvm);
@@ -970,8 +972,9 @@ DecomposePadOpPattern::matchAndRewrite(tensor::PadOp padOp,
   }
 
   // Init tensor and fill it with padding.
-  Value emptyTensor = rewriter.create<tensor::EmptyOp>(
-      padOp.getLoc(), staticSizes, resultType.getElementType(), dynSizes);
+  Value emptyTensor =
+      tensor::EmptyOp::create(rewriter, padOp.getLoc(), staticSizes,
+                              resultType.getElementType(), dynSizes);
   Value fill = createFillOrGenerateOp(rewriter, padOp, emptyTensor, dynSizes);
 
   // Generate a InsertSliceOp for copying the PadOp source.
@@ -1222,12 +1225,13 @@ LogicalResult DecomposeOuterUnitDimsPackOpPattern::matchAndRewrite(
 
   applyPermutationToVector<OpFoldResult>(transShapeForEmptyOp,
                                          srcPermForTranspose);
-  Value empty = rewriter.create<tensor::EmptyOp>(
-      loc, transShapeForEmptyOp, packOp.getSourceType().getElementType());
+  Value empty =
+      tensor::EmptyOp::create(rewriter, loc, transShapeForEmptyOp,
+                              packOp.getSourceType().getElementType());
 
   // 2.2 Create linalg.transpose
-  auto transposedOp = rewriter.create<linalg::TransposeOp>(loc, input, empty,
-                                                           srcPermForTranspose);
+  auto transposedOp = linalg::TransposeOp::create(rewriter, loc, input, empty,
+                                                  srcPermForTranspose);
 
   // 3. Insert the inner tile to the destination:
   //  %inserted_tile = tensor.insert_slice(%transposed_tile)
@@ -1246,9 +1250,9 @@ LogicalResult DecomposeOuterUnitDimsPackOpPattern::matchAndRewrite(
   }
 
   // 4. Replace tensor.packOp with tensor.insert_slice created above
-  auto insert = rewriter.create<tensor::InsertSliceOp>(
-      loc, transposedOp.getResult()[0], packOp.getDest(), writeOffsets,
-      writeSizes, writeStrides);
+  auto insert = tensor::InsertSliceOp::create(
+      rewriter, loc, transposedOp.getResult()[0], packOp.getDest(),
+      writeOffsets, writeSizes, writeStrides);
   rewriter.replaceOp(packOp, insert.getResult());
 
   return success();
@@ -1313,7 +1317,7 @@ LogicalResult DecomposeOuterUnitDimsUnPackOpPattern::matchAndRewrite(
     // outer-untiled-dims
     if (ShapedType::isDynamic(srcShape[i])) {
       OpFoldResult dynamicDim =
-          rewriter.create<tensor::DimOp>(loc, source, i).getResult();
+          tensor::DimOp::create(rewriter, loc, source, i).getResult();
       extractSliceSizes.push_back(dynamicDim);
       shapeForEmptyOp.push_back(dynamicDim);
     } else {
@@ -1340,8 +1344,8 @@ LogicalResult DecomposeOuterUnitDimsUnPackOpPattern::matchAndRewrite(
   readShapeForExtractSlice.append(tileShape.begin(), tileShape.end());
   Type elemType = unpackOp.getSourceType().getElementType();
   auto readType = RankedTensorType::get(readShapeForExtractSlice, elemType);
-  Value innerTile = rewriter.create<tensor::ExtractSliceOp>(
-      loc, readType, unpackOp.getSource(), extractSliceOffsets,
+  Value innerTile = tensor::ExtractSliceOp::create(
+      rewriter, loc, readType, unpackOp.getSource(), extractSliceOffsets,
       extractSliceSizes, extractSliceStrides);
 
   // 2. Transpose the tile to match the outer corresponding tile order.
@@ -1352,9 +1356,9 @@ LogicalResult DecomposeOuterUnitDimsUnPackOpPattern::matchAndRewrite(
   applyPermutationToVector<OpFoldResult>(shapeForEmptyOp, perm);
 
   Value empty =
-      rewriter.create<tensor::EmptyOp>(loc, shapeForEmptyOp, elemType);
+      tensor::EmptyOp::create(rewriter, loc, shapeForEmptyOp, elemType);
   auto transposedOp =
-      rewriter.create<linalg::TransposeOp>(loc, innerTile, empty, perm);
+      linalg::TransposeOp::create(rewriter, loc, innerTile, empty, perm);
 
   // 3. Handle incomplete tiles if needed. It truncates trailing data from the
   // transposed tile.
@@ -1369,8 +1373,9 @@ LogicalResult DecomposeOuterUnitDimsUnPackOpPattern::matchAndRewrite(
           tensor::getMixedSize(rewriter, loc, unpackOp.getDest(), i));
   }
 
-  auto partialTile = rewriter.create<tensor::ExtractSliceOp>(
-      loc, transposedOp.getResult()[0], tileOffsets, tileSizes, tileStrides);
+  auto partialTile =
+      tensor::ExtractSliceOp::create(rewriter, loc, transposedOp.getResult()[0],
+                                     tileOffsets, tileSizes, tileStrides);
 
   // 4. Insert the result to the destination tensor.
   SmallVector<OpFoldResult> writeSizes;
@@ -1382,9 +1387,9 @@ LogicalResult DecomposeOuterUnitDimsUnPackOpPattern::matchAndRewrite(
     else
       writeSizes.push_back(oneIdxAttr);
   }
-  auto insert = rewriter.create<tensor::InsertSliceOp>(
-      loc, partialTile, unpackOp.getDest(), writeOffsets, writeSizes,
-      writeStrides);
+  auto insert = tensor::InsertSliceOp::create(rewriter, loc, partialTile,
+                                              unpackOp.getDest(), writeOffsets,
+                                              writeSizes, writeStrides);
   rewriter.replaceOp(unpackOp, insert.getResult());
 
   return success();
@@ -1491,8 +1496,8 @@ FailureOr<Conv1DOp> DownscaleSizeOneWindowed2DConvolution<Conv2DOp, Conv1DOp>::
   dilations.erase(dilations.begin() + (removeH ? 0 : 1));
   auto dilationsAttr = rewriter.getI64VectorAttr(dilations);
 
-  auto conv1DOp = rewriter.create<Conv1DOp>(
-      loc, newOutputType, ValueRange{newInput, newKernel},
+  auto conv1DOp = Conv1DOp::create(
+      rewriter, loc, newOutputType, ValueRange{newInput, newKernel},
       ValueRange{newOutput}, stridesAttr, dilationsAttr);
 
   // Insert back.
@@ -1578,8 +1583,8 @@ DownscaleDepthwiseConv2DNhwcHwcOp::returningMatchAndRewrite(
   dilations.erase(dilations.begin() + (removeH ? 0 : 1));
   auto dilationsAttr = rewriter.getI64VectorAttr(dilations);
 
-  auto conv1DOp = rewriter.create<DepthwiseConv1DNwcWcOp>(
-      loc, newOutputType, ValueRange{newInput, newKernel},
+  auto conv1DOp = DepthwiseConv1DNwcWcOp::create(
+      rewriter, loc, newOutputType, ValueRange{newInput, newKernel},
       ValueRange{newOutput}, stridesAttr, dilationsAttr);
 
   // Insert back.
@@ -1635,9 +1640,9 @@ DownscaleConv2DOp::returningMatchAndRewrite(Conv2DOp convOp,
   Value newOutput = tensor::createCanonicalRankReducingExtractSliceOp(
       rewriter, loc, output, newOutputType);
 
-  auto conv1DOp = rewriter.create<Conv1DOp>(loc, newOutputType,
-                                            ValueRange{newInput, newKernel},
-                                            ValueRange{newOutput});
+  auto conv1DOp =
+      Conv1DOp::create(rewriter, loc, newOutputType,
+                       ValueRange{newInput, newKernel}, ValueRange{newOutput});
 
   // Insert back.
   Value inserted = tensor::createCanonicalRankReducingInsertSliceOp(
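
The Transforms.cpp hunks chain several of these rewrites together. A minimal
sketch of how a zero constant plus linalg.fill now reads with the static
create API (the helper name is hypothetical and not part of the patch; the
`RewriterBase`, `Location` and destination tensor value are assumed in scope):

  // Hypothetical helper, for illustration only: create a zero constant of the
  // destination's element type and fill `dest` with it.
  static Value fillWithZero(RewriterBase &rewriter, Location loc, Value dest) {
    auto zeroAttr = rewriter.getZeroAttr(getElementTypeOrSelf(dest.getType()));
    Value zero = arith::ConstantOp::create(rewriter, loc, zeroAttr);
    return linalg::FillOp::create(rewriter, loc, zero, dest).result();
  }
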
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp b/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
index 092aecceef6b3..99fb8c796cf06 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
@@ -67,7 +67,7 @@ FailureOr<Operation *> transposeConv2DHelper(RewriterBase &rewriter,
   Value input;
   if (isTensorOp) {
 
-    input = rewriter.create<tensor::EmptyOp>(loc, newFilterShape, elementTy)
+    input = tensor::EmptyOp::create(rewriter, loc, newFilterShape, elementTy)
                 .getResult();
   } else {
     input = rewriter
@@ -78,7 +78,7 @@ FailureOr<Operation *> transposeConv2DHelper(RewriterBase &rewriter,
 
   // We can then construct the transposition on our filter.
   auto transpose =
-      rewriter.create<linalg::TransposeOp>(loc, filter, input, filterPerm);
+      linalg::TransposeOp::create(rewriter, loc, filter, input, filterPerm);
 
   Value newFilter;
   if (isTensorOp) {
@@ -98,8 +98,8 @@ FailureOr<Operation *> transposeConv2DHelper(RewriterBase &rewriter,
     resultTy.push_back(op->getResult(0).getType());
   }
   auto newConv =
-      rewriter.create<HWCFConvOp>(loc, resultTy, newInputs, op.getOutputs(),
-                                  op.getStrides(), op.getDilations());
+      HWCFConvOp::create(rewriter, loc, resultTy, newInputs, op.getOutputs(),
+                         op.getStrides(), op.getDilations());
   rewriter.replaceOp(op, newConv);
   return newConv.getOperation();
 }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp b/mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp
index 934781d1cab75..a2a4335ef8e22 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp
@@ -47,25 +47,25 @@ FailureOr<Operation *> mlir::linalg::transposeMatmul(RewriterBase &rewriter,
 
   SmallVector<Value> dynamicDims;
   if (type.isDynamicDim(1))
-    dynamicDims.push_back(rewriter.create<tensor::DimOp>(loc, input, 1));
+    dynamicDims.push_back(tensor::DimOp::create(rewriter, loc, input, 1));
   if (type.isDynamicDim(0))
-    dynamicDims.push_back(rewriter.create<tensor::DimOp>(loc, input, 0));
+    dynamicDims.push_back(tensor::DimOp::create(rewriter, loc, input, 0));
 
   ArrayRef<int64_t> shape = type.getShape();
-  Value empty = rewriter.create<tensor::EmptyOp>(
-      loc, ArrayRef<int64_t>{shape[1], shape[0]}, type.getElementType(),
-      dynamicDims);
-  auto transposeOp = rewriter.create<linalg::TransposeOp>(
-      loc, input, empty, ArrayRef<int64_t>{1, 0});
+  Value empty = tensor::EmptyOp::create(rewriter, loc,
+                                        ArrayRef<int64_t>{shape[1], shape[0]},
+                                        type.getElementType(), dynamicDims);
+  auto transposeOp = linalg::TransposeOp::create(rewriter, loc, input, empty,
+                                                 ArrayRef<int64_t>{1, 0});
   Operation *newMatmulOp;
   if (transposeLHS) {
-    newMatmulOp = rewriter.create<linalg::MatmulTransposeAOp>(
-        loc, matmulOp.getResultTypes(),
+    newMatmulOp = linalg::MatmulTransposeAOp::create(
+        rewriter, loc, matmulOp.getResultTypes(),
         ValueRange{transposeOp->getResult(0), matmulOp.getInputs()[1]},
         matmulOp.getOutputs());
   } else {
-    newMatmulOp = rewriter.create<linalg::MatmulTransposeBOp>(
-        loc, matmulOp.getResultTypes(),
+    newMatmulOp = linalg::MatmulTransposeBOp::create(
+        rewriter, loc, matmulOp.getResultTypes(),
         ValueRange{matmulOp.getInputs()[0], transposeOp->getResult(0)},
         matmulOp.getOutputs());
   }
@@ -102,27 +102,27 @@ mlir::linalg::transposeBatchMatmul(RewriterBase &rewriter,
 
   SmallVector<Value> dynamicDims;
   if (type.isDynamicDim(0))
-    dynamicDims.push_back(rewriter.create<tensor::DimOp>(loc, input, 0));
+    dynamicDims.push_back(tensor::DimOp::create(rewriter, loc, input, 0));
   if (type.isDynamicDim(2))
-    dynamicDims.push_back(rewriter.create<tensor::DimOp>(loc, input, 2));
+    dynamicDims.push_back(tensor::DimOp::create(rewriter, loc, input, 2));
   if (type.isDynamicDim(1))
-    dynamicDims.push_back(rewriter.create<tensor::DimOp>(loc, input, 1));
+    dynamicDims.push_back(tensor::DimOp::create(rewriter, loc, input, 1));
 
   ArrayRef<int64_t> shape = type.getShape();
-  Value empty = rewriter.create<tensor::EmptyOp>(
-      loc, ArrayRef<int64_t>{shape[0], shape[2], shape[1]},
+  Value empty = tensor::EmptyOp::create(
+      rewriter, loc, ArrayRef<int64_t>{shape[0], shape[2], shape[1]},
       type.getElementType(), dynamicDims);
-  auto transposeOp = rewriter.create<linalg::TransposeOp>(
-      loc, input, empty, ArrayRef<int64_t>{0, 2, 1});
+  auto transposeOp = linalg::TransposeOp::create(rewriter, loc, input, empty,
+                                                 ArrayRef<int64_t>{0, 2, 1});
   Operation *newMatmulOp;
   if (transposeLHS) {
-    newMatmulOp = rewriter.create<linalg::BatchMatmulTransposeAOp>(
-        loc, batchMatmulOp.getResultTypes(),
+    newMatmulOp = linalg::BatchMatmulTransposeAOp::create(
+        rewriter, loc, batchMatmulOp.getResultTypes(),
         ValueRange{transposeOp->getResult(0), batchMatmulOp.getInputs()[1]},
         batchMatmulOp.getOutputs());
   } else {
-    newMatmulOp = rewriter.create<linalg::BatchMatmulTransposeBOp>(
-        loc, batchMatmulOp.getResultTypes(),
+    newMatmulOp = linalg::BatchMatmulTransposeBOp::create(
+        rewriter, loc, batchMatmulOp.getResultTypes(),
         ValueRange{batchMatmulOp.getInputs()[0], transposeOp->getResult(0)},
         batchMatmulOp.getOutputs());
   }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 77c85abab9aa0..78c6bd13aba3a 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -120,8 +120,9 @@ extractConvInputSlices(RewriterBase &rewriter, Location loc, Value input,
     SmallVector<int64_t> strides = {1};
     for (int64_t kw = 0; kw < kwSize; ++kw) {
       for (int64_t w = 0; w < wSize; w += wSizeStep) {
-        result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
-            loc, input, /*offsets=*/ArrayRef<int64_t>{w + kw}, sizes, strides));
+        result.push_back(vector::ExtractStridedSliceOp::create(
+            rewriter, loc, input, /*offsets=*/ArrayRef<int64_t>{w + kw}, sizes,
+            strides));
       }
     }
   } else {
@@ -131,8 +132,8 @@ extractConvInputSlices(RewriterBase &rewriter, Location loc, Value input,
     SmallVector<int64_t> strides = {1, 1, 1};
     for (int64_t kw = 0; kw < kwSize; ++kw) {
       for (int64_t w = 0; w < wSize; w += wSizeStep) {
-        result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
-            loc, input,
+        result.push_back(vector::ExtractStridedSliceOp::create(
+            rewriter, loc, input,
             /*offsets=*/ArrayRef<int64_t>{0, w * strideW + kw * dilationW, 0},
             sizes, strides));
       }
@@ -150,8 +151,8 @@ static SmallVector<Value> extractConvFilterSlices(RewriterBase &rewriter,
   // Extract rhs slice of size [{c, f} for channeled convolutions and {1} for
   // non-channeled convolution] @ [kw].
   for (int64_t kw = 0; kw < kwSize; ++kw) {
-    result.push_back(rewriter.create<vector::ExtractOp>(
-        loc, filter, /*offsets=*/ArrayRef<int64_t>{kw}));
+    result.push_back(vector::ExtractOp::create(
+        rewriter, loc, filter, /*offsets=*/ArrayRef<int64_t>{kw}));
   }
   return result;
 }
@@ -168,8 +169,9 @@ extractConvResultSlices(RewriterBase &rewriter, Location loc, Value res,
     SmallVector<int64_t> sizes = {wSizeStep};
     SmallVector<int64_t> strides = {1};
     for (int64_t w = 0; w < wSize; w += wSizeStep) {
-      result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
-          loc, res, /*offsets=*/ArrayRef<int64_t>{w}, sizes, strides));
+      result.push_back(vector::ExtractStridedSliceOp::create(
+          rewriter, loc, res, /*offsets=*/ArrayRef<int64_t>{w}, sizes,
+          strides));
     }
   } else {
     // Extract res slice: {n, wSizeStep, f} @ [0, w, 0] for channeled
@@ -177,8 +179,9 @@ extractConvResultSlices(RewriterBase &rewriter, Location loc, Value res,
     SmallVector<int64_t> sizes = {nSize, wSizeStep, fSize};
     SmallVector<int64_t> strides = {1, 1, 1};
     for (int64_t w = 0; w < wSize; w += wSizeStep) {
-      result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
-          loc, res, /*offsets=*/ArrayRef<int64_t>{0, w, 0}, sizes, strides));
+      result.push_back(vector::ExtractStridedSliceOp::create(
+          rewriter, loc, res, /*offsets=*/ArrayRef<int64_t>{0, w, 0}, sizes,
+          strides));
     }
   }
   return result;
@@ -195,17 +198,18 @@ static Value insertConvResultSlices(RewriterBase &rewriter, Location loc,
     // This does not depend on kw.
     SmallVector<int64_t> strides = {1};
     for (int64_t w = 0; w < wSize; w += wSizeStep) {
-      res = rewriter.create<vector::InsertStridedSliceOp>(
-          loc, resVals[w], res, /*offsets=*/ArrayRef<int64_t>{w}, strides);
+      res = vector::InsertStridedSliceOp::create(
+          rewriter, loc, resVals[w], res, /*offsets=*/ArrayRef<int64_t>{w},
+          strides);
     }
   } else {
     // Write back res slice: {n, wSizeStep, f} @ [0, w, 0] for channeled
     // convolution. This does not depend on kw.
     SmallVector<int64_t> strides = {1, 1, 1};
     for (int64_t w = 0; w < wSize; w += wSizeStep) {
-      res = rewriter.create<vector::InsertStridedSliceOp>(
-          loc, resVals[w], res, /*offsets=*/ArrayRef<int64_t>{0, w, 0},
-          strides);
+      res = vector::InsertStridedSliceOp::create(
+          rewriter, loc, resVals[w], res,
+          /*offsets=*/ArrayRef<int64_t>{0, w, 0}, strides);
     }
   }
   return res;
@@ -347,8 +351,8 @@ VectorizationState::precomputeIterSpaceValueSizes(RewriterBase &rewriter,
   for (int vecDim = 0, end = canonicalVecShape.size(); vecDim < end; ++vecDim) {
     if (ShapedType::isStatic(iterSpaceStaticSizes[vecDim])) {
       // Create constant index op for static dimensions.
-      iterSpaceValueSizes.push_back(rewriter.create<arith::ConstantIndexOp>(
-          linalgOp.getLoc(), iterSpaceStaticSizes[vecDim]));
+      iterSpaceValueSizes.push_back(arith::ConstantIndexOp::create(
+          rewriter, linalgOp.getLoc(), iterSpaceStaticSizes[vecDim]));
       continue;
     }
 
@@ -360,11 +364,12 @@ VectorizationState::precomputeIterSpaceValueSizes(RewriterBase &rewriter,
                                                          operandDimPos)))
       return failure();
 
-    Value dynamicDim = linalgOp.hasPureTensorSemantics()
-                           ? (Value)rewriter.create<tensor::DimOp>(
-                                 linalgOp.getLoc(), operand, operandDimPos)
-                           : (Value)rewriter.create<memref::DimOp>(
-                                 linalgOp.getLoc(), operand, operandDimPos);
+    Value dynamicDim =
+        linalgOp.hasPureTensorSemantics()
+            ? (Value)tensor::DimOp::create(rewriter, linalgOp.getLoc(), operand,
+                                           operandDimPos)
+            : (Value)memref::DimOp::create(rewriter, linalgOp.getLoc(), operand,
+                                           operandDimPos);
     iterSpaceValueSizes.push_back(dynamicDim);
   }
 
@@ -503,8 +508,8 @@ Value VectorizationState::getOrCreateMaskFor(
          "Masked 0-d vectors are not supported yet");
 
   // Create the mask based on the dimension values.
-  Value mask = rewriter.create<vector::CreateMaskOp>(linalgOp.getLoc(),
-                                                     maskType, upperBounds);
+  Value mask = vector::CreateMaskOp::create(rewriter, linalgOp.getLoc(),
+                                            maskType, upperBounds);
   LDBG("Creating new mask: " << mask << "\n");
   activeMaskCache[maskingMap] = mask;
   return mask;
@@ -672,8 +677,8 @@ static Operation *buildMultiDimReduce(OpBuilder &b, Operation *reduceOp,
                                       ArrayRef<bool> dimsToMask) {
   auto maybeKind = getCombinerOpKind(reduceOp);
   assert(maybeKind && "Failed precondition: could not get reduction kind");
-  return b.create<vector::MultiDimReductionOp>(
-      reduceOp->getLoc(), valueToReduce, acc, dimsToMask, *maybeKind);
+  return vector::MultiDimReductionOp::create(
+      b, reduceOp->getLoc(), valueToReduce, acc, dimsToMask, *maybeKind);
 }
 
 static SmallVector<bool> getDimsToReduce(LinalgOp linalgOp) {
@@ -717,19 +722,20 @@ static Value buildVectorWrite(RewriterBase &rewriter, Value value,
   Operation *write;
   if (vectorType.getRank() > 0) {
     AffineMap writeMap = inversePermutation(reindexIndexingMap(opOperandMap));
-    SmallVector<Value> indices(linalgOp.getRank(outputOperand),
-                               rewriter.create<arith::ConstantIndexOp>(loc, 0));
+    SmallVector<Value> indices(
+        linalgOp.getRank(outputOperand),
+        arith::ConstantIndexOp::create(rewriter, loc, 0));
     value = broadcastIfNeeded(rewriter, value, vectorType);
     assert(value.getType() == vectorType && "Incorrect type");
-    write = rewriter.create<vector::TransferWriteOp>(
-        loc, value, outputOperand->get(), indices, writeMap);
+    write = vector::TransferWriteOp::create(
+        rewriter, loc, value, outputOperand->get(), indices, writeMap);
   } else {
     // 0-d case is still special: do not invert the reindexing writeMap.
     if (!isa<VectorType>(value.getType()))
-      value = rewriter.create<vector::BroadcastOp>(loc, vectorType, value);
+      value = vector::BroadcastOp::create(rewriter, loc, vectorType, value);
     assert(value.getType() == vectorType && "Incorrect type");
-    write = rewriter.create<vector::TransferWriteOp>(
-        loc, value, outputOperand->get(), ValueRange{});
+    write = vector::TransferWriteOp::create(rewriter, loc, value,
+                                            outputOperand->get(), ValueRange{});
   }
 
   write = state.maskOperation(rewriter, write, linalgOp, opOperandMap);
@@ -807,7 +813,7 @@ static VectorizationHookResult vectorizeLinalgIndex(RewriterBase &rewriter,
   auto indexVectorType =
       VectorType::get({targetShape[dim]}, rewriter.getIndexType(),
                       state.getScalableVecDims()[dim]);
-  auto indexSteps = rewriter.create<vector::StepOp>(loc, indexVectorType);
+  auto indexSteps = vector::StepOp::create(rewriter, loc, indexVectorType);
   // Return the one-dimensional index vector if it lives in the trailing
   // dimension of the iteration space since the vectorization algorithm in this
   // case can handle the broadcast.
@@ -822,14 +828,14 @@ static VectorizationHookResult vectorizeLinalgIndex(RewriterBase &rewriter,
   auto permMap =
       AffineMap::getPermutationMap(permPattern, linalgOp.getContext());
 
-  auto broadCastOp = rewriter.create<vector::BroadcastOp>(
-      loc, state.getCanonicalVecType(rewriter.getIndexType(), permMap),
-      indexSteps);
+  auto broadCastOp = vector::BroadcastOp::create(
+      rewriter, loc,
+      state.getCanonicalVecType(rewriter.getIndexType(), permMap), indexSteps);
   SmallVector<int64_t> transposition =
       llvm::to_vector<16>(llvm::seq<int64_t>(0, linalgOp.getNumLoops()));
   std::swap(transposition.back(), transposition[dim]);
   auto transposeOp =
-      rewriter.create<vector::TransposeOp>(loc, broadCastOp, transposition);
+      vector::TransposeOp::create(rewriter, loc, broadCastOp, transposition);
   return VectorizationHookResult{VectorizationHookStatus::NewOp, transposeOp};
 }
 
@@ -882,19 +888,19 @@ static Value calculateGatherOffset(RewriterBase &rewriter,
 
   const size_t numIndices = extractOp.getIndices().size();
   for (size_t i = 1; i < numIndices; i++) {
-    Value dimIdx = rewriter.create<arith::ConstantIndexOp>(loc, i);
+    Value dimIdx = arith::ConstantIndexOp::create(rewriter, loc, i);
 
     auto dimSize = broadcastIfNeeded(
         rewriter,
-        rewriter.create<tensor::DimOp>(loc, extractOp.getTensor(), dimIdx),
+        tensor::DimOp::create(rewriter, loc, extractOp.getTensor(), dimIdx),
         indexVecType);
 
-    offset = rewriter.create<arith::MulIOp>(loc, offset, dimSize);
+    offset = arith::MulIOp::create(rewriter, loc, offset, dimSize);
 
     auto extractOpIndex = broadcastIfNeeded(
         rewriter, bvm.lookup(extractOp.getIndices()[i]), indexVecType);
 
-    offset = rewriter.create<arith::AddIOp>(loc, extractOpIndex, offset);
+    offset = arith::AddIOp::create(rewriter, loc, extractOpIndex, offset);
   }
 
   return offset;
@@ -1139,18 +1145,18 @@ vectorizeTensorExtract(RewriterBase &rewriter, VectorizationState &state,
 
   // Compute the static loop sizes of the extract op.
   auto resultType = state.getCanonicalVecType(extractOp.getResult().getType());
-  auto maskConstantOp = rewriter.create<arith::ConstantOp>(
-      loc,
+  auto maskConstantOp = arith::ConstantOp::create(
+      rewriter, loc,
       DenseIntElementsAttr::get(state.getCanonicalVecType(rewriter.getI1Type()),
                                 /*value=*/true));
-  auto passThruConstantOp =
-      rewriter.create<arith::ConstantOp>(loc, rewriter.getZeroAttr(resultType));
+  auto passThruConstantOp = arith::ConstantOp::create(
+      rewriter, loc, rewriter.getZeroAttr(resultType));
 
   // Base indices are currently set to 0. We will need to re-visit if more
   // generic scenarios are to be supported.
   SmallVector<Value> baseIndices(
       extractOp.getIndices().size(),
-      rewriter.create<arith::ConstantIndexOp>(loc, 0));
+      arith::ConstantIndexOp::create(rewriter, loc, 0));
 
   VectorMemoryAccessKind memAccessKind =
       getTensorExtractMemoryAccessPattern(extractOp, linalgOp, resultType);
@@ -1160,8 +1166,8 @@ vectorizeTensorExtract(RewriterBase &rewriter, VectorizationState &state,
     Value offset = calculateGatherOffset(rewriter, state, extractOp, bvm);
 
     // Generate the gather load
-    Operation *gatherOp = rewriter.create<vector::GatherOp>(
-        loc, resultType, extractOp.getTensor(), baseIndices, offset,
+    Operation *gatherOp = vector::GatherOp::create(
+        rewriter, loc, resultType, extractOp.getTensor(), baseIndices, offset,
         maskConstantOp, passThruConstantOp);
     gatherOp = state.maskOperation(rewriter, gatherOp, linalgOp);
 
@@ -1195,13 +1201,13 @@ vectorizeTensorExtract(RewriterBase &rewriter, VectorizationState &state,
       continue;
     }
 
-    auto indexAs1dVector = rewriter.create<vector::ShapeCastOp>(
-        loc,
+    auto indexAs1dVector = vector::ShapeCastOp::create(
+        rewriter, loc,
         VectorType::get(resultType.getShape().back(), rewriter.getIndexType(),
                         resultType.getScalableDims().back()),
         idx);
     transferReadIdxs.push_back(
-        rewriter.create<vector::ExtractOp>(loc, indexAs1dVector, 0));
+        vector::ExtractOp::create(rewriter, loc, indexAs1dVector, 0));
   }
 
   // `tensor.extract_element` is always in-bounds, hence the following holds.
@@ -1215,8 +1221,8 @@ vectorizeTensorExtract(RewriterBase &rewriter, VectorizationState &state,
     SmallVector<AffineExpr> exprs(dstRank, getAffineConstantExpr(0, ctx));
     auto permutationMap = AffineMap::get(srcRank, 0, exprs, ctx);
 
-    auto transferReadOp = rewriter.create<vector::TransferReadOp>(
-        loc, resultType, extractOp.getTensor(), transferReadIdxs,
+    auto transferReadOp = vector::TransferReadOp::create(
+        rewriter, loc, resultType, extractOp.getTensor(), transferReadIdxs,
         /*padding=*/std::nullopt, permutationMap, inBounds);
 
     // Mask this broadcasting xfer_read here rather than relying on the generic
@@ -1224,8 +1230,8 @@ vectorizeTensorExtract(RewriterBase &rewriter, VectorizationState &state,
     // valid here).
     SmallVector<int64_t> readMaskShape = {1};
     auto readMaskType = VectorType::get(readMaskShape, rewriter.getI1Type());
-    auto allTrue = rewriter.create<vector::ConstantMaskOp>(
-        loc, readMaskType, vector::ConstantMaskKind::AllTrue);
+    auto allTrue = vector::ConstantMaskOp::create(
+        rewriter, loc, readMaskType, vector::ConstantMaskKind::AllTrue);
     auto *maskedReadOp =
         mlir::vector::maskOperation(rewriter, transferReadOp, allTrue);
 
@@ -1252,8 +1258,8 @@ vectorizeTensorExtract(RewriterBase &rewriter, VectorizationState &state,
     rankDiff--;
   }
 
-  auto transferReadOp = rewriter.create<vector::TransferReadOp>(
-      loc, resultType, extractOp.getTensor(), transferReadIdxs,
+  auto transferReadOp = vector::TransferReadOp::create(
+      rewriter, loc, resultType, extractOp.getTensor(), transferReadIdxs,
       /*padding=*/std::nullopt, permutationMap, inBounds);
 
   LDBG("Vectorised as contiguous load: " << extractOp);
@@ -1434,7 +1440,7 @@ vectorizeAsLinalgGeneric(RewriterBase &rewriter, VectorizationState &state,
 
   // 3. Turn all BBArgs into vector.transfer_read / load.
   Location loc = linalgOp.getLoc();
-  Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+  Value zero = arith::ConstantIndexOp::create(rewriter, loc, 0);
   for (OpOperand *opOperand : linalgOp.getOpOperandsMatchingBBargs()) {
     BlockArgument bbarg = linalgOp.getMatchingBlockArgument(opOperand);
     if (linalgOp.isScalar(opOperand)) {
@@ -1464,8 +1470,8 @@ vectorizeAsLinalgGeneric(RewriterBase &rewriter, VectorizationState &state,
 
     SmallVector<Value> indices(linalgOp.getShape(opOperand).size(), zero);
 
-    Operation *read = rewriter.create<vector::TransferReadOp>(
-        loc, readType, opOperand->get(), indices,
+    Operation *read = vector::TransferReadOp::create(
+        rewriter, loc, readType, opOperand->get(), indices,
         /*padding=*/std::nullopt, readMap);
     read = state.maskOperation(rewriter, read, linalgOp, indexingMap);
     Value readValue = read->getResult(0);
@@ -1481,8 +1487,8 @@ vectorizeAsLinalgGeneric(RewriterBase &rewriter, VectorizationState &state,
     // 3.c. Not all ops support 0-d vectors, extract the scalar for now.
     // TODO: remove this.
     if (readType.getRank() == 0)
-      readValue = rewriter.create<vector::ExtractOp>(loc, readValue,
-                                                     ArrayRef<int64_t>());
+      readValue = vector::ExtractOp::create(rewriter, loc, readValue,
+                                            ArrayRef<int64_t>());
 
     LDBG("New vectorized bbarg(" << bbarg.getArgNumber() << "): " << readValue
                                  << "\n");
@@ -1689,17 +1695,16 @@ createWriteOrMaskedWrite(OpBuilder &builder, Location loc, Value vecToStore,
           writeIndices.size() == static_cast<size_t>(destRank)) &&
          "Invalid number of write indices!");
   if (writeIndices.empty()) {
-    auto zero = builder.create<arith::ConstantIndexOp>(loc, 0);
+    auto zero = arith::ConstantIndexOp::create(builder, loc, 0);
     writeIndices.assign(destRank, zero);
   }
 
   // Generate the xfer_write Op
-  Operation *write =
-      builder.create<vector::TransferWriteOp>(loc,
-                                              /*vector=*/vecToStore,
-                                              /*source=*/dest,
-                                              /*indices=*/writeIndices,
-                                              /*inBounds=*/inBoundsVal);
+  Operation *write = vector::TransferWriteOp::create(builder, loc,
+                                                     /*vector=*/vecToStore,
+                                                     /*source=*/dest,
+                                                     /*indices=*/writeIndices,
+                                                     /*inBounds=*/inBoundsVal);
 
   // If masking is disabled, exit.
   if (useInBoundsInsteadOfMasking)
@@ -1774,8 +1779,9 @@ vectorizeAsTensorPackOp(RewriterBase &rewriter, linalg::PackOp packOp,
   Location loc = packOp.getLoc();
   auto padValue = packOp.getPaddingValue();
   if (!padValue) {
-    padValue = rewriter.create<arith::ConstantOp>(
-        loc, rewriter.getZeroAttr(packOp.getSourceType().getElementType()));
+    padValue = arith::ConstantOp::create(
+        rewriter, loc,
+        rewriter.getZeroAttr(packOp.getSourceType().getElementType()));
   }
   ReifiedRankedShapedTypeDims reifiedReturnShapes;
   LogicalResult status =
@@ -1814,17 +1820,17 @@ vectorizeAsTensorPackOp(RewriterBase &rewriter, linalg::PackOp packOp,
   auto tiledPackType = VectorType::get(getTiledPackShape(packOp, destShape),
                                        packOp.getDestType().getElementType());
   auto shapeCastOp =
-      rewriter.create<vector::ShapeCastOp>(loc, tiledPackType, maskedRead);
+      vector::ShapeCastOp::create(rewriter, loc, tiledPackType, maskedRead);
 
   // Create TransposeOp.
   auto destPermutation =
       invertPermutationVector(getPackInverseDestPerm(packOp));
-  auto transposeOp = rewriter.create<vector::TransposeOp>(
-      loc, shapeCastOp.getResult(), destPermutation);
+  auto transposeOp = vector::TransposeOp::create(
+      rewriter, loc, shapeCastOp.getResult(), destPermutation);
 
   // Create TransferWriteOp.
-  Value dest = rewriter.create<tensor::EmptyOp>(
-      loc, reifiedReturnShapes[0],
+  Value dest = tensor::EmptyOp::create(
+      rewriter, loc, reifiedReturnShapes[0],
       transposeOp.getResult().getType().getElementType());
   Operation *write =
       createWriteOrMaskedWrite(rewriter, loc, transposeOp.getResult(), dest);
@@ -1924,8 +1930,9 @@ vectorizeAsTensorUnpackOp(RewriterBase &rewriter, linalg::UnPackOp unpackOp,
   }
   Location loc = unpackOp->getLoc();
 
-  auto padValue = rewriter.create<arith::ConstantOp>(
-      loc, rewriter.getZeroAttr(unpackOp.getSourceType().getElementType()));
+  auto padValue = arith::ConstantOp::create(
+      rewriter, loc,
+      rewriter.getZeroAttr(unpackOp.getSourceType().getElementType()));
 
   // Read result, mask if necessary. If transferReadOp shape is not equal
   // to shape of source, then a mask is necessary.
@@ -1943,16 +1950,16 @@ vectorizeAsTensorUnpackOp(RewriterBase &rewriter, linalg::UnPackOp unpackOp,
   RankedTensorType stripMineTensorType =
       RankedTensorType::get(stripMineShape, stripMineElemType);
   // Transpose the appropriate rows to match output.
-  vector::TransposeOp transposeOp = rewriter.create<vector::TransposeOp>(
-      loc, readResult, lastDimToInsertPosPerm);
+  vector::TransposeOp transposeOp = vector::TransposeOp::create(
+      rewriter, loc, readResult, lastDimToInsertPosPerm);
 
   // Collapse the vector to the size required by result.
   RankedTensorType collapsedType = tensor::CollapseShapeOp::inferCollapsedType(
       stripMineTensorType, packMetadata.reassociations);
   mlir::VectorType vecCollapsedType =
       VectorType::get(collapsedType.getShape(), collapsedType.getElementType());
-  vector::ShapeCastOp shapeCastOp = rewriter.create<vector::ShapeCastOp>(
-      loc, vecCollapsedType, transposeOp->getResult(0));
+  vector::ShapeCastOp shapeCastOp = vector::ShapeCastOp::create(
+      rewriter, loc, vecCollapsedType, transposeOp->getResult(0));
 
   // writeVectorSizes had to match the shapecast shape for dynamic sizes,
   // otherwise the validator complains that the mask size is invalid.
@@ -1992,8 +1999,8 @@ vectorizeAsTensorPadOp(RewriterBase &rewriter, tensor::PadOp padOp,
       /*useInBoundsInsteadOfMasking=*/false);
 
   // Create Xfer write Op
-  Value dest = rewriter.create<tensor::EmptyOp>(
-      loc, reifiedReturnShapes[0], padOp.getResultType().getElementType());
+  Value dest = tensor::EmptyOp::create(rewriter, loc, reifiedReturnShapes[0],
+                                       padOp.getResultType().getElementType());
   Operation *write = createWriteOrMaskedWrite(rewriter, loc, maskedRead, dest);
   newResults.push_back(write->getResult(0));
   return success();
@@ -2756,20 +2763,21 @@ LogicalResult mlir::linalg::vectorizeCopy(RewriterBase &rewriter,
   auto writeType = VectorType::get(dstType.getShape(), dstElementType);
 
   Location loc = copyOp->getLoc();
-  Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+  Value zero = arith::ConstantIndexOp::create(rewriter, loc, 0);
   SmallVector<Value> indices(srcType.getRank(), zero);
 
-  Value readValue = rewriter.create<vector::TransferReadOp>(
-      loc, readType, copyOp.getSource(), indices,
+  Value readValue = vector::TransferReadOp::create(
+      rewriter, loc, readType, copyOp.getSource(), indices,
       /*padding=*/std::nullopt,
       rewriter.getMultiDimIdentityMap(srcType.getRank()));
   if (cast<VectorType>(readValue.getType()).getRank() == 0) {
+    readValue = vector::ExtractOp::create(rewriter, loc, readValue,
+                                          ArrayRef<int64_t>());
     readValue =
-        rewriter.create<vector::ExtractOp>(loc, readValue, ArrayRef<int64_t>());
-    readValue = rewriter.create<vector::BroadcastOp>(loc, writeType, readValue);
+        vector::BroadcastOp::create(rewriter, loc, writeType, readValue);
   }
-  Operation *writeValue = rewriter.create<vector::TransferWriteOp>(
-      loc, readValue, copyOp.getTarget(), indices,
+  Operation *writeValue = vector::TransferWriteOp::create(
+      rewriter, loc, readValue, copyOp.getTarget(), indices,
       rewriter.getMultiDimIdentityMap(srcType.getRank()));
   rewriter.replaceOp(copyOp, writeValue->getResults());
   return success();
@@ -3079,8 +3087,8 @@ vectorizeAsInsertSliceOp(RewriterBase &rewriter, tensor::InsertSliceOp sliceOp,
 
   if (!padValue) {
     auto elemType = sourceType.getElementType();
-    padValue = rewriter.create<arith::ConstantOp>(
-        sliceOp.getLoc(), elemType, rewriter.getZeroAttr(elemType));
+    padValue = arith::ConstantOp::create(rewriter, sliceOp.getLoc(), elemType,
+                                         rewriter.getZeroAttr(elemType));
   }
 
   // 2. Get the vector shape
@@ -3111,7 +3119,7 @@ vectorizeAsInsertSliceOp(RewriterBase &rewriter, tensor::InsertSliceOp sliceOp,
 
   // Create read
   SmallVector<Value> readIndices(
-      vecType.getRank(), rewriter.create<arith::ConstantIndexOp>(loc, 0));
+      vecType.getRank(), arith::ConstantIndexOp::create(rewriter, loc, 0));
   Value read = mlir::vector::createReadOrMaskedRead(
       rewriter, loc, source, vecType.getShape(), padValue,
       /*useInBoundsInsteadOfMasking=*/inputVectorSizes.empty());
@@ -3198,9 +3206,10 @@ struct PadOpVectorizationWithInsertSlicePattern
     // Generate TransferReadOp: Read entire source tensor and add high
     // padding.
     SmallVector<Value> readIndices(
-        vecRank, rewriter.create<arith::ConstantIndexOp>(padOp.getLoc(), 0));
-    auto read = rewriter.create<vector::TransferReadOp>(
-        padOp.getLoc(), vecType, padOp.getSource(), readIndices, padValue);
+        vecRank, arith::ConstantIndexOp::create(rewriter, padOp.getLoc(), 0));
+    auto read = vector::TransferReadOp::create(rewriter, padOp.getLoc(),
+                                               vecType, padOp.getSource(),
+                                               readIndices, padValue);
 
     // Generate TransferWriteOp: Write to InsertSliceOp's dest tensor at
     // specified offsets. Write is fully in-bounds because a InsertSliceOp's
@@ -3334,8 +3343,8 @@ LogicalResult LinalgCopyVTRForwardingPattern::matchAndRewrite(
   // When forwarding to vector.transfer_read, the attribute must be reset
   // conservatively.
   auto vectorType = xferOp.getVectorType();
-  Value res = rewriter.create<vector::TransferReadOp>(
-      xferOp.getLoc(), vectorType, in, xferOp.getIndices(),
+  Value res = vector::TransferReadOp::create(
+      rewriter, xferOp.getLoc(), vectorType, in, xferOp.getIndices(),
       xferOp.getPermutationMapAttr(), xferOp.getPadding(), xferOp.getMask(),
       rewriter.getBoolArrayAttr(
           SmallVector<bool>(vectorType.getRank(), false)));
@@ -3393,8 +3402,8 @@ LogicalResult LinalgCopyVTWForwardingPattern::matchAndRewrite(
   // When forwarding to vector.transfer_write, the attribute must be reset
   // conservatively.
   auto vector = xferOp.getVector();
-  rewriter.create<vector::TransferWriteOp>(
-      xferOp.getLoc(), vector, out, xferOp.getIndices(),
+  vector::TransferWriteOp::create(
+      rewriter, xferOp.getLoc(), vector, out, xferOp.getIndices(),
       xferOp.getPermutationMapAttr(), xferOp.getMask(),
       rewriter.getBoolArrayAttr(SmallVector<bool>(
           dyn_cast<VectorType>(vector.getType()).getRank(), false)));
@@ -3589,7 +3598,7 @@ struct Conv1DGenerator
     }
 
     vector::TransferWriteOp write;
-    Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+    Value zero = arith::ConstantIndexOp::create(rewriter, loc, 0);
 
     // w is unrolled (i.e. wSizeStep == 1) iff strideW > 1.
     // When strideW == 1, we can batch the contiguous loads and avoid
@@ -3608,17 +3617,17 @@ struct Conv1DGenerator
     SmallVector<Value> resPadding(resShape.size(), zero);
 
     // Read the whole lhs, rhs and res in one shot (with zero padding).
-    Value lhs = rewriter.create<vector::TransferReadOp>(
-        loc, lhsType, lhsShaped, lhsPadding,
+    Value lhs = vector::TransferReadOp::create(
+        rewriter, loc, lhsType, lhsShaped, lhsPadding,
         /*padding=*/arith::getZeroConstant(rewriter, loc, lhsEltType));
     // This is needed only for Conv.
     Value rhs = nullptr;
     if (oper == ConvOperationKind::Conv)
-      rhs = rewriter.create<vector::TransferReadOp>(
-          loc, rhsType, rhsShaped, rhsPadding,
+      rhs = vector::TransferReadOp::create(
+          rewriter, loc, rhsType, rhsShaped, rhsPadding,
           /*padding=*/arith::getZeroConstant(rewriter, loc, rhsEltType));
-    Value res = rewriter.create<vector::TransferReadOp>(
-        loc, resType, resShaped, resPadding,
+    Value res = vector::TransferReadOp::create(
+        rewriter, loc, resType, resShaped, resPadding,
         /*padding=*/arith::getZeroConstant(rewriter, loc, resEltType));
 
     // The base vectorization case for channeled convolution is input:
@@ -3633,16 +3642,16 @@ struct Conv1DGenerator
       // To match base vectorization case, we pre-transpose current case.
       // ncw -> nwc
       static constexpr std::array<int64_t, 3> permLhs = {0, 2, 1};
-      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, permLhs);
+      lhs = vector::TransposeOp::create(rewriter, loc, lhs, permLhs);
       // fcw -> wcf
       static constexpr std::array<int64_t, 3> permRhs = {2, 1, 0};
 
       // This is needed only for Conv.
       if (oper == ConvOperationKind::Conv)
-        rhs = rewriter.create<vector::TransposeOp>(loc, rhs, permRhs);
+        rhs = vector::TransposeOp::create(rewriter, loc, rhs, permRhs);
       // nfw -> nwf
       static constexpr std::array<int64_t, 3> permRes = {0, 2, 1};
-      res = rewriter.create<vector::TransposeOp>(loc, res, permRes);
+      res = vector::TransposeOp::create(rewriter, loc, res, permRes);
       break;
     }
     }
@@ -3707,7 +3716,7 @@ struct Conv1DGenerator
     case Conv1DOpOrder::Ncw: {
       // nwf -> nfw
       static constexpr std::array<int64_t, 3> perm = {0, 2, 1};
-      res = rewriter.create<vector::TransposeOp>(loc, res, perm);
+      res = vector::TransposeOp::create(rewriter, loc, res, perm);
       break;
     }
     }
@@ -3731,16 +3740,16 @@ struct Conv1DGenerator
         cast<ShapedType>(val.getType()).cloneWith(std::nullopt, dstElementType);
 
     if (isa<IntegerType>(srcElementType) && isa<FloatType>(dstElementType)) {
-      return rewriter.create<arith::SIToFPOp>(loc, dstType, val);
+      return arith::SIToFPOp::create(rewriter, loc, dstType, val);
     }
 
     if (isa<FloatType>(srcElementType) && isa<FloatType>(dstElementType) &&
         srcWidth < dstWidth)
-      return rewriter.create<arith::ExtFOp>(loc, dstType, val);
+      return arith::ExtFOp::create(rewriter, loc, dstType, val);
 
     if (isa<IntegerType>(srcElementType) && isa<IntegerType>(dstElementType) &&
         srcWidth < dstWidth)
-      return rewriter.create<arith::ExtSIOp>(loc, dstType, val);
+      return arith::ExtSIOp::create(rewriter, loc, dstType, val);
 
     assert(false && "unhandled promotion case");
     return nullptr;
@@ -3755,8 +3764,8 @@ struct Conv1DGenerator
     bindDims(ctx, n, w, f, c);
     lhs = promote(rewriter, loc, lhs, res.getType());
     rhs = promote(rewriter, loc, rhs, res.getType());
-    auto contrationOp = rewriter.create<vector::ContractionOp>(
-        loc, lhs, rhs, res,
+    auto contrationOp = vector::ContractionOp::create(
+        rewriter, loc, lhs, rhs, res,
         /*indexingMaps=*/MapList{{n, w, c}, {c, f}, {n, w, f}},
         /*iteratorTypes=*/ArrayRef<vector::IteratorType>{par, par, par, red});
     contrationOp.setKind(reductionKind);
@@ -3767,8 +3776,8 @@ struct Conv1DGenerator
   // convolution.
   Value conv1dSliceAsOuterProduct(RewriterBase &rewriter, Location loc,
                                   Value lhs, Value rhs, Value res) {
-    return rewriter.create<vector::OuterProductOp>(
-        loc, res.getType(), lhs, rhs, res, vector::CombiningKind::ADD);
+    return vector::OuterProductOp::create(rewriter, loc, res.getType(), lhs,
+                                          rhs, res, vector::CombiningKind::ADD);
   }
 
   // Create a reduction: lhs{n, w, c} -> res{n, w, c}
@@ -3815,7 +3824,7 @@ struct Conv1DGenerator
     bindShapeDims(resShapedType, nSize, wSize);
 
     vector::TransferWriteOp write;
-    Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+    Value zero = arith::ConstantIndexOp::create(rewriter, loc, 0);
 
     // w is unrolled (i.e. wSizeStep == 1) iff strideW > 1.
     // When strideW == 1, we can batch the contiguous loads and avoid
@@ -3858,29 +3867,29 @@ struct Conv1DGenerator
           cast<LinalgOp>(op).hasPureTensorSemantics(), opToMask, rewriter);
 
       Value maskOp =
-          rewriter.create<vector::CreateMaskOp>(loc, maskType, mixedDims);
+          vector::CreateMaskOp::create(rewriter, loc, maskType, mixedDims);
 
       return mlir::vector::maskOperation(rewriter, opToMask, maskOp);
     };
 
     // Read lhs slice of size {n, w * strideW + kw * dilationW, c} @ [0, 0,
     // 0].
-    Value lhs = rewriter.create<vector::TransferReadOp>(
-        loc, lhsType, lhsShaped, ValueRange{zero, zero, zero},
+    Value lhs = vector::TransferReadOp::create(
+        rewriter, loc, lhsType, lhsShaped, ValueRange{zero, zero, zero},
         /*padding=*/arith::getZeroConstant(rewriter, loc, lhsEltType));
     auto maybeMaskedLhs = maybeMaskXferOp(
         lhsType.getShape(), lhsType.getScalableDims(), lhs.getDefiningOp());
 
     // Read rhs slice of size {kw, c} @ [0, 0].
-    Value rhs = rewriter.create<vector::TransferReadOp>(
-        loc, rhsType, rhsShaped, ValueRange{zero, zero},
+    Value rhs = vector::TransferReadOp::create(
+        rewriter, loc, rhsType, rhsShaped, ValueRange{zero, zero},
         /*padding=*/arith::getZeroConstant(rewriter, loc, rhsEltType));
     auto maybeMaskedRhs = maybeMaskXferOp(
         rhsType.getShape(), rhsType.getScalableDims(), rhs.getDefiningOp());
 
     // Read res slice of size {n, w, c} @ [0, 0, 0].
-    Value res = rewriter.create<vector::TransferReadOp>(
-        loc, resType, resShaped, ValueRange{zero, zero, zero},
+    Value res = vector::TransferReadOp::create(
+        rewriter, loc, resType, resShaped, ValueRange{zero, zero, zero},
         /*padding=*/arith::getZeroConstant(rewriter, loc, resEltType));
     auto maybeMaskedRes = maybeMaskXferOp(
         resType.getShape(), resType.getScalableDims(), res.getDefiningOp());
@@ -3897,22 +3906,22 @@ struct Conv1DGenerator
     //   @ [0, sw * w + dw * kw, 0].
     for (int64_t kw = 0; kw < kwSize; ++kw) {
       for (int64_t w = 0; w < wSize; w += wSizeStep) {
-        lhsVals.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
-            loc, maybeMaskedLhs->getResult(0),
+        lhsVals.push_back(vector::ExtractStridedSliceOp::create(
+            rewriter, loc, maybeMaskedLhs->getResult(0),
             /*offsets=*/ArrayRef<int64_t>{0, w * strideW + kw * dilationW, 0},
             inOutSliceSizes, inOutStrides));
       }
     }
     // Extract rhs slice of size {c} @ [kw].
     for (int64_t kw = 0; kw < kwSize; ++kw) {
-      rhsVals.push_back(rewriter.create<vector::ExtractOp>(
-          loc, maybeMaskedRhs->getResult(0),
-          /*offsets=*/ArrayRef<int64_t>{kw}));
+      rhsVals.push_back(
+          vector::ExtractOp::create(rewriter, loc, maybeMaskedRhs->getResult(0),
+                                    /*offsets=*/ArrayRef<int64_t>{kw}));
     }
     // Extract res slice: {n, wSizeStep, c} @ [0, w, 0].
     for (int64_t w = 0; w < wSize; w += wSizeStep) {
-      resVals.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
-          loc, maybeMaskedRes->getResult(0),
+      resVals.push_back(vector::ExtractStridedSliceOp::create(
+          rewriter, loc, maybeMaskedRes->getResult(0),
           /*offsets=*/ArrayRef<int64_t>{0, w, 0}, inOutSliceSizes,
           inOutStrides));
     }
@@ -3937,17 +3946,19 @@ struct Conv1DGenerator
         if (flatten) {
           // Flatten the input and output vectors (collapse the channel
           // dimension)
-          lhsVal = rewriter.create<vector::ShapeCastOp>(
-              loc, lhsTypeAfterFlattening, lhsVals[linearIndex(kw, w)]);
-          resVal = rewriter.create<vector::ShapeCastOp>(
-              loc, resTypeAfterFlattening, resVals[w]);
+          lhsVal =
+              vector::ShapeCastOp::create(rewriter, loc, lhsTypeAfterFlattening,
+                                          lhsVals[linearIndex(kw, w)]);
+          resVal = vector::ShapeCastOp::create(
+              rewriter, loc, resTypeAfterFlattening, resVals[w]);
         }
         resVals[w] = depthwiseConv1dSliceAsMulAcc(rewriter, loc, lhsVal,
                                                   rhsVals[kw], resVal, flatten);
         if (flatten) {
           // Un-flatten the output vector (restore the channel dimension)
-          resVals[w] = rewriter.create<vector::ShapeCastOp>(
-              loc, VectorType::get(inOutSliceSizes, resEltType), resVals[w]);
+          resVals[w] = vector::ShapeCastOp::create(
+              rewriter, loc, VectorType::get(inOutSliceSizes, resEltType),
+              resVals[w]);
         }
       }
     }
@@ -3965,8 +3976,8 @@ struct Conv1DGenerator
     // Write back res slice: {n, wSizeStep, c} @ [0, w, 0].
     // This does not depend on kw.
     for (int64_t w = 0; w < wSize; w += wSizeStep) {
-      maybeMaskedRes = rewriter.create<vector::InsertStridedSliceOp>(
-          loc, resVals[w], maybeMaskedRes->getResult(0),
+      maybeMaskedRes = vector::InsertStridedSliceOp::create(
+          rewriter, loc, resVals[w], maybeMaskedRes->getResult(0),
           /*offsets=*/ArrayRef<int64_t>{0, w, 0},
           /*strides=*/ArrayRef<int64_t>{1, 1, 1});
     }
@@ -3975,8 +3986,8 @@ struct Conv1DGenerator
     //===------------------------------------------------------------------===//
 
     // Write back res slice of size {n, w, c} @ [0, 0, 0].
-    Operation *resOut = rewriter.create<vector::TransferWriteOp>(
-        loc, maybeMaskedRes->getResult(0), resShaped,
+    Operation *resOut = vector::TransferWriteOp::create(
+        rewriter, loc, maybeMaskedRes->getResult(0), resShaped,
         ValueRange{zero, zero, zero});
     return maybeMaskXferOp(resType.getShape(), resType.getScalableDims(),
                            resOut);
@@ -4013,11 +4024,11 @@ struct Conv1DGenerator
           indices.push_back(j);
       }
 
-      rhs = rewriter.create<vector::ShuffleOp>(loc, rhs, rhs, indices);
+      rhs = vector::ShuffleOp::create(rewriter, loc, rhs, rhs, indices);
     }
     // Broadcast the filter to match the output vector
-    rhs = rewriter.create<vector::BroadcastOp>(
-        loc, resTy.clone(rhsTy.getElementType()), rhs);
+    rhs = vector::BroadcastOp::create(rewriter, loc,
+                                      resTy.clone(rhsTy.getElementType()), rhs);
 
     rhs = promote(rewriter, loc, rhs, resTy);
 
@@ -4025,10 +4036,10 @@ struct Conv1DGenerator
       return nullptr;
 
     if (isa<FloatType>(resTy.getElementType()))
-      return rewriter.create<vector::FMAOp>(loc, lhs, rhs, res);
+      return vector::FMAOp::create(rewriter, loc, lhs, rhs, res);
 
-    auto mul = rewriter.create<arith::MulIOp>(loc, lhs, rhs);
-    return rewriter.create<arith::AddIOp>(loc, mul, res);
+    auto mul = arith::MulIOp::create(rewriter, loc, lhs, rhs);
+    return arith::AddIOp::create(rewriter, loc, mul, res);
   }
 
   /// Entry point for non-channeled convolution:
diff --git a/mlir/lib/Dialect/Linalg/Transforms/WinogradConv2D.cpp b/mlir/lib/Dialect/Linalg/Transforms/WinogradConv2D.cpp
index 9fd084487e3fd..669fefcd86de1 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/WinogradConv2D.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/WinogradConv2D.cpp
@@ -201,11 +201,12 @@ Value create2DTransformMatrix(OpBuilder &builder, Location loc,
                               TransformMatrix transform, Type type) {
   ArrayRef<float> constVec(transform.table, transform.rows * transform.cols);
 
-  return builder.create<arith::ConstantOp>(
-      loc, DenseFPElementsAttr::get(
-               RankedTensorType::get(
-                   SmallVector<int64_t>{transform.rows, transform.cols}, type),
-               constVec));
+  return arith::ConstantOp::create(
+      builder, loc,
+      DenseFPElementsAttr::get(
+          RankedTensorType::get(
+              SmallVector<int64_t>{transform.rows, transform.cols}, type),
+          constVec));
 }
 
 /// Extract height x width data from 4D tensors.
@@ -233,8 +234,8 @@ Value extract2DDataFrom4D(OpBuilder &builder, Location loc, Value source,
 
   auto extractFilterType =
       RankedTensorType::get({extractHeight, extractWidth}, elementType);
-  auto extractFilterOp = builder.create<tensor::ExtractSliceOp>(
-      loc, extractFilterType, source, offsets, sizes, strides);
+  auto extractFilterOp = tensor::ExtractSliceOp::create(
+      builder, loc, extractFilterType, source, offsets, sizes, strides);
 
   return extractFilterOp;
 }
@@ -267,8 +268,8 @@ Value extract2DDataFrom6D(OpBuilder &builder, Location loc, Value source,
   SmallVector<OpFoldResult> strides(srcSize, oneIndex);
 
   auto extractFilterType = RankedTensorType::get({height, width}, elementType);
-  auto extractFilterOp = builder.create<tensor::ExtractSliceOp>(
-      loc, extractFilterType, source, offsets, sizes, strides);
+  auto extractFilterOp = tensor::ExtractSliceOp::create(
+      builder, loc, extractFilterType, source, offsets, sizes, strides);
 
   return extractFilterOp;
 }
@@ -293,8 +294,8 @@ Value insert2DDataTo4D(OpBuilder &builder, Location loc, Value source,
   retSizes[widthIdx] = builder.getIndexAttr(width);
   SmallVector<OpFoldResult> strides(destSize, oneIndex);
 
-  auto insertSliceOp = builder.create<tensor::InsertSliceOp>(
-      loc, source, dest, retOffsets, retSizes, strides);
+  auto insertSliceOp = tensor::InsertSliceOp::create(
+      builder, loc, source, dest, retOffsets, retSizes, strides);
 
   return insertSliceOp;
 }
@@ -321,8 +322,8 @@ Value insert2DDataTo6D(OpBuilder &builder, Location loc, Value source,
   retSizes[widthIdx] = builder.getIndexAttr(width);
   SmallVector<OpFoldResult> strides(destSize, oneIndex);
 
-  auto insertSliceOp = builder.create<tensor::InsertSliceOp>(
-      loc, source, dest, retOffsets, retSizes, strides);
+  auto insertSliceOp = tensor::InsertSliceOp::create(
+      builder, loc, source, dest, retOffsets, retSizes, strides);
 
   return insertSliceOp;
 }
@@ -372,7 +373,7 @@ Value filterTransform(RewriterBase &rewriter, Location loc, Value filter,
   if (filterW != r && filterW != 1)
     return Value();
 
-  Value zeroIdx = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+  Value zeroIdx = arith::ConstantIndexOp::create(rewriter, loc, 0);
   auto buildBody = [&](OpBuilder &builder, Location loc, ValueRange ivs,
                        ValueRange args) -> scf::ValueVector {
     Value FIter = ivs[0];
@@ -386,8 +387,8 @@ Value filterTransform(RewriterBase &rewriter, Location loc, Value filter,
 
     int64_t retRows = 1;
     Value matmulRetValue = extractFilter;
-    Value zero = builder.create<arith::ConstantOp>(
-        loc, rewriter.getZeroAttr(elementType));
+    Value zero = arith::ConstantOp::create(builder, loc,
+                                           rewriter.getZeroAttr(elementType));
     if (leftTransform) {
       // Get constant transform matrix G.
       auto it = GMatrices.find(fmr);
@@ -401,12 +402,14 @@ Value filterTransform(RewriterBase &rewriter, Location loc, Value filter,
           builder
               .create<tensor::EmptyOp>(loc, matmulType.getShape(), elementType)
               .getResult();
-      auto init = builder.create<linalg::FillOp>(loc, zero, empty).getResult(0);
+      auto init =
+          linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
 
       Value G = create2DTransformMatrix(builder, loc, GMatrix, elementType);
       // Multiply G x g.
-      auto matmulOp = builder.create<linalg::MatmulOp>(
-          loc, matmulType, ValueRange{G, extractFilter}, ValueRange{init});
+      auto matmulOp = linalg::MatmulOp::create(builder, loc, matmulType,
+                                               ValueRange{G, extractFilter},
+                                               ValueRange{init});
       matmulRetValue = matmulOp.getResult(0);
     }
 
@@ -423,12 +426,14 @@ Value filterTransform(RewriterBase &rewriter, Location loc, Value filter,
           builder
               .create<tensor::EmptyOp>(loc, matmulType.getShape(), elementType)
               .getResult();
-      auto init = builder.create<linalg::FillOp>(loc, zero, empty).getResult(0);
+      auto init =
+          linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
 
       Value GT = create2DTransformMatrix(builder, loc, GTMatrix, elementType);
       // Multiply u = (G x g) x GT.
-      auto matmulOp = builder.create<linalg::MatmulOp>(
-          loc, matmulType, ValueRange{matmulRetValue, GT}, ValueRange{init});
+      auto matmulOp = linalg::MatmulOp::create(builder, loc, matmulType,
+                                               ValueRange{matmulRetValue, GT},
+                                               ValueRange{init});
       matmulRetValue = matmulOp.getResult(0);
     }
 
@@ -445,9 +450,9 @@ Value filterTransform(RewriterBase &rewriter, Location loc, Value filter,
     return {insertSliceOp};
   };
 
-  auto fUpperBound = rewriter.create<arith::ConstantIndexOp>(loc, filterF);
-  auto cUpperBound = rewriter.create<arith::ConstantIndexOp>(loc, filterC);
-  auto oneStep = rewriter.create<arith::ConstantIndexOp>(loc, 1);
+  auto fUpperBound = arith::ConstantIndexOp::create(rewriter, loc, filterF);
+  auto cUpperBound = arith::ConstantIndexOp::create(rewriter, loc, filterC);
+  auto oneStep = arith::ConstantIndexOp::create(rewriter, loc, 1);
   scf::LoopNest loops = scf::buildLoopNest(
       rewriter, loc, {zeroIdx, zeroIdx}, {fUpperBound, cUpperBound},
       {oneStep, oneStep}, {retValue}, buildBody);
@@ -516,10 +521,11 @@ Value inputTransform(RewriterBase &rewriter, Location loc, Value input,
     auto identityAffineMap = rewriter.getMultiDimIdentityMap(1);
     auto affineMap =
         AffineMap::get(1, 0, {builder.getAffineDimExpr(0) * m}, context);
-    Value heightOffset = builder.create<affine::AffineApplyOp>(
-        loc, leftTransform ? affineMap : identityAffineMap, tileHIter);
-    Value widthOffset = builder.create<affine::AffineApplyOp>(
-        loc, rightTransform ? affineMap : identityAffineMap, tileWIter);
+    Value heightOffset = affine::AffineApplyOp::create(
+        builder, loc, leftTransform ? affineMap : identityAffineMap, tileHIter);
+    Value widthOffset = affine::AffineApplyOp::create(
+        builder, loc, rightTransform ? affineMap : identityAffineMap,
+        tileWIter);
 
     // Extract (H, W) from (N, H, W, C).
     auto extractInput =
@@ -530,8 +536,8 @@ Value inputTransform(RewriterBase &rewriter, Location loc, Value input,
     int64_t retRows = 1;
     int64_t retCols = 1;
     Value matmulRetValue = extractInput;
-    Value zero = builder.create<arith::ConstantOp>(
-        loc, rewriter.getZeroAttr(elementType));
+    Value zero = arith::ConstantOp::create(builder, loc,
+                                           rewriter.getZeroAttr(elementType));
     if (leftTransform) {
       // Get constant transform matrix BT.
       auto it = BTMatrices.find(fmr);
@@ -545,13 +551,15 @@ Value inputTransform(RewriterBase &rewriter, Location loc, Value input,
           builder
               .create<tensor::EmptyOp>(loc, matmulType.getShape(), elementType)
               .getResult();
-      auto init = builder.create<linalg::FillOp>(loc, zero, empty).getResult(0);
+      auto init =
+          linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
 
       Value BT =
           create2DTransformMatrix(builder, loc, BTMatrix, builder.getF32Type());
       // Multiply BT x d.
-      auto matmulOp = builder.create<linalg::MatmulOp>(
-          loc, matmulType, ValueRange{BT, matmulRetValue}, ValueRange{init});
+      auto matmulOp = linalg::MatmulOp::create(builder, loc, matmulType,
+                                               ValueRange{BT, matmulRetValue},
+                                               ValueRange{init});
       matmulRetValue = matmulOp.getResult(0);
     }
 
@@ -568,12 +576,14 @@ Value inputTransform(RewriterBase &rewriter, Location loc, Value input,
           builder
               .create<tensor::EmptyOp>(loc, matmulType.getShape(), elementType)
               .getResult();
-      auto init = builder.create<linalg::FillOp>(loc, zero, empty).getResult(0);
+      auto init =
+          linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
       Value B =
           create2DTransformMatrix(builder, loc, BMatrix, builder.getF32Type());
       // Multiply v = (BT x d) x B.
-      auto matmulOp = builder.create<linalg::MatmulOp>(
-          loc, matmulType, ValueRange{matmulRetValue, B}, ValueRange{init});
+      auto matmulOp = linalg::MatmulOp::create(builder, loc, matmulType,
+                                               ValueRange{matmulRetValue, B},
+                                               ValueRange{init});
       matmulRetValue = matmulOp.getResult(0);
     }
 
@@ -586,12 +596,12 @@ Value inputTransform(RewriterBase &rewriter, Location loc, Value input,
     return {combinedVal};
   };
 
-  auto zeroIdx = rewriter.create<arith::ConstantIndexOp>(loc, 0);
-  auto tileHBound = rewriter.create<arith::ConstantIndexOp>(loc, tileH);
-  auto tileWBound = rewriter.create<arith::ConstantIndexOp>(loc, tileW);
-  auto nUpperBound = rewriter.create<arith::ConstantIndexOp>(loc, inputN);
-  auto cUpperBound = rewriter.create<arith::ConstantIndexOp>(loc, inputC);
-  auto oneStep = rewriter.create<arith::ConstantIndexOp>(loc, 1);
+  auto zeroIdx = arith::ConstantIndexOp::create(rewriter, loc, 0);
+  auto tileHBound = arith::ConstantIndexOp::create(rewriter, loc, tileH);
+  auto tileWBound = arith::ConstantIndexOp::create(rewriter, loc, tileW);
+  auto nUpperBound = arith::ConstantIndexOp::create(rewriter, loc, inputN);
+  auto cUpperBound = arith::ConstantIndexOp::create(rewriter, loc, inputC);
+  auto oneStep = arith::ConstantIndexOp::create(rewriter, loc, 1);
   scf::LoopNest loops = scf::buildLoopNest(
       rewriter, loc, {zeroIdx, zeroIdx, zeroIdx, zeroIdx},
       {tileHBound, tileWBound, nUpperBound, cUpperBound},
@@ -629,8 +639,8 @@ static Value matrixMultiply(RewriterBase &rewriter, Location loc,
       {filterShape[0] * filterShape[1], filterShape[2], filterShape[3]},
       filterElementType);
   SmallVector<ReassociationIndices> filterReassoc = {{0, 1}, {2}, {3}};
-  Value collapseFilter = rewriter.create<tensor::CollapseShapeOp>(
-      loc, filterReassocType, transformedFilter, filterReassoc);
+  Value collapseFilter = tensor::CollapseShapeOp::create(
+      rewriter, loc, filterReassocType, transformedFilter, filterReassoc);
 
   // Convert (alphaH, alphaW, tileH, tileW, N, C) to
   // (alphaH x alphaW, tileH x tileW x N, C) for input.
@@ -643,8 +653,8 @@ static Value matrixMultiply(RewriterBase &rewriter, Location loc,
        inputShape[2] * inputShape[3] * inputShape[4], inputShape[5]},
       inputElementType);
   SmallVector<ReassociationIndices> inputReassoc = {{0, 1}, {2, 3, 4}, {5}};
-  Value collapseInput = rewriter.create<tensor::CollapseShapeOp>(
-      loc, inputReassocType, transformedInput, inputReassoc);
+  Value collapseInput = tensor::CollapseShapeOp::create(
+      rewriter, loc, inputReassocType, transformedInput, inputReassoc);
 
   // Batched matrix multiply.
   auto matmulType = RankedTensorType::get(
@@ -655,12 +665,12 @@ static Value matrixMultiply(RewriterBase &rewriter, Location loc,
                     .create<tensor::EmptyOp>(loc, matmulType.getShape(),
                                              outputElementType)
                     .getResult();
-  Value zero = rewriter.create<arith::ConstantOp>(
-      loc, rewriter.getZeroAttr(outputElementType));
-  Value init = rewriter.create<linalg::FillOp>(loc, zero, empty).getResult(0);
+  Value zero = arith::ConstantOp::create(
+      rewriter, loc, rewriter.getZeroAttr(outputElementType));
+  Value init = linalg::FillOp::create(rewriter, loc, zero, empty).getResult(0);
 
-  auto matmulOp = rewriter.create<linalg::BatchMatmulOp>(
-      loc, matmulType, ValueRange({collapseInput, collapseFilter}),
+  auto matmulOp = linalg::BatchMatmulOp::create(
+      rewriter, loc, matmulType, ValueRange({collapseInput, collapseFilter}),
       ValueRange{init});
 
   // The result shape of batch matmul is (alphaH x alphaW, tileH x tileW x N, F)
@@ -670,8 +680,8 @@ static Value matrixMultiply(RewriterBase &rewriter, Location loc,
       RankedTensorType::get({inputShape[0], inputShape[1], inputShape[2],
                              inputShape[3], inputShape[4], filterShape[3]},
                             outputElementType);
-  auto expandOutput = rewriter.create<tensor::ExpandShapeOp>(
-      loc, outputReassocType, matmulOp.getResult(0), outputReassoc);
+  auto expandOutput = tensor::ExpandShapeOp::create(
+      rewriter, loc, outputReassocType, matmulOp.getResult(0), outputReassoc);
   return expandOutput;
 }
 
@@ -750,16 +760,17 @@ Value outputTransform(RewriterBase &rewriter, Location loc, Value value,
     int64_t retRows = leftTransform ? ATMatrix.rows : 1;
 
     Value matmulRetValue = extractValue;
-    Value zero = builder.create<arith::ConstantOp>(
-        loc, rewriter.getZeroAttr(elementType));
+    Value zero = arith::ConstantOp::create(builder, loc,
+                                           rewriter.getZeroAttr(elementType));
 
     auto identityAffineMap = rewriter.getMultiDimIdentityMap(1);
     auto affineMap =
         AffineMap::get(1, 0, {builder.getAffineDimExpr(0) * m}, context);
-    Value heightOffset = builder.create<affine::AffineApplyOp>(
-        loc, leftTransform ? affineMap : identityAffineMap, tileHIter);
-    Value widthOffset = builder.create<affine::AffineApplyOp>(
-        loc, rightTransform ? affineMap : identityAffineMap, tileWIter);
+    Value heightOffset = affine::AffineApplyOp::create(
+        builder, loc, leftTransform ? affineMap : identityAffineMap, tileHIter);
+    Value widthOffset = affine::AffineApplyOp::create(
+        builder, loc, rightTransform ? affineMap : identityAffineMap,
+        tileWIter);
 
     Value outInitVal =
         extract2DDataFrom4D(builder, loc, args[0], NIter, FIter, heightOffset,
@@ -775,13 +786,14 @@ Value outputTransform(RewriterBase &rewriter, Location loc, Value value,
                          .create<tensor::EmptyOp>(loc, matmulType.getShape(),
                                                   elementType)
                          .getResult();
-        init = builder.create<linalg::FillOp>(loc, zero, empty).getResult(0);
+        init = linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
       }
 
       Value AT = create2DTransformMatrix(builder, loc, ATMatrix, elementType);
       // Multiply AT x m.
-      auto matmulOp = builder.create<linalg::MatmulOp>(
-          loc, matmulType, ValueRange{AT, matmulRetValue}, ValueRange{init});
+      auto matmulOp = linalg::MatmulOp::create(builder, loc, matmulType,
+                                               ValueRange{AT, matmulRetValue},
+                                               ValueRange{init});
       matmulRetValue = matmulOp.getResult(0);
     }
 
@@ -794,20 +806,21 @@ Value outputTransform(RewriterBase &rewriter, Location loc, Value value,
                          .create<tensor::EmptyOp>(loc, matmulType.getShape(),
                                                   elementType)
                          .getResult();
-        init = builder.create<linalg::FillOp>(loc, zero, empty).getResult(0);
+        init = linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
       }
 
       Value A = create2DTransformMatrix(builder, loc, AMatrix, elementType);
       // Multiply y = (AT x m) x A.
-      auto matmulOp = builder.create<linalg::MatmulOp>(
-          loc, matmulType, ValueRange{matmulRetValue, A}, ValueRange{init});
+      auto matmulOp = linalg::MatmulOp::create(builder, loc, matmulType,
+                                               ValueRange{matmulRetValue, A},
+                                               ValueRange{init});
       matmulRetValue = matmulOp.getResult(0);
     }
 
     if (scalarFactor != 1) {
       // Multiply by scalar factor and add outInitVal.
-      Value scalarFactorValue = builder.create<arith::ConstantOp>(
-          loc, FloatAttr::get(elementType, scalarFactor));
+      Value scalarFactorValue = arith::ConstantOp::create(
+          builder, loc, FloatAttr::get(elementType, scalarFactor));
       auto matmulType = RankedTensorType::get({retRows, retCols}, elementType);
       auto identityAffineMap = rewriter.getMultiDimIdentityMap(2);
       SmallVector<AffineMap> affineMaps = {
@@ -824,12 +837,12 @@ Value outputTransform(RewriterBase &rewriter, Location loc, Value value,
                       utils::IteratorType::parallel},
                   [&](OpBuilder &nestedBuilder, Location nestedLoc,
                       ValueRange args) {
-                    auto mulf = nestedBuilder.create<arith::MulFOp>(
-                        nestedLoc, args[0], args[1]);
-                    auto addf = nestedBuilder.create<arith::AddFOp>(
-                        nestedLoc, mulf.getResult(), args[2]);
-                    nestedBuilder.create<linalg::YieldOp>(nestedLoc,
-                                                          addf.getResult());
+                    auto mulf = arith::MulFOp::create(nestedBuilder, nestedLoc,
+                                                      args[0], args[1]);
+                    auto addf = arith::AddFOp::create(
+                        nestedBuilder, nestedLoc, mulf.getResult(), args[2]);
+                    linalg::YieldOp::create(nestedBuilder, nestedLoc,
+                                            addf.getResult());
                   })
               .getResult(0);
     }
@@ -847,12 +860,12 @@ Value outputTransform(RewriterBase &rewriter, Location loc, Value value,
 
   int64_t tilwH = valueShape[2];
   int64_t tileW = valueShape[3];
-  auto zeroIdx = rewriter.create<arith::ConstantIndexOp>(loc, 0);
-  auto tileHBound = rewriter.create<arith::ConstantIndexOp>(loc, tilwH);
-  auto tileWBound = rewriter.create<arith::ConstantIndexOp>(loc, tileW);
-  auto nUpperBound = rewriter.create<arith::ConstantIndexOp>(loc, valueN);
-  auto fUpperBound = rewriter.create<arith::ConstantIndexOp>(loc, valueF);
-  auto oneStep = rewriter.create<arith::ConstantIndexOp>(loc, 1);
+  auto zeroIdx = arith::ConstantIndexOp::create(rewriter, loc, 0);
+  auto tileHBound = arith::ConstantIndexOp::create(rewriter, loc, tilwH);
+  auto tileWBound = arith::ConstantIndexOp::create(rewriter, loc, tileW);
+  auto nUpperBound = arith::ConstantIndexOp::create(rewriter, loc, valueN);
+  auto fUpperBound = arith::ConstantIndexOp::create(rewriter, loc, valueF);
+  auto oneStep = arith::ConstantIndexOp::create(rewriter, loc, 1);
   scf::LoopNest loops = scf::buildLoopNest(
       rewriter, loc, {zeroIdx, zeroIdx, zeroIdx, zeroIdx},
       {tileHBound, tileWBound, nUpperBound, fUpperBound},
@@ -867,8 +880,8 @@ static Value padToAlignedTensor(RewriterBase &rewriter, Location loc,
   auto valueType = cast<ShapedType>(value.getType());
   Type elementType = valueType.getElementType();
   auto alignedType = RankedTensorType::get(alignedShape, elementType);
-  Value padValue = rewriter.create<arith::ConstantOp>(
-      loc, elementType, rewriter.getZeroAttr(elementType));
+  Value padValue = arith::ConstantOp::create(rewriter, loc, elementType,
+                                             rewriter.getZeroAttr(elementType));
 
   return linalg::makeComposedPadHighOp(rewriter, loc, alignedType, value,
                                        padValue, false);
@@ -887,8 +900,8 @@ static Value extractFromAlignedTensor(RewriterBase &rewriter, Location loc,
   SmallVector<OpFoldResult> sizes =
       getAsOpFoldResult(rewriter.getI64ArrayAttr(extractedShape));
 
-  return rewriter.create<tensor::ExtractSliceOp>(loc, extractedType, value,
-                                                 offsets, sizes, strides);
+  return tensor::ExtractSliceOp::create(rewriter, loc, extractedType, value,
+                                        offsets, sizes, strides);
 }
 
 /// Utility function to check all values in the attribute are 1.
@@ -979,10 +992,10 @@ winogradConv2DHelper(RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp,
   int64_t tileW = llvm::divideCeilSigned(outputW, widthM);
   auto retType = RankedTensorType::get({alphaH, alphaW, filterC, filterF},
                                        filterElementType);
-  Value retValue = rewriter.create<tensor::EmptyOp>(loc, retType.getShape(),
-                                                    filterElementType);
-  auto transformedFilter = rewriter.create<linalg::WinogradFilterTransformOp>(
-      loc, retType, filter, retValue, fmr);
+  Value retValue = tensor::EmptyOp::create(rewriter, loc, retType.getShape(),
+                                           filterElementType);
+  auto transformedFilter = linalg::WinogradFilterTransformOp::create(
+      rewriter, loc, retType, filter, retValue, fmr);
 
   // --- Create operation for input transform ---
 
@@ -998,10 +1011,10 @@ winogradConv2DHelper(RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp,
 
   retType = RankedTensorType::get(
       {alphaH, alphaW, tileH, tileW, inputN, inputC}, inputElementType);
-  retValue = rewriter.create<tensor::EmptyOp>(loc, retType.getShape(),
-                                              inputElementType);
-  auto transformedInput = rewriter.create<linalg::WinogradInputTransformOp>(
-      loc, retType, input, retValue, fmr);
+  retValue = tensor::EmptyOp::create(rewriter, loc, retType.getShape(),
+                                     inputElementType);
+  auto transformedInput = linalg::WinogradInputTransformOp::create(
+      rewriter, loc, retType, input, retValue, fmr);
 
   Type outputElementType = outputType.getElementType();
   Value matmulRet = matrixMultiply(rewriter, loc, transformedFilter,
@@ -1023,8 +1036,8 @@ winogradConv2DHelper(RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp,
     outputType = alignedOutputType;
   }
 
-  Value transformedOutput = rewriter.create<linalg::WinogradOutputTransformOp>(
-      loc, outputType, matmulRet, output, fmr);
+  Value transformedOutput = linalg::WinogradOutputTransformOp::create(
+      rewriter, loc, outputType, matmulRet, output, fmr);
 
   // When output size is not aligned with output tile size, extract the
   // value from the padded buffer.
diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index 14d62005e2a07..3593b5348d268 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -320,14 +320,14 @@ GenericOp makeMemRefCopyOp(OpBuilder &b, Location loc, Value from, Value to) {
       AffineMap::getMultiDimIdentityMap(memrefTypeTo.getRank(), b.getContext());
   SmallVector<utils::IteratorType> iteratorTypes(memrefTypeTo.getRank(),
                                                  utils::IteratorType::parallel);
-  return b.create<linalg::GenericOp>(
-      loc,
+  return linalg::GenericOp::create(
+      b, loc,
       /*inputs=*/from,
       /*outputs=*/to,
       /*indexingMaps=*/llvm::ArrayRef({id, id}),
       /*iteratorTypes=*/iteratorTypes,
       [](OpBuilder &b, Location loc, ValueRange args) {
-        b.create<linalg::YieldOp>(loc, args.front());
+        linalg::YieldOp::create(b, loc, args.front());
       });
 }
 
@@ -483,8 +483,8 @@ static void generateParallelLoopNest(
   case DistributionMethod::None: {
     // Generate a single parallel loop-nest operation for all outermost
     // parallel loops and recurse.
-    b.create<scf::ParallelOp>(
-        loc, lbs.take_front(numProcessed), ubs.take_front(numProcessed),
+    scf::ParallelOp::create(
+        b, loc, lbs.take_front(numProcessed), ubs.take_front(numProcessed),
         steps.take_front(numProcessed),
         [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange localIvs) {
           ivStorage.append(localIvs.begin(), localIvs.end());
@@ -499,8 +499,8 @@ static void generateParallelLoopNest(
   case DistributionMethod::Cyclic: {
     // Generate a single parallel loop-nest operation for all outermost
     // parallel loops and recurse.
-    b.create<scf::ParallelOp>(
-        loc, lbs.take_front(numProcessed), ubs.take_front(numProcessed),
+    scf::ParallelOp::create(
+        b, loc, lbs.take_front(numProcessed), ubs.take_front(numProcessed),
         steps.take_front(numProcessed),
         [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange localIvs) {
           ivStorage.append(localIvs.begin(), localIvs.end());
@@ -519,13 +519,13 @@ static void generateParallelLoopNest(
     for (unsigned i = 1; i < numProcessed; ++i)
       cond = ab._and(cond, ab.slt(lbs[i], ubs[i]));
     ivStorage.append(lbs.begin(), std::next(lbs.begin(), numProcessed));
-    b.create<scf::IfOp>(loc, cond, [&](OpBuilder &b, Location loc) {
+    scf::IfOp::create(b, loc, cond, [&](OpBuilder &b, Location loc) {
       generateParallelLoopNest(b, loc, lbs.drop_front(numProcessed),
                                ubs.drop_front(numProcessed),
                                steps.drop_front(numProcessed),
                                iteratorTypes.drop_front(numProcessed),
                                remainderProcInfo, bodyBuilderFn, ivStorage);
-      b.create<scf::YieldOp>(loc, ValueRange{});
+      scf::YieldOp::create(b, loc, ValueRange{});
     });
     return;
   }
@@ -595,13 +595,13 @@ static Operation *materializeTiledShape(OpBuilder &builder, Location loc,
   auto shapedType = dyn_cast<ShapedType>(valueToTile.getType());
   auto *sliceOp = TypeSwitch<ShapedType, Operation *>(shapedType)
                       .Case([&](MemRefType) {
-                        return builder.create<memref::SubViewOp>(
-                            loc, valueToTile, sliceParams.offsets,
+                        return memref::SubViewOp::create(
+                            builder, loc, valueToTile, sliceParams.offsets,
                             sliceParams.sizes, sliceParams.strides);
                       })
                       .Case([&](RankedTensorType) {
-                        return builder.create<tensor::ExtractSliceOp>(
-                            loc, valueToTile, sliceParams.offsets,
+                        return tensor::ExtractSliceOp::create(
+                            builder, loc, valueToTile, sliceParams.offsets,
                             sliceParams.sizes, sliceParams.strides);
                       })
                       .Default([](ShapedType) -> Operation * {
@@ -793,8 +793,8 @@ SmallVector<Value> insertSlicesBack(OpBuilder &builder, Location loc,
     // `tiledOperands`.
     Value outputTensor = operands[opOperand.getOperandNumber()];
     if (auto sliceOp = outputTensor.getDefiningOp<tensor::ExtractSliceOp>()) {
-      Value inserted = builder.create<tensor::InsertSliceOp>(
-          loc, sliceOp.getSource().getType(), results[resultIdx],
+      Value inserted = tensor::InsertSliceOp::create(
+          builder, loc, sliceOp.getSource().getType(), results[resultIdx],
           sliceOp.getSource(), sliceOp.getOffsets(), sliceOp.getSizes(),
           sliceOp.getStrides(), sliceOp.getStaticOffsets(),
           sliceOp.getStaticSizes(), sliceOp.getStaticStrides());

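For anyone skimming the hunks above: every change in this part of the patch is the same mechanical rewrite, replacing the builder-member form of op construction with the static create entry point that takes the builder/rewriter as its first argument. A minimal before/after sketch of the pattern, distilled from the constant-index case in the diff (purely illustrative, not additional code in the patch):

    // Before: op built through the rewriter object.
    Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);

    // After: static create; the rewriter moves to the first argument,
    // everything else (loc, operands, attributes) keeps the same order.
    Value zero = arith::ConstantIndexOp::create(rewriter, loc, 0);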

