[Mlir-commits] [mlir] 972ac59 - [mlir][NFC] update `mlir/Dialect` create APIs (21/n) (#149928)

llvmlistbot at llvm.org
Tue Jul 22 05:15:30 PDT 2025


Author: Maksim Levental
Date: 2025-07-22T08:15:27-04:00
New Revision: 972ac59c9af4ad47af0b3542ae936b3470727e5f

URL: https://github.com/llvm/llvm-project/commit/972ac59c9af4ad47af0b3542ae936b3470727e5f
DIFF: https://github.com/llvm/llvm-project/commit/972ac59c9af4ad47af0b3542ae936b3470727e5f.diff

LOG: [mlir][NFC] update `mlir/Dialect` create APIs (21/n) (#149928)

See https://github.com/llvm/llvm-project/pull/147168 for more info.
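
The change is mechanical: call sites move from the builder-centric
`builder.create<OpTy>(...)` form to the op-centric `OpTy::create(builder, ...)`
form, where the `OpBuilder` becomes the first argument and the remaining
arguments are unchanged. A minimal before/after sketch of the pattern, taken
from the `LvlOp::build` hunk in SparseTensorDialect.cpp below:

    // Before: construction goes through the OpBuilder.
    Value val = builder.create<arith::ConstantIndexOp>(state.location, index);
    // After: construction goes through the op class; the builder is passed in.
    Value val = arith::ConstantIndexOp::create(builder, state.location, index);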

Added: 
    

Modified: 
    mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
    mlir/lib/Dialect/SparseTensor/IR/SparseTensorInterfaces.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseAssembler.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseIterationToScf.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseReinterpretMap.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseSpaceCollapse.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/StageSparseOperations.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
    mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h
    mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorIterator.cpp
    mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 38246b96977c8..1a9d9e158ee75 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -559,7 +559,8 @@ SparseTensorEncodingAttr::translateCrds(OpBuilder &builder, Location loc,
   SmallVector<Type> retType(
       dir == CrdTransDirectionKind::lvl2dim ? getDimRank() : getLvlRank(),
       builder.getIndexType());
-  auto transOp = builder.create<CrdTranslateOp>(loc, retType, crds, dir, *this);
+  auto transOp =
+      CrdTranslateOp::create(builder, loc, retType, crds, dir, *this);
   return transOp.getOutCrds();
 }
 
@@ -1481,7 +1482,7 @@ LogicalResult CrdTranslateOp::fold(FoldAdaptor adaptor,
 
 void LvlOp::build(OpBuilder &builder, OperationState &state, Value source,
                   int64_t index) {
-  Value val = builder.create<arith::ConstantIndexOp>(state.location, index);
+  Value val = arith::ConstantIndexOp::create(builder, state.location, index);
   return build(builder, state, source, val);
 }
 

diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorInterfaces.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorInterfaces.cpp
index 9c84f4c25866f..abb37a5e10b9a 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorInterfaces.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorInterfaces.cpp
@@ -41,8 +41,8 @@ LogicalResult sparse_tensor::detail::stageWithSortImpl(
 
   // -> sort
   Type dstCOOTp = dstStt.getCOOType(/*ordered=*/true);
-  Value dstCOO = rewriter.create<ReorderCOOOp>(
-      loc, dstCOOTp, srcCOO, SparseTensorSortKind::HybridQuickSort);
+  Value dstCOO = ReorderCOOOp::create(rewriter, loc, dstCOOTp, srcCOO,
+                                      SparseTensorSortKind::HybridQuickSort);
 
   // -> dest.
   if (dstCOO.getType() == finalTp) {

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseAssembler.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseAssembler.cpp
index 8ee801ba46349..40c182f9dbb37 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseAssembler.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseAssembler.cpp
@@ -88,13 +88,13 @@ static void convVals(OpBuilder &builder, Location loc, TypeRange types,
         } else if (directOut) {
           Value mem;
           if (kind == SparseTensorFieldKind::PosMemRef)
-            mem = builder.create<sparse_tensor::ToPositionsOp>(loc, inputs[0],
-                                                               lv);
+            mem = sparse_tensor::ToPositionsOp::create(builder, loc, inputs[0],
+                                                       lv);
           else if (kind == SparseTensorFieldKind::CrdMemRef)
-            mem = builder.create<sparse_tensor::ToCoordinatesOp>(loc, inputs[0],
-                                                                 lv);
+            mem = sparse_tensor::ToCoordinatesOp::create(builder, loc,
+                                                         inputs[0], lv);
           else
-            mem = builder.create<sparse_tensor::ToValuesOp>(loc, inputs[0]);
+            mem = sparse_tensor::ToValuesOp::create(builder, loc, inputs[0]);
           toVals.push_back(mem);
         } else {
           ShapedType rtp = cast<ShapedType>(t);
@@ -109,7 +109,7 @@ static void convVals(OpBuilder &builder, Location loc, TypeRange types,
 
     if (isIn) {
       // Assemble multiple inputs into a single sparse tensor.
-      auto a = builder.create<sparse_tensor::AssembleOp>(loc, rtp, inputs);
+      auto a = sparse_tensor::AssembleOp::create(builder, loc, rtp, inputs);
       toVals.push_back(a.getResult());
     } else if (!directOut) {
       // Disassemble a single sparse input into multiple outputs.
@@ -117,7 +117,7 @@ static void convVals(OpBuilder &builder, Location loc, TypeRange types,
       unsigned len = retTypes.size();
       retTypes.append(cntTypes);
       auto d =
-          builder.create<sparse_tensor::DisassembleOp>(loc, retTypes, inputs);
+          sparse_tensor::DisassembleOp::create(builder, loc, retTypes, inputs);
       for (unsigned i = 0; i < len; i++)
         toVals.push_back(d.getResult(i));
     }
@@ -199,8 +199,9 @@ struct SparseFuncAssembler : public OpRewritePattern<func::FuncOp> {
     OpBuilder moduleBuilder(modOp.getBodyRegion());
     unsigned extra = inputTypes.size();
     inputTypes.append(extraTypes);
-    auto func = moduleBuilder.create<func::FuncOp>(
-        loc, orgName, FunctionType::get(context, inputTypes, outputTypes));
+    auto func = func::FuncOp::create(
+        moduleBuilder, loc, orgName,
+        FunctionType::get(context, inputTypes, outputTypes));
     func.setPublic();
 
     // Construct new wrapper method body.
@@ -216,14 +217,14 @@ struct SparseFuncAssembler : public OpRewritePattern<func::FuncOp> {
     // Call the original, now private method. A subsequent inlining pass can
     // determine whether cloning the method body in place is worthwhile.
     auto org = SymbolRefAttr::get(context, wrapper);
-    auto call = rewriter.create<func::CallOp>(loc, funcOp.getResultTypes(), org,
-                                              inputs);
+    auto call = func::CallOp::create(rewriter, loc, funcOp.getResultTypes(),
+                                     org, inputs);
 
     // Convert outputs and return.
     SmallVector<Value> outputs;
     convVals(rewriter, loc, funcOp.getResultTypes(), call.getResults(),
              body->getArguments(), outputs, extra, /*isIn=*/false, directOut);
-    rewriter.create<func::ReturnOp>(loc, outputs);
+    func::ReturnOp::create(rewriter, loc, outputs);
 
     // Finally, migrate a potential c-interface property.
     if (funcOp->getAttrOfType<UnitAttr>(

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
index 0c5912bb73772..02623198c25b5 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
@@ -94,8 +94,8 @@ static FlatSymbolRefAttr getMangledSortHelperFunc(
     OpBuilder::InsertionGuard insertionGuard(builder);
     builder.setInsertionPoint(insertPoint);
     Location loc = insertPoint.getLoc();
-    func = builder.create<func::FuncOp>(
-        loc, nameOstream.str(),
+    func = func::FuncOp::create(
+        builder, loc, nameOstream.str(),
         FunctionType::get(context, operands.getTypes(), resultTypes));
     func.setPrivate();
     createFunc(builder, module, func, xPerm, ny, nTrailingP);
@@ -111,13 +111,13 @@ static void forEachIJPairInXs(
     uint64_t ny,
     function_ref<void(uint64_t, Value, Value, Value)> bodyBuilder) {
   Value cstep = constantIndex(builder, loc, xPerm.getNumResults() + ny);
-  Value iOffset = builder.create<arith::MulIOp>(loc, args[0], cstep);
-  Value jOffset = builder.create<arith::MulIOp>(loc, args[1], cstep);
+  Value iOffset = arith::MulIOp::create(builder, loc, args[0], cstep);
+  Value jOffset = arith::MulIOp::create(builder, loc, args[1], cstep);
   for (unsigned k = 0, e = xPerm.getNumResults(); k < e; k++) {
     unsigned actualK = cast<AffineDimExpr>(xPerm.getResult(k)).getPosition();
     Value ak = constantIndex(builder, loc, actualK);
-    Value i = builder.create<arith::AddIOp>(loc, ak, iOffset);
-    Value j = builder.create<arith::AddIOp>(loc, ak, jOffset);
+    Value i = arith::AddIOp::create(builder, loc, ak, iOffset);
+    Value j = arith::AddIOp::create(builder, loc, ak, jOffset);
     Value buffer = args[xStartIdx];
 
     bodyBuilder(k, i, j, buffer);
@@ -165,10 +165,10 @@ static void forEachIJPairInAllBuffers(
 static void createSwap(OpBuilder &builder, Location loc, ValueRange args,
                        AffineMap xPerm, uint64_t ny) {
   auto swapOnePair = [&](uint64_t unused, Value i, Value j, Value buffer) {
-    Value vi = builder.create<memref::LoadOp>(loc, buffer, i);
-    Value vj = builder.create<memref::LoadOp>(loc, buffer, j);
-    builder.create<memref::StoreOp>(loc, vj, buffer, i);
-    builder.create<memref::StoreOp>(loc, vi, buffer, j);
+    Value vi = memref::LoadOp::create(builder, loc, buffer, i);
+    Value vj = memref::LoadOp::create(builder, loc, buffer, j);
+    memref::StoreOp::create(builder, loc, vj, buffer, i);
+    memref::StoreOp::create(builder, loc, vi, buffer, j);
   };
 
   forEachIJPairInAllBuffers(builder, loc, args, xPerm, ny, swapOnePair);
@@ -193,7 +193,7 @@ static Value createInlinedCompareImplementation(
       OpBuilder::InsertionGuard insertionGuard(builder);
       auto ifOp = cast<scf::IfOp>(val.getDefiningOp());
       builder.setInsertionPointAfter(ifOp);
-      builder.create<scf::YieldOp>(loc, ifOp.getResult(0));
+      scf::YieldOp::create(builder, loc, ifOp.getResult(0));
     }
   };
 
@@ -207,25 +207,25 @@ static Value createInlinedCompareImplementation(
 /// result of the comparison.
 static Value createEqCompare(OpBuilder &builder, Location loc, Value i, Value j,
                              Value x, bool isFirstDim, bool isLastDim) {
-  Value vi = builder.create<memref::LoadOp>(loc, x, i);
-  Value vj = builder.create<memref::LoadOp>(loc, x, j);
+  Value vi = memref::LoadOp::create(builder, loc, x, i);
+  Value vj = memref::LoadOp::create(builder, loc, x, j);
 
   Value res;
   if (isLastDim) {
-    res = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq, vi, vj);
+    res = arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::eq, vi, vj);
     // For 1D, we create a compare without any control flow. Otherwise, we
     // create YieldOp to return the result in the nested if-stmt.
     if (!isFirstDim)
-      builder.create<scf::YieldOp>(loc, res);
+      scf::YieldOp::create(builder, loc, res);
   } else {
     Value ne =
-        builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ne, vi, vj);
-    scf::IfOp ifOp = builder.create<scf::IfOp>(loc, builder.getIntegerType(1),
-                                               ne, /*else=*/true);
+        arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ne, vi, vj);
+    scf::IfOp ifOp = scf::IfOp::create(builder, loc, builder.getIntegerType(1),
+                                       ne, /*else=*/true);
     // If (x[i] != x[j]).
     builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
     Value f = constantI1(builder, loc, false);
-    builder.create<scf::YieldOp>(loc, f);
+    scf::YieldOp::create(builder, loc, f);
 
     // If (x[i] == x[j]). Set up the insertion point for the nested if-stmt that
     // checks the remaining dimensions.
@@ -261,26 +261,27 @@ static Value createInlinedEqCompare(OpBuilder &builder, Location loc,
 static Value createLessThanCompare(OpBuilder &builder, Location loc, Value i,
                                    Value j, Value x, bool isFirstDim,
                                    bool isLastDim) {
-  Value vi = builder.create<memref::LoadOp>(loc, x, i);
-  Value vj = builder.create<memref::LoadOp>(loc, x, j);
+  Value vi = memref::LoadOp::create(builder, loc, x, i);
+  Value vj = memref::LoadOp::create(builder, loc, x, j);
 
   Value res;
   if (isLastDim) {
-    res = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult, vi, vj);
+    res =
+        arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ult, vi, vj);
     // For 1D, we create a compare without any control flow. Otherwise, we
     // create YieldOp to return the result in the nested if-stmt.
     if (!isFirstDim)
-      builder.create<scf::YieldOp>(loc, res);
+      scf::YieldOp::create(builder, loc, res);
   } else {
     Value ne =
-        builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ne, vi, vj);
-    scf::IfOp ifOp = builder.create<scf::IfOp>(loc, builder.getIntegerType(1),
-                                               ne, /*else=*/true);
+        arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ne, vi, vj);
+    scf::IfOp ifOp = scf::IfOp::create(builder, loc, builder.getIntegerType(1),
+                                       ne, /*else=*/true);
     // If (x[i] != x[j]).
     builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
     Value lt =
-        builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult, vi, vj);
-    builder.create<scf::YieldOp>(loc, lt);
+        arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ult, vi, vj);
+    scf::YieldOp::create(builder, loc, lt);
 
     // If (x[i] == x[j]). Set up the insertion point for the nested if-stmt that
     // checks the remaining dimensions.
@@ -337,17 +338,17 @@ static void createBinarySearchFunc(OpBuilder &builder, ModuleOp module,
   ValueRange args = entryBlock->getArguments();
   Value p = args[hiIdx];
   SmallVector<Type, 2> types(2, p.getType()); // Only two types.
-  scf::WhileOp whileOp = builder.create<scf::WhileOp>(
-      loc, types, SmallVector<Value, 2>{args[loIdx], args[hiIdx]});
+  scf::WhileOp whileOp = scf::WhileOp::create(
+      builder, loc, types, SmallVector<Value, 2>{args[loIdx], args[hiIdx]});
 
   // The before-region of the WhileOp.
   Block *before =
       builder.createBlock(&whileOp.getBefore(), {}, types, {loc, loc});
   builder.setInsertionPointToEnd(before);
-  Value cond1 = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult,
-                                              before->getArgument(0),
-                                              before->getArgument(1));
-  builder.create<scf::ConditionOp>(loc, cond1, before->getArguments());
+  Value cond1 =
+      arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ult,
+                            before->getArgument(0), before->getArgument(1));
+  scf::ConditionOp::create(builder, loc, cond1, before->getArguments());
 
   // The after-region of the WhileOp.
   Block *after =
@@ -357,9 +358,9 @@ static void createBinarySearchFunc(OpBuilder &builder, ModuleOp module,
   Value hi = after->getArgument(1);
   // Compute mid = (lo + hi) >> 1.
   Value c1 = constantIndex(builder, loc, 1);
-  Value mid = builder.create<arith::ShRUIOp>(
-      loc, builder.create<arith::AddIOp>(loc, lo, hi), c1);
-  Value midp1 = builder.create<arith::AddIOp>(loc, mid, c1);
+  Value mid = arith::ShRUIOp::create(
+      builder, loc, arith::AddIOp::create(builder, loc, lo, hi), c1);
+  Value midp1 = arith::AddIOp::create(builder, loc, mid, c1);
 
   // Compare xs[p] < xs[mid].
   SmallVector<Value> compareOperands{p, mid};
@@ -372,12 +373,12 @@ static void createBinarySearchFunc(OpBuilder &builder, ModuleOp module,
   //     hi = mid;
   //   else
   //     lo = mid + 1;
-  Value newLo = builder.create<arith::SelectOp>(loc, cond2, lo, midp1);
-  Value newHi = builder.create<arith::SelectOp>(loc, cond2, mid, hi);
-  builder.create<scf::YieldOp>(loc, ValueRange{newLo, newHi});
+  Value newLo = arith::SelectOp::create(builder, loc, cond2, lo, midp1);
+  Value newHi = arith::SelectOp::create(builder, loc, cond2, mid, hi);
+  scf::YieldOp::create(builder, loc, ValueRange{newLo, newHi});
 
   builder.setInsertionPointAfter(whileOp);
-  builder.create<func::ReturnOp>(loc, whileOp.getResult(0));
+  func::ReturnOp::create(builder, loc, whileOp.getResult(0));
 }
 
 /// Creates code to advance i in a loop based on xs[p] as follows:
@@ -393,7 +394,7 @@ static std::pair<Value, Value> createScanLoop(OpBuilder &builder,
                                               uint64_t ny, int step) {
   Location loc = func.getLoc();
   scf::WhileOp whileOp =
-      builder.create<scf::WhileOp>(loc, TypeRange{i.getType()}, ValueRange{i});
+      scf::WhileOp::create(builder, loc, TypeRange{i.getType()}, ValueRange{i});
 
   Block *before =
       builder.createBlock(&whileOp.getBefore(), {}, {i.getType()}, {loc});
@@ -409,14 +410,14 @@ static std::pair<Value, Value> createScanLoop(OpBuilder &builder,
   }
   compareOperands.append(xs.begin(), xs.end());
   Value cond = createInlinedLessThan(builder, loc, compareOperands, xPerm, ny);
-  builder.create<scf::ConditionOp>(loc, cond, before->getArguments());
+  scf::ConditionOp::create(builder, loc, cond, before->getArguments());
 
   Block *after =
       builder.createBlock(&whileOp.getAfter(), {}, {i.getType()}, {loc});
   builder.setInsertionPointToEnd(after);
   Value cs = constantIndex(builder, loc, step);
-  i = builder.create<arith::AddIOp>(loc, after->getArgument(0), cs);
-  builder.create<scf::YieldOp>(loc, ValueRange{i});
+  i = arith::AddIOp::create(builder, loc, after->getArgument(0), cs);
+  scf::YieldOp::create(builder, loc, ValueRange{i});
   i = whileOp.getResult(0);
 
   builder.setInsertionPointAfter(whileOp);
@@ -440,7 +441,7 @@ static scf::IfOp createCompareThenSwap(OpBuilder &builder, Location loc,
   compareOperands[0] = b;
   compareOperands[1] = a;
   Value cond = createInlinedLessThan(builder, loc, compareOperands, xPerm, ny);
-  scf::IfOp ifOp = builder.create<scf::IfOp>(loc, cond, /*else=*/false);
+  scf::IfOp ifOp = scf::IfOp::create(builder, loc, cond, /*else=*/false);
   builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
   swapOperands[0] = b;
   swapOperands[1] = a;
@@ -517,12 +518,12 @@ static void createChoosePivot(OpBuilder &builder, ModuleOp module,
   swapOperands.append(args.begin() + xStartIdx, args.end());
   Location loc = func.getLoc();
   Value c1 = constantIndex(builder, loc, 1);
-  Value hiP1 = builder.create<arith::AddIOp>(loc, hi, c1);
-  Value len = builder.create<arith::SubIOp>(loc, hiP1, lo);
+  Value hiP1 = arith::AddIOp::create(builder, loc, hi, c1);
+  Value len = arith::SubIOp::create(builder, loc, hiP1, lo);
   Value lenThreshold = constantIndex(builder, loc, 1000);
-  Value lenCond = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult,
-                                                len, lenThreshold);
-  scf::IfOp lenIf = builder.create<scf::IfOp>(loc, lenCond, /*else=*/true);
+  Value lenCond = arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ult,
+                                        len, lenThreshold);
+  scf::IfOp lenIf = scf::IfOp::create(builder, loc, lenCond, /*else=*/true);
 
   // When len < 1000, choose pivot from median of 3 values.
   builder.setInsertionPointToStart(&lenIf.getThenRegion().front());
@@ -531,13 +532,13 @@ static void createChoosePivot(OpBuilder &builder, ModuleOp module,
 
   // When len >= 1000, choose pivot from median of 5 values.
   builder.setInsertionPointToStart(&lenIf.getElseRegion().front());
-  Value miP1 = builder.create<arith::AddIOp>(loc, hi, c1);
-  Value a = builder.create<arith::AddIOp>(loc, lo, miP1);
+  Value miP1 = arith::AddIOp::create(builder, loc, hi, c1);
+  Value a = arith::AddIOp::create(builder, loc, lo, miP1);
   // Value a is the middle between [loc, mi].
-  a = builder.create<arith::ShRUIOp>(loc, a, c1);
-  Value b = builder.create<arith::AddIOp>(loc, mi, hiP1);
+  a = arith::ShRUIOp::create(builder, loc, a, c1);
+  Value b = arith::AddIOp::create(builder, loc, mi, hiP1);
   // Value b is the middle between [mi, hi].
-  b = builder.create<arith::ShRUIOp>(loc, b, c1);
+  b = arith::ShRUIOp::create(builder, loc, b, c1);
   createSort5(builder, loc, xPerm, ny, swapOperands, compareOperands, lo, a, mi,
               b, hi);
 
@@ -589,25 +590,25 @@ static void createPartitionFunc(OpBuilder &builder, ModuleOp module,
   ValueRange args = entryBlock->getArguments();
   Value lo = args[loIdx];
   Value hi = args[hiIdx];
-  Value sum = builder.create<arith::AddIOp>(loc, lo, hi);
+  Value sum = arith::AddIOp::create(builder, loc, lo, hi);
   Value c1 = constantIndex(builder, loc, 1);
-  Value p = builder.create<arith::ShRUIOp>(loc, sum, c1);
+  Value p = arith::ShRUIOp::create(builder, loc, sum, c1);
 
   Value i = lo;
-  Value j = builder.create<arith::SubIOp>(loc, hi, c1);
+  Value j = arith::SubIOp::create(builder, loc, hi, c1);
   createChoosePivot(builder, module, func, xPerm, ny, i, j, p, args);
   Value trueVal = constantI1(builder, loc, true); // The value for while (true)
   SmallVector<Value, 4> operands{i, j, p, trueVal}; // Exactly four values.
   SmallVector<Type, 4> types{i.getType(), j.getType(), p.getType(),
                              trueVal.getType()};
-  scf::WhileOp whileOp = builder.create<scf::WhileOp>(loc, types, operands);
+  scf::WhileOp whileOp = scf::WhileOp::create(builder, loc, types, operands);
 
   // The before-region of the WhileOp.
   Block *before = builder.createBlock(&whileOp.getBefore(), {}, types,
                                       {loc, loc, loc, loc});
   builder.setInsertionPointToEnd(before);
-  builder.create<scf::ConditionOp>(loc, before->getArgument(3),
-                                   before->getArguments());
+  scf::ConditionOp::create(builder, loc, before->getArgument(3),
+                           before->getArguments());
 
   // The after-region of the WhileOp.
   Block *after =
@@ -629,70 +630,72 @@ static void createPartitionFunc(OpBuilder &builder, ModuleOp module,
 
   // If i < j:
   Value cond =
-      builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult, i, j);
-  scf::IfOp ifOp = builder.create<scf::IfOp>(loc, types, cond, /*else=*/true);
+      arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ult, i, j);
+  scf::IfOp ifOp = scf::IfOp::create(builder, loc, types, cond, /*else=*/true);
   builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
   SmallVector<Value> swapOperands{i, j};
   swapOperands.append(args.begin() + xStartIdx, args.end());
   createSwap(builder, loc, swapOperands, xPerm, ny);
   // If the pivot is moved, update p with the new pivot.
   Value icond =
-      builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq, i, p);
-  scf::IfOp ifOpI = builder.create<scf::IfOp>(loc, TypeRange{p.getType()},
-                                              icond, /*else=*/true);
+      arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::eq, i, p);
+  scf::IfOp ifOpI = scf::IfOp::create(builder, loc, TypeRange{p.getType()},
+                                      icond, /*else=*/true);
   builder.setInsertionPointToStart(&ifOpI.getThenRegion().front());
-  builder.create<scf::YieldOp>(loc, ValueRange{j});
+  scf::YieldOp::create(builder, loc, ValueRange{j});
   builder.setInsertionPointToStart(&ifOpI.getElseRegion().front());
   Value jcond =
-      builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq, j, p);
-  scf::IfOp ifOpJ = builder.create<scf::IfOp>(loc, TypeRange{p.getType()},
-                                              jcond, /*else=*/true);
+      arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::eq, j, p);
+  scf::IfOp ifOpJ = scf::IfOp::create(builder, loc, TypeRange{p.getType()},
+                                      jcond, /*else=*/true);
   builder.setInsertionPointToStart(&ifOpJ.getThenRegion().front());
-  builder.create<scf::YieldOp>(loc, ValueRange{i});
+  scf::YieldOp::create(builder, loc, ValueRange{i});
   builder.setInsertionPointToStart(&ifOpJ.getElseRegion().front());
-  builder.create<scf::YieldOp>(loc, ValueRange{p});
+  scf::YieldOp::create(builder, loc, ValueRange{p});
   builder.setInsertionPointAfter(ifOpJ);
-  builder.create<scf::YieldOp>(loc, ifOpJ.getResults());
+  scf::YieldOp::create(builder, loc, ifOpJ.getResults());
   builder.setInsertionPointAfter(ifOpI);
   Value compareEqIJ =
-      builder.create<arith::AndIOp>(loc, iCompareEq, jCompareEq);
-  scf::IfOp ifOp2 = builder.create<scf::IfOp>(
-      loc, TypeRange{i.getType(), j.getType()}, compareEqIJ, /*else=*/true);
+      arith::AndIOp::create(builder, loc, iCompareEq, jCompareEq);
+  scf::IfOp ifOp2 =
+      scf::IfOp::create(builder, loc, TypeRange{i.getType(), j.getType()},
+                        compareEqIJ, /*else=*/true);
   builder.setInsertionPointToStart(&ifOp2.getThenRegion().front());
-  Value i2 = builder.create<arith::AddIOp>(loc, i, c1);
-  Value j2 = builder.create<arith::SubIOp>(loc, j, c1);
-  builder.create<scf::YieldOp>(loc, ValueRange{i2, j2});
+  Value i2 = arith::AddIOp::create(builder, loc, i, c1);
+  Value j2 = arith::SubIOp::create(builder, loc, j, c1);
+  scf::YieldOp::create(builder, loc, ValueRange{i2, j2});
   builder.setInsertionPointToStart(&ifOp2.getElseRegion().front());
-  builder.create<scf::YieldOp>(loc, ValueRange{i, j});
+  scf::YieldOp::create(builder, loc, ValueRange{i, j});
   builder.setInsertionPointAfter(ifOp2);
-  builder.create<scf::YieldOp>(
-      loc,
-      ValueRange{ifOp2.getResult(0), ifOp2.getResult(1), ifOpI.getResult(0),
-                 /*cont=*/constantI1(builder, loc, true)});
+  scf::YieldOp::create(builder, loc,
+                       ValueRange{ifOp2.getResult(0), ifOp2.getResult(1),
+                                  ifOpI.getResult(0),
+                                  /*cont=*/constantI1(builder, loc, true)});
 
   // False branch for if i < j (i.e., i >= j):
   builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
-  p = builder.create<arith::AddIOp>(loc, j,
-                                    constantOne(builder, loc, j.getType()));
-  builder.create<scf::YieldOp>(
-      loc, ValueRange{i, j, p, /*cont=*/constantI1(builder, loc, false)});
+  p = arith::AddIOp::create(builder, loc, j,
+                            constantOne(builder, loc, j.getType()));
+  scf::YieldOp::create(
+      builder, loc,
+      ValueRange{i, j, p, /*cont=*/constantI1(builder, loc, false)});
 
   // Return for the whileOp.
   builder.setInsertionPointAfter(ifOp);
-  builder.create<scf::YieldOp>(loc, ifOp.getResults());
+  scf::YieldOp::create(builder, loc, ifOp.getResults());
 
   // Return for the function.
   builder.setInsertionPointAfter(whileOp);
-  builder.create<func::ReturnOp>(loc, whileOp.getResult(2));
+  func::ReturnOp::create(builder, loc, whileOp.getResult(2));
 }
 
 /// Computes (n-2)/n, assuming n has index type.
 static Value createSubTwoDividedByTwo(OpBuilder &builder, Location loc,
                                       Value n) {
   Value i2 = constantIndex(builder, loc, 2);
-  Value res = builder.create<arith::SubIOp>(loc, n, i2);
+  Value res = arith::SubIOp::create(builder, loc, n, i2);
   Value i1 = constantIndex(builder, loc, 1);
-  return builder.create<arith::ShRUIOp>(loc, res, i1);
+  return arith::ShRUIOp::create(builder, loc, res, i1);
 }
 
 /// Creates a function to heapify the subtree with root `start` within the full
@@ -743,16 +746,16 @@ static void createShiftDownFunc(OpBuilder &builder, ModuleOp module,
   // If (n >= 2).
   Value c2 = constantIndex(builder, loc, 2);
   Value condN =
-      builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::uge, n, c2);
-  scf::IfOp ifN = builder.create<scf::IfOp>(loc, condN, /*else=*/false);
+      arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::uge, n, c2);
+  scf::IfOp ifN = scf::IfOp::create(builder, loc, condN, /*else=*/false);
   builder.setInsertionPointToStart(&ifN.getThenRegion().front());
-  Value child = builder.create<arith::SubIOp>(loc, start, first);
+  Value child = arith::SubIOp::create(builder, loc, start, first);
 
   // If ((n-2)/2 >= child).
   Value t = createSubTwoDividedByTwo(builder, loc, n);
   Value condNc =
-      builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::uge, t, child);
-  scf::IfOp ifNc = builder.create<scf::IfOp>(loc, condNc, /*else=*/false);
+      arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::uge, t, child);
+  scf::IfOp ifNc = scf::IfOp::create(builder, loc, condNc, /*else=*/false);
 
   builder.setInsertionPointToStart(&ifNc.getThenRegion().front());
   Value c1 = constantIndex(builder, loc, 1);
@@ -768,32 +771,32 @@ static void createShiftDownFunc(OpBuilder &builder, ModuleOp module,
   //   if (child+1 < n && data[childIndex] < data[childIndex+1])
   //     childIndex ++; child ++ // Right child is bigger.
   auto getLargerChild = [&](Value r) -> std::pair<Value, Value> {
-    Value lChild = builder.create<arith::ShLIOp>(loc, r, c1);
-    lChild = builder.create<arith::AddIOp>(loc, lChild, c1);
-    Value lChildIdx = builder.create<arith::AddIOp>(loc, lChild, first);
-    Value rChild = builder.create<arith::AddIOp>(loc, lChild, c1);
-    Value cond1 = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult,
-                                                rChild, n);
+    Value lChild = arith::ShLIOp::create(builder, loc, r, c1);
+    lChild = arith::AddIOp::create(builder, loc, lChild, c1);
+    Value lChildIdx = arith::AddIOp::create(builder, loc, lChild, first);
+    Value rChild = arith::AddIOp::create(builder, loc, lChild, c1);
+    Value cond1 = arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ult,
+                                        rChild, n);
     SmallVector<Type, 2> ifTypes(2, r.getType());
     scf::IfOp if1 =
-        builder.create<scf::IfOp>(loc, ifTypes, cond1, /*else=*/true);
+        scf::IfOp::create(builder, loc, ifTypes, cond1, /*else=*/true);
     builder.setInsertionPointToStart(&if1.getThenRegion().front());
-    Value rChildIdx = builder.create<arith::AddIOp>(loc, rChild, first);
+    Value rChildIdx = arith::AddIOp::create(builder, loc, rChild, first);
     // Compare data[left] < data[right].
     compareOperands[0] = lChildIdx;
     compareOperands[1] = rChildIdx;
     Value cond2 =
         createInlinedLessThan(builder, loc, compareOperands, xPerm, ny);
     scf::IfOp if2 =
-        builder.create<scf::IfOp>(loc, ifTypes, cond2, /*else=*/true);
+        scf::IfOp::create(builder, loc, ifTypes, cond2, /*else=*/true);
     builder.setInsertionPointToStart(&if2.getThenRegion().front());
-    builder.create<scf::YieldOp>(loc, ValueRange{rChild, rChildIdx});
+    scf::YieldOp::create(builder, loc, ValueRange{rChild, rChildIdx});
     builder.setInsertionPointToStart(&if2.getElseRegion().front());
-    builder.create<scf::YieldOp>(loc, ValueRange{lChild, lChildIdx});
+    scf::YieldOp::create(builder, loc, ValueRange{lChild, lChildIdx});
     builder.setInsertionPointAfter(if2);
-    builder.create<scf::YieldOp>(loc, if2.getResults());
+    scf::YieldOp::create(builder, loc, if2.getResults());
     builder.setInsertionPointToStart(&if1.getElseRegion().front());
-    builder.create<scf::YieldOp>(loc, ValueRange{lChild, lChildIdx});
+    scf::YieldOp::create(builder, loc, ValueRange{lChild, lChildIdx});
     builder.setInsertionPointAfter(if1);
     return std::make_pair(if1.getResult(0), if1.getResult(1));
   };
@@ -803,8 +806,8 @@ static void createShiftDownFunc(OpBuilder &builder, ModuleOp module,
 
   // While (data[start] < data[childIndex]).
   SmallVector<Type, 3> types(3, child.getType());
-  scf::WhileOp whileOp = builder.create<scf::WhileOp>(
-      loc, types, SmallVector<Value, 2>{start, child, childIdx});
+  scf::WhileOp whileOp = scf::WhileOp::create(
+      builder, loc, types, SmallVector<Value, 2>{start, child, childIdx});
 
   // The before-region of the WhileOp.
   SmallVector<Location, 3> locs(3, loc);
@@ -815,7 +818,7 @@ static void createShiftDownFunc(OpBuilder &builder, ModuleOp module,
   compareOperands[0] = start;
   compareOperands[1] = childIdx;
   Value cond = createInlinedLessThan(builder, loc, compareOperands, xPerm, ny);
-  builder.create<scf::ConditionOp>(loc, cond, before->getArguments());
+  scf::ConditionOp::create(builder, loc, cond, before->getArguments());
 
   // The after-region of the WhileOp.
   Block *after = builder.createBlock(&whileOp.getAfter(), {}, types, locs);
@@ -827,20 +830,21 @@ static void createShiftDownFunc(OpBuilder &builder, ModuleOp module,
   createSwap(builder, loc, swapOperands, xPerm, ny);
   start = childIdx;
   Value cond2 =
-      builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::uge, t, child);
-  scf::IfOp if2 = builder.create<scf::IfOp>(
-      loc, TypeRange{child.getType(), child.getType()}, cond2, /*else=*/true);
+      arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::uge, t, child);
+  scf::IfOp if2 = scf::IfOp::create(builder, loc,
+                                    TypeRange{child.getType(), child.getType()},
+                                    cond2, /*else=*/true);
   builder.setInsertionPointToStart(&if2.getThenRegion().front());
   auto [newChild, newChildIdx] = getLargerChild(child);
-  builder.create<scf::YieldOp>(loc, ValueRange{newChild, newChildIdx});
+  scf::YieldOp::create(builder, loc, ValueRange{newChild, newChildIdx});
   builder.setInsertionPointToStart(&if2.getElseRegion().front());
-  builder.create<scf::YieldOp>(loc, ValueRange{child, childIdx});
+  scf::YieldOp::create(builder, loc, ValueRange{child, childIdx});
   builder.setInsertionPointAfter(if2);
-  builder.create<scf::YieldOp>(
-      loc, ValueRange{start, if2.getResult(0), if2.getResult(1)});
+  scf::YieldOp::create(builder, loc,
+                       ValueRange{start, if2.getResult(0), if2.getResult(1)});
 
   builder.setInsertionPointAfter(ifN);
-  builder.create<func::ReturnOp>(loc);
+  func::ReturnOp::create(builder, loc);
 }
 
 /// Creates a function to perform heap sort on the values in the range of index
@@ -870,45 +874,45 @@ static void createHeapSortFunc(OpBuilder &builder, ModuleOp module,
   ValueRange args = entryBlock->getArguments();
   Value lo = args[loIdx];
   Value hi = args[hiIdx];
-  Value n = builder.create<arith::SubIOp>(loc, hi, lo);
+  Value n = arith::SubIOp::create(builder, loc, hi, lo);
 
   // For i = (n-2)/2 downto 0.
   Value c0 = constantIndex(builder, loc, 0);
   Value c1 = constantIndex(builder, loc, 1);
   Value s = createSubTwoDividedByTwo(builder, loc, n);
-  Value up = builder.create<arith::AddIOp>(loc, s, c1);
-  scf::ForOp forI = builder.create<scf::ForOp>(loc, c0, up, c1);
+  Value up = arith::AddIOp::create(builder, loc, s, c1);
+  scf::ForOp forI = scf::ForOp::create(builder, loc, c0, up, c1);
   builder.setInsertionPointToStart(forI.getBody());
-  Value i = builder.create<arith::SubIOp>(loc, s, forI.getInductionVar());
-  Value lopi = builder.create<arith::AddIOp>(loc, lo, i);
+  Value i = arith::SubIOp::create(builder, loc, s, forI.getInductionVar());
+  Value lopi = arith::AddIOp::create(builder, loc, lo, i);
   SmallVector<Value> shiftDownOperands = {lo, lopi};
   shiftDownOperands.append(args.begin() + xStartIdx, args.end());
   shiftDownOperands.push_back(n);
   FlatSymbolRefAttr shiftDownFunc = getMangledSortHelperFunc(
       builder, func, TypeRange(), kShiftDownFuncNamePrefix, xPerm, ny,
       shiftDownOperands, createShiftDownFunc, /*nTrailingP=*/1);
-  builder.create<func::CallOp>(loc, shiftDownFunc, TypeRange(),
-                               shiftDownOperands);
+  func::CallOp::create(builder, loc, shiftDownFunc, TypeRange(),
+                       shiftDownOperands);
 
   builder.setInsertionPointAfter(forI);
   // For l = n downto 2.
-  up = builder.create<arith::SubIOp>(loc, n, c1);
-  scf::ForOp forL = builder.create<scf::ForOp>(loc, c0, up, c1);
+  up = arith::SubIOp::create(builder, loc, n, c1);
+  scf::ForOp forL = scf::ForOp::create(builder, loc, c0, up, c1);
   builder.setInsertionPointToStart(forL.getBody());
-  Value l = builder.create<arith::SubIOp>(loc, n, forL.getInductionVar());
-  Value loplm1 = builder.create<arith::AddIOp>(loc, lo, l);
-  loplm1 = builder.create<arith::SubIOp>(loc, loplm1, c1);
+  Value l = arith::SubIOp::create(builder, loc, n, forL.getInductionVar());
+  Value loplm1 = arith::AddIOp::create(builder, loc, lo, l);
+  loplm1 = arith::SubIOp::create(builder, loc, loplm1, c1);
   SmallVector<Value> swapOperands{lo, loplm1};
   swapOperands.append(args.begin() + xStartIdx, args.end());
   createSwap(builder, loc, swapOperands, xPerm, ny);
   shiftDownOperands[1] = lo;
   shiftDownOperands[shiftDownOperands.size() - 1] =
-      builder.create<arith::SubIOp>(loc, l, c1);
-  builder.create<func::CallOp>(loc, shiftDownFunc, TypeRange(),
-                               shiftDownOperands);
+      arith::SubIOp::create(builder, loc, l, c1);
+  func::CallOp::create(builder, loc, shiftDownFunc, TypeRange(),
+                       shiftDownOperands);
 
   builder.setInsertionPointAfter(forL);
-  builder.create<func::ReturnOp>(loc);
+  func::ReturnOp::create(builder, loc);
 }
 
 /// A helper for generating code to perform quick sort. It partitions [lo, hi),
@@ -933,35 +937,35 @@ createQuickSort(OpBuilder &builder, ModuleOp module, func::FuncOp func,
                                       args.drop_back(nTrailingP))
                 .getResult(0);
 
-  Value lenLow = builder.create<arith::SubIOp>(loc, p, lo);
-  Value lenHigh = builder.create<arith::SubIOp>(loc, hi, p);
+  Value lenLow = arith::SubIOp::create(builder, loc, p, lo);
+  Value lenHigh = arith::SubIOp::create(builder, loc, hi, p);
   // Partition already sorts array with len <= 2
   Value c2 = constantIndex(builder, loc, 2);
-  Value len = builder.create<arith::SubIOp>(loc, hi, lo);
+  Value len = arith::SubIOp::create(builder, loc, hi, lo);
   Value lenGtTwo =
-      builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ugt, len, c2);
+      arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ugt, len, c2);
   scf::IfOp ifLenGtTwo =
-      builder.create<scf::IfOp>(loc, types, lenGtTwo, /*else=*/true);
+      scf::IfOp::create(builder, loc, types, lenGtTwo, /*else=*/true);
   builder.setInsertionPointToStart(&ifLenGtTwo.getElseRegion().front());
   // Returns an empty range to mark the entire region is fully sorted.
-  builder.create<scf::YieldOp>(loc, ValueRange{lo, lo});
+  scf::YieldOp::create(builder, loc, ValueRange{lo, lo});
 
   // Else len > 2, need recursion.
   builder.setInsertionPointToStart(&ifLenGtTwo.getThenRegion().front());
-  Value cond = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ule,
-                                             lenLow, lenHigh);
+  Value cond = arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ule,
+                                     lenLow, lenHigh);
 
   Value c0 = constantIndex(builder, loc, 0);
-  scf::IfOp ifOp = builder.create<scf::IfOp>(loc, types, cond, /*else=*/true);
+  scf::IfOp ifOp = scf::IfOp::create(builder, loc, types, cond, /*else=*/true);
 
   auto mayRecursion = [&](Value low, Value high, Value len) {
     Value cond =
-        builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ne, len, c0);
-    scf::IfOp ifOp = builder.create<scf::IfOp>(loc, cond, /*else=*/false);
+        arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ne, len, c0);
+    scf::IfOp ifOp = scf::IfOp::create(builder, loc, cond, /*else=*/false);
     builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
     SmallVector<Value> operands{low, high};
     operands.append(args.begin() + xStartIdx, args.end());
-    builder.create<func::CallOp>(loc, func, operands);
+    func::CallOp::create(builder, loc, func, operands);
     builder.setInsertionPointAfter(ifOp);
   };
 
@@ -969,14 +973,14 @@ createQuickSort(OpBuilder &builder, ModuleOp module, func::FuncOp func,
   // the bigger partition to be processed by the enclosed while-loop.
   builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
   mayRecursion(lo, p, lenLow);
-  builder.create<scf::YieldOp>(loc, ValueRange{p, hi});
+  scf::YieldOp::create(builder, loc, ValueRange{p, hi});
 
   builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
   mayRecursion(p, hi, lenHigh);
-  builder.create<scf::YieldOp>(loc, ValueRange{lo, p});
+  scf::YieldOp::create(builder, loc, ValueRange{lo, p});
 
   builder.setInsertionPointAfter(ifOp);
-  builder.create<scf::YieldOp>(loc, ifOp.getResults());
+  scf::YieldOp::create(builder, loc, ifOp.getResults());
 
   builder.setInsertionPointAfter(ifLenGtTwo);
   return std::make_pair(ifLenGtTwo.getResult(0), ifLenGtTwo.getResult(1));
@@ -1011,10 +1015,10 @@ static void createSortStableFunc(OpBuilder &builder, ModuleOp module,
   Value c1 = constantIndex(builder, loc, 1);
   Value lo = args[loIdx];
   Value hi = args[hiIdx];
-  Value lop1 = builder.create<arith::AddIOp>(loc, lo, c1);
+  Value lop1 = arith::AddIOp::create(builder, loc, lo, c1);
 
   // Start the outer for-stmt with induction variable i.
-  scf::ForOp forOpI = builder.create<scf::ForOp>(loc, lop1, hi, c1);
+  scf::ForOp forOpI = scf::ForOp::create(builder, loc, lop1, hi, c1);
   builder.setInsertionPointToStart(forOpI.getBody());
   Value i = forOpI.getInductionVar();
 
@@ -1035,24 +1039,24 @@ static void createSortStableFunc(OpBuilder &builder, ModuleOp module,
   forEachIJPairInAllBuffers(
       builder, loc, operands, xPerm, ny,
       [&](uint64_t unused, Value i, Value unused2, Value buffer) {
-        d.push_back(builder.create<memref::LoadOp>(loc, buffer, i));
+        d.push_back(memref::LoadOp::create(builder, loc, buffer, i));
       });
 
   // Start the inner for-stmt with induction variable j, for moving data[p..i)
   // to data[p+1..i+1).
-  Value imp = builder.create<arith::SubIOp>(loc, i, p);
+  Value imp = arith::SubIOp::create(builder, loc, i, p);
   Value c0 = constantIndex(builder, loc, 0);
-  scf::ForOp forOpJ = builder.create<scf::ForOp>(loc, c0, imp, c1);
+  scf::ForOp forOpJ = scf::ForOp::create(builder, loc, c0, imp, c1);
   builder.setInsertionPointToStart(forOpJ.getBody());
   Value j = forOpJ.getInductionVar();
-  Value imj = builder.create<arith::SubIOp>(loc, i, j);
+  Value imj = arith::SubIOp::create(builder, loc, i, j);
   operands[1] = imj;
-  operands[0] = builder.create<arith::SubIOp>(loc, imj, c1);
+  operands[0] = arith::SubIOp::create(builder, loc, imj, c1);
   forEachIJPairInAllBuffers(
       builder, loc, operands, xPerm, ny,
       [&](uint64_t unused, Value imjm1, Value imj, Value buffer) {
-        Value t = builder.create<memref::LoadOp>(loc, buffer, imjm1);
-        builder.create<memref::StoreOp>(loc, t, buffer, imj);
+        Value t = memref::LoadOp::create(builder, loc, buffer, imjm1);
+        memref::StoreOp::create(builder, loc, t, buffer, imj);
       });
 
   // Store the value at data[i] to data[p].
@@ -1061,11 +1065,11 @@ static void createSortStableFunc(OpBuilder &builder, ModuleOp module,
   forEachIJPairInAllBuffers(
       builder, loc, operands, xPerm, ny,
       [&](uint64_t k, Value p, Value usused, Value buffer) {
-        builder.create<memref::StoreOp>(loc, d[k], buffer, p);
+        memref::StoreOp::create(builder, loc, d[k], buffer, p);
       });
 
   builder.setInsertionPointAfter(forOpI);
-  builder.create<func::ReturnOp>(loc);
+  func::ReturnOp::create(builder, loc);
 }
 
 /// Creates a function to perform quick sort or a hybrid quick sort on the
@@ -1127,7 +1131,7 @@ static void createQuickSortFunc(OpBuilder &builder, ModuleOp module,
   Value hi = args[hiIdx];
   SmallVector<Type, 2> types(2, lo.getType()); // Only two types.
   scf::WhileOp whileOp =
-      builder.create<scf::WhileOp>(loc, types, SmallVector<Value, 2>{lo, hi});
+      scf::WhileOp::create(builder, loc, types, SmallVector<Value, 2>{lo, hi});
 
   // The before-region of the WhileOp.
   Block *before =
@@ -1136,10 +1140,10 @@ static void createQuickSortFunc(OpBuilder &builder, ModuleOp module,
   lo = before->getArgument(0);
   hi = before->getArgument(1);
   Value loP1 =
-      builder.create<arith::AddIOp>(loc, lo, constantIndex(builder, loc, 1));
+      arith::AddIOp::create(builder, loc, lo, constantIndex(builder, loc, 1));
   Value needSort =
-      builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult, loP1, hi);
-  builder.create<scf::ConditionOp>(loc, needSort, before->getArguments());
+      arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ult, loP1, hi);
+  scf::ConditionOp::create(builder, loc, needSort, before->getArguments());
 
   // The after-region of the WhileOp.
   Block *after =
@@ -1151,53 +1155,53 @@ static void createQuickSortFunc(OpBuilder &builder, ModuleOp module,
   args[1] = hi;
 
   if (isHybrid) {
-    Value len = builder.create<arith::SubIOp>(loc, hi, lo);
+    Value len = arith::SubIOp::create(builder, loc, hi, lo);
     Value lenLimit = constantIndex(builder, loc, 30);
-    Value lenCond = builder.create<arith::CmpIOp>(
-        loc, arith::CmpIPredicate::ule, len, lenLimit);
+    Value lenCond = arith::CmpIOp::create(
+        builder, loc, arith::CmpIPredicate::ule, len, lenLimit);
     scf::IfOp lenIf =
-        builder.create<scf::IfOp>(loc, types, lenCond, /*else=*/true);
+        scf::IfOp::create(builder, loc, types, lenCond, /*else=*/true);
 
     // When len <= limit.
     builder.setInsertionPointToStart(&lenIf.getThenRegion().front());
     FlatSymbolRefAttr insertionSortFunc = getMangledSortHelperFunc(
         builder, func, TypeRange(), kSortStableFuncNamePrefix, xPerm, ny,
         ValueRange(args).drop_back(nTrailingP), createSortStableFunc);
-    builder.create<func::CallOp>(loc, insertionSortFunc, TypeRange(),
-                                 ValueRange(args).drop_back(nTrailingP));
-    builder.create<scf::YieldOp>(loc, ValueRange{lo, lo});
+    func::CallOp::create(builder, loc, insertionSortFunc, TypeRange(),
+                         ValueRange(args).drop_back(nTrailingP));
+    scf::YieldOp::create(builder, loc, ValueRange{lo, lo});
 
     // When len > limit.
     builder.setInsertionPointToStart(&lenIf.getElseRegion().front());
     Value depthLimit = args.back();
-    depthLimit = builder.create<arith::SubIOp>(loc, depthLimit,
-                                               constantI64(builder, loc, 1));
+    depthLimit = arith::SubIOp::create(builder, loc, depthLimit,
+                                       constantI64(builder, loc, 1));
     Value depthCond =
-        builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ule,
-                                      depthLimit, constantI64(builder, loc, 0));
+        arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ule,
+                              depthLimit, constantI64(builder, loc, 0));
     scf::IfOp depthIf =
-        builder.create<scf::IfOp>(loc, types, depthCond, /*else=*/true);
+        scf::IfOp::create(builder, loc, types, depthCond, /*else=*/true);
 
     // When depth exceeds limit.
     builder.setInsertionPointToStart(&depthIf.getThenRegion().front());
     FlatSymbolRefAttr heapSortFunc = getMangledSortHelperFunc(
         builder, func, TypeRange(), kHeapSortFuncNamePrefix, xPerm, ny,
         ValueRange(args).drop_back(nTrailingP), createHeapSortFunc);
-    builder.create<func::CallOp>(loc, heapSortFunc, TypeRange(),
-                                 ValueRange(args).drop_back(nTrailingP));
-    builder.create<scf::YieldOp>(loc, ValueRange{lo, lo});
+    func::CallOp::create(builder, loc, heapSortFunc, TypeRange(),
+                         ValueRange(args).drop_back(nTrailingP));
+    scf::YieldOp::create(builder, loc, ValueRange{lo, lo});
 
     // When depth doesn't exceed limit.
     builder.setInsertionPointToStart(&depthIf.getElseRegion().front());
     args.back() = depthLimit;
     std::tie(lo, hi) =
         createQuickSort(builder, module, func, args, xPerm, ny, nTrailingP);
-    builder.create<scf::YieldOp>(loc, ValueRange{lo, hi});
+    scf::YieldOp::create(builder, loc, ValueRange{lo, hi});
 
     builder.setInsertionPointAfter(depthIf);
     lo = depthIf.getResult(0);
     hi = depthIf.getResult(1);
-    builder.create<scf::YieldOp>(loc, ValueRange{lo, hi});
+    scf::YieldOp::create(builder, loc, ValueRange{lo, hi});
 
     builder.setInsertionPointAfter(lenIf);
     lo = lenIf.getResult(0);
@@ -1208,11 +1212,11 @@ static void createQuickSortFunc(OpBuilder &builder, ModuleOp module,
   }
 
   // New [lo, hi) for the next while-loop iteration.
-  builder.create<scf::YieldOp>(loc, ValueRange{lo, hi});
+  scf::YieldOp::create(builder, loc, ValueRange{lo, hi});
 
   // After the while-loop.
   builder.setInsertionPointAfter(whileOp);
-  builder.create<func::ReturnOp>(loc);
+  func::ReturnOp::create(builder, loc);
 }
 
 /// Implements the rewriting for operator sort and sort_coo.
@@ -1228,7 +1232,7 @@ LogicalResult matchAndRewriteSortOp(OpTy op, ValueRange xys, AffineMap xPerm,
     if (!mtp.isDynamicDim(0)) {
       auto newMtp =
           MemRefType::get({ShapedType::kDynamic}, mtp.getElementType());
-      v = rewriter.create<memref::CastOp>(loc, newMtp, v);
+      v = memref::CastOp::create(rewriter, loc, newMtp, v);
     }
     operands.push_back(v);
   }
@@ -1248,12 +1252,12 @@ LogicalResult matchAndRewriteSortOp(OpTy op, ValueRange xys, AffineMap xPerm,
     // As a heuristics, set depthLimit = 2 * log2(n).
     Value lo = operands[loIdx];
     Value hi = operands[hiIdx];
-    Value len = rewriter.create<arith::IndexCastOp>(
-        loc, rewriter.getI64Type(),
-        rewriter.create<arith::SubIOp>(loc, hi, lo));
-    Value depthLimit = rewriter.create<arith::SubIOp>(
-        loc, constantI64(rewriter, loc, 64),
-        rewriter.create<math::CountLeadingZerosOp>(loc, len));
+    Value len = arith::IndexCastOp::create(
+        rewriter, loc, rewriter.getI64Type(),
+        arith::SubIOp::create(rewriter, loc, hi, lo));
+    Value depthLimit = arith::SubIOp::create(
+        rewriter, loc, constantI64(rewriter, loc, 64),
+        math::CountLeadingZerosOp::create(rewriter, loc, len));
     operands.push_back(depthLimit);
     break;
   }
@@ -1307,33 +1311,33 @@ struct PushBackRewriter : OpRewritePattern<PushBackOp> {
     Location loc = op->getLoc();
     Value c0 = constantIndex(rewriter, loc, 0);
     Value buffer = op.getInBuffer();
-    Value capacity = rewriter.create<memref::DimOp>(loc, buffer, c0);
+    Value capacity = memref::DimOp::create(rewriter, loc, buffer, c0);
     Value size = op.getCurSize();
     Value value = op.getValue();
 
     Value n = op.getN() ? op.getN() : constantIndex(rewriter, loc, 1);
-    Value newSize = rewriter.create<arith::AddIOp>(loc, size, n);
+    Value newSize = arith::AddIOp::create(rewriter, loc, size, n);
     auto nValue = dyn_cast_or_null<arith::ConstantIndexOp>(n.getDefiningOp());
     bool nIsOne = (nValue && nValue.value() == 1);
 
     if (!op.getInbounds()) {
-      Value cond = rewriter.create<arith::CmpIOp>(
-          loc, arith::CmpIPredicate::ugt, newSize, capacity);
+      Value cond = arith::CmpIOp::create(
+          rewriter, loc, arith::CmpIPredicate::ugt, newSize, capacity);
 
       Value c2 = constantIndex(rewriter, loc, 2);
       auto bufferType =
           MemRefType::get({ShapedType::kDynamic}, value.getType());
-      scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, bufferType, cond,
-                                                  /*else=*/true);
+      scf::IfOp ifOp = scf::IfOp::create(rewriter, loc, bufferType, cond,
+                                         /*else=*/true);
       // True branch.
       rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
       if (nIsOne) {
-        capacity = rewriter.create<arith::MulIOp>(loc, capacity, c2);
+        capacity = arith::MulIOp::create(rewriter, loc, capacity, c2);
       } else {
         // Use a do-while loop to calculate the new capacity as follows:
         //   do { new_capacity *= 2 } while (size > new_capacity)
         scf::WhileOp whileOp =
-            rewriter.create<scf::WhileOp>(loc, capacity.getType(), capacity);
+            scf::WhileOp::create(rewriter, loc, capacity.getType(), capacity);
 
         // The before-region of the WhileOp.
         Block *before = rewriter.createBlock(&whileOp.getBefore(), {},
@@ -1341,36 +1345,37 @@ struct PushBackRewriter : OpRewritePattern<PushBackOp> {
         rewriter.setInsertionPointToEnd(before);
 
         capacity =
-            rewriter.create<arith::MulIOp>(loc, before->getArgument(0), c2);
-        cond = rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ugt,
-                                              newSize, capacity);
-        rewriter.create<scf::ConditionOp>(loc, cond, ValueRange{capacity});
+            arith::MulIOp::create(rewriter, loc, before->getArgument(0), c2);
+        cond = arith::CmpIOp::create(rewriter, loc, arith::CmpIPredicate::ugt,
+                                     newSize, capacity);
+        scf::ConditionOp::create(rewriter, loc, cond, ValueRange{capacity});
         // The after-region of the WhileOp.
         Block *after = rewriter.createBlock(&whileOp.getAfter(), {},
                                             {capacity.getType()}, {loc});
         rewriter.setInsertionPointToEnd(after);
-        rewriter.create<scf::YieldOp>(loc, after->getArguments());
+        scf::YieldOp::create(rewriter, loc, after->getArguments());
 
         rewriter.setInsertionPointAfter(whileOp);
         capacity = whileOp.getResult(0);
       }
 
-      Value newBuffer =
-          rewriter.create<memref::ReallocOp>(loc, bufferType, buffer, capacity);
+      Value newBuffer = memref::ReallocOp::create(rewriter, loc, bufferType,
+                                                  buffer, capacity);
       if (enableBufferInitialization) {
-        Value fillSize = rewriter.create<arith::SubIOp>(loc, capacity, newSize);
+        Value fillSize =
+            arith::SubIOp::create(rewriter, loc, capacity, newSize);
         Value fillValue = constantZero(rewriter, loc, value.getType());
-        Value subBuffer = rewriter.create<memref::SubViewOp>(
-            loc, newBuffer, /*offset=*/ValueRange{newSize},
+        Value subBuffer = memref::SubViewOp::create(
+            rewriter, loc, newBuffer, /*offset=*/ValueRange{newSize},
             /*size=*/ValueRange{fillSize},
             /*step=*/ValueRange{constantIndex(rewriter, loc, 1)});
-        rewriter.create<linalg::FillOp>(loc, fillValue, subBuffer);
+        linalg::FillOp::create(rewriter, loc, fillValue, subBuffer);
       }
-      rewriter.create<scf::YieldOp>(loc, newBuffer);
+      scf::YieldOp::create(rewriter, loc, newBuffer);
 
       // False branch.
       rewriter.setInsertionPointToStart(&ifOp.getElseRegion().front());
-      rewriter.create<scf::YieldOp>(loc, buffer);
+      scf::YieldOp::create(rewriter, loc, buffer);
 
       // Prepare for adding the value to the end of the buffer.
       rewriter.setInsertionPointAfter(ifOp);
@@ -1379,12 +1384,13 @@ struct PushBackRewriter : OpRewritePattern<PushBackOp> {
 
     // Add the value to the end of the buffer.
     if (nIsOne) {
-      rewriter.create<memref::StoreOp>(loc, value, buffer, size);
+      memref::StoreOp::create(rewriter, loc, value, buffer, size);
     } else {
-      Value subBuffer = rewriter.create<memref::SubViewOp>(
-          loc, buffer, /*offset=*/ValueRange{size}, /*size=*/ValueRange{n},
+      Value subBuffer = memref::SubViewOp::create(
+          rewriter, loc, buffer, /*offset=*/ValueRange{size},
+          /*size=*/ValueRange{n},
           /*step=*/ValueRange{constantIndex(rewriter, loc, 1)});
-      rewriter.create<linalg::FillOp>(loc, value, subBuffer);
+      linalg::FillOp::create(rewriter, loc, value, subBuffer);
     }
 
     // Update the buffer size.

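Every hunk in the file above is the same mechanical rewrite: the `OpBuilder` member template is replaced by the static `create` hook on the op class, which takes the builder as its first argument. A minimal sketch of the two spellings, assuming index-typed values `lhs` and `rhs` already in scope (the helper function itself is illustrative, not part of this patch):

  #include "mlir/Dialect/Arith/IR/Arith.h"
  #include "mlir/IR/Builders.h"

  using namespace mlir;

  static Value addIndexValues(OpBuilder &builder, Location loc, Value lhs,
                              Value rhs) {
    // Old spelling (being replaced throughout this patch):
    //   Value sum = builder.create<arith::AddIOp>(loc, lhs, rhs);
    // New spelling: static create hook; the builder moves into the argument
    // list, and everything after `loc` stays the same.
    Value sum = arith::AddIOp::create(builder, loc, lhs, rhs);
    return sum;
  }

Both spellings build the identical operation; only the call-site syntax changes.
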
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
index e89b34d457ff8..a317abd6c560b 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
@@ -59,8 +59,8 @@ static gpu::GPUModuleOp genGPUModule(OpBuilder &builder, ModuleOp topModule) {
     return op; // existing
   markAsGPUContainer(topModule);
   builder.setInsertionPointToStart(topModule.getBody());
-  return builder.create<gpu::GPUModuleOp>(topModule->getLoc(),
-                                          "sparse_kernels");
+  return gpu::GPUModuleOp::create(builder, topModule->getLoc(),
+                                  "sparse_kernels");
 }
 
 /// Constructs a new GPU kernel in the given GPU module.
@@ -81,7 +81,7 @@ static gpu::GPUFuncOp genGPUFunc(OpBuilder &builder, gpu::GPUModuleOp gpuModule,
     argsTp.push_back(arg.getType());
   FunctionType type = FunctionType::get(gpuModule->getContext(), argsTp, {});
   auto gpuFunc =
-      builder.create<gpu::GPUFuncOp>(gpuModule->getLoc(), kernelName, type);
+      gpu::GPUFuncOp::create(builder, gpuModule->getLoc(), kernelName, type);
   gpuFunc->setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
                    builder.getUnitAttr());
   return gpuFunc;
@@ -115,28 +115,28 @@ static Value genHostRegisterMemref(OpBuilder &builder, Location loc,
   MemRefType memTp = cast<MemRefType>(mem.getType());
   UnrankedMemRefType resTp =
       UnrankedMemRefType::get(memTp.getElementType(), /*memorySpace=*/0);
-  Value cast = builder.create<memref::CastOp>(loc, resTp, mem);
-  builder.create<gpu::HostRegisterOp>(loc, cast);
+  Value cast = memref::CastOp::create(builder, loc, resTp, mem);
+  gpu::HostRegisterOp::create(builder, loc, cast);
   return cast;
 }
 
 /// Unmaps the provided buffer, expecting the previously cast buffer.
 static void genHostUnregisterMemref(OpBuilder &builder, Location loc,
                                     Value cast) {
-  builder.create<gpu::HostUnregisterOp>(loc, cast);
+  gpu::HostUnregisterOp::create(builder, loc, cast);
 }
 
 /// Generates first wait in an asynchronous chain.
 static Value genFirstWait(OpBuilder &builder, Location loc) {
   Type tokenType = builder.getType<gpu::AsyncTokenType>();
-  return builder.create<gpu::WaitOp>(loc, tokenType, ValueRange())
+  return gpu::WaitOp::create(builder, loc, tokenType, ValueRange())
       .getAsyncToken();
 }
 
 /// Generates last, blocking wait in an asynchronous chain.
 static void genBlockingWait(OpBuilder &builder, Location loc,
                             ValueRange operands) {
-  builder.create<gpu::WaitOp>(loc, Type(), operands);
+  gpu::WaitOp::create(builder, loc, Type(), operands);
 }
 
 /// Allocates memory on the device.
@@ -156,23 +156,23 @@ static gpu::AllocOp genAllocMemRef(OpBuilder &builder, Location loc, Value mem,
       dynamicSizes.push_back(dimOp);
     }
   }
-  return builder.create<gpu::AllocOp>(loc, TypeRange({memTp, token.getType()}),
-                                      token, dynamicSizes, ValueRange());
+  return gpu::AllocOp::create(builder, loc, TypeRange({memTp, token.getType()}),
+                              token, dynamicSizes, ValueRange());
 }
 
 // Allocates a typed buffer on the host with given size.
 static Value genHostBuffer(OpBuilder &builder, Location loc, Type type,
                            Value size) {
   const auto memTp = MemRefType::get({ShapedType::kDynamic}, type);
-  return builder.create<memref::AllocOp>(loc, memTp, size).getResult();
+  return memref::AllocOp::create(builder, loc, memTp, size).getResult();
 }
 
 // Allocates a typed buffer on the device with given size.
 static gpu::AllocOp genAllocBuffer(OpBuilder &builder, Location loc, Type type,
                                    Value size, Value token) {
   const auto memTp = MemRefType::get({ShapedType::kDynamic}, type);
-  return builder.create<gpu::AllocOp>(loc, TypeRange({memTp, token.getType()}),
-                                      token, size, ValueRange());
+  return gpu::AllocOp::create(builder, loc, TypeRange({memTp, token.getType()}),
+                              token, size, ValueRange());
 }
 
 // Allocates a void buffer on the device with given size.
@@ -184,14 +184,14 @@ static gpu::AllocOp genAllocBuffer(OpBuilder &builder, Location loc, Value size,
 /// Deallocates memory from the device.
 static Value genDeallocMemRef(OpBuilder &builder, Location loc, Value mem,
                               Value token) {
-  return builder.create<gpu::DeallocOp>(loc, token.getType(), token, mem)
+  return gpu::DeallocOp::create(builder, loc, token.getType(), token, mem)
       .getAsyncToken();
 }
 
 /// Copies memory between host and device (direction is implicit).
 static Value genCopyMemRef(OpBuilder &builder, Location loc, Value dst,
                            Value src, Value token) {
-  return builder.create<gpu::MemcpyOp>(loc, token.getType(), token, dst, src)
+  return gpu::MemcpyOp::create(builder, loc, token.getType(), token, dst, src)
       .getAsyncToken();
 }
 
@@ -212,7 +212,7 @@ static Value genTensorToMemref(PatternRewriter &rewriter, Location loc,
   auto tensorType = llvm::cast<ShapedType>(tensor.getType());
   auto memrefType =
       MemRefType::get(tensorType.getShape(), tensorType.getElementType());
-  return rewriter.create<bufferization::ToBufferOp>(loc, memrefType, tensor);
+  return bufferization::ToBufferOp::create(rewriter, loc, memrefType, tensor);
 }
 
 /// Prepares the outlined arguments, passing scalars and buffers in. Here we
@@ -293,13 +293,13 @@ static void genGPUCode(PatternRewriter &rewriter, gpu::GPUFuncOp gpuFunc,
   // so that:
   //   row = blockIdx.x * blockDim.x + threadIdx.x
   //   inc = blockDim.x * gridDim.x
-  Value bid = rewriter.create<gpu::BlockIdOp>(loc, gpu::Dimension::x);
-  Value bsz = rewriter.create<gpu::BlockDimOp>(loc, gpu::Dimension::x);
-  Value tid = rewriter.create<gpu::ThreadIdOp>(loc, gpu::Dimension::x);
-  Value gsz = rewriter.create<gpu::GridDimOp>(loc, gpu::Dimension::x);
-  Value mul = rewriter.create<arith::MulIOp>(loc, bid, bsz);
-  Value row = rewriter.create<arith::AddIOp>(loc, mul, tid);
-  Value inc = rewriter.create<arith::MulIOp>(loc, bsz, gsz);
+  Value bid = gpu::BlockIdOp::create(rewriter, loc, gpu::Dimension::x);
+  Value bsz = gpu::BlockDimOp::create(rewriter, loc, gpu::Dimension::x);
+  Value tid = gpu::ThreadIdOp::create(rewriter, loc, gpu::Dimension::x);
+  Value gsz = gpu::GridDimOp::create(rewriter, loc, gpu::Dimension::x);
+  Value mul = arith::MulIOp::create(rewriter, loc, bid, bsz);
+  Value row = arith::AddIOp::create(rewriter, loc, mul, tid);
+  Value inc = arith::MulIOp::create(rewriter, loc, bsz, gsz);
 
   // Construct the iteration over the computational space that
   // accounts for the fact that the total number of threads and
@@ -308,7 +308,7 @@ static void genGPUCode(PatternRewriter &rewriter, gpu::GPUFuncOp gpuFunc,
   //     <loop-body>
   //   }
   Value upper = irMap.lookup(forallOp.getUpperBound()[0]);
-  scf::ForOp forOp = rewriter.create<scf::ForOp>(loc, row, upper, inc);
+  scf::ForOp forOp = scf::ForOp::create(rewriter, loc, row, upper, inc);
   // The scf.for builder creates an empty block. scf.for does not allow multiple
   // blocks in its region, so delete the block before `cloneRegionBefore` adds
   // an additional block.
@@ -321,7 +321,7 @@ static void genGPUCode(PatternRewriter &rewriter, gpu::GPUFuncOp gpuFunc,
 
   // Done.
   rewriter.setInsertionPointAfter(forOp);
-  rewriter.create<gpu::ReturnOp>(gpuFunc->getLoc());
+  gpu::ReturnOp::create(rewriter, gpuFunc->getLoc());
 }
 
 //===----------------------------------------------------------------------===//
@@ -496,11 +496,11 @@ static Value genFirstPosOrCrds(OpBuilder &builder, Location loc, Value a,
   if (format == CuSparseFormat::kCOO) {
     // Library uses SoA COO, direct IR uses AoS COO.
     if (enableRT)
-      return builder.create<ToCoordinatesOp>(loc, a, 0);
-    return builder.create<ToCoordinatesBufferOp>(loc, a);
+      return ToCoordinatesOp::create(builder, loc, a, 0);
+    return ToCoordinatesBufferOp::create(builder, loc, a);
   }
   // Formats CSR/CSC and BSR use positions at 1.
-  return builder.create<ToPositionsOp>(loc, a, 1);
+  return ToPositionsOp::create(builder, loc, a, 1);
 }
 
 /// Generates the second coordinates of a sparse matrix.
@@ -510,7 +510,7 @@ static Value genSecondCrds(OpBuilder &builder, Location loc, Value a,
   if (isCOO && !enableRT)
     return Value(); // nothing needed
   // Formats CSR/CSC and BSR use coordinates at 1.
-  return builder.create<ToCoordinatesOp>(loc, a, 1);
+  return ToCoordinatesOp::create(builder, loc, a, 1);
 }
 
 /// Generates the sparse matrix handle.
@@ -523,24 +523,24 @@ static Operation *genSpMat(OpBuilder &builder, Location loc,
     // Library uses SoA COO, direct IR uses AoS COO.
     if (enableRT) {
       assert(colA);
-      return builder.create<gpu::CreateCooOp>(loc, handleTp, tokenTp, token,
-                                              sz1, sz2, nseA, rowA, colA, valA);
+      return gpu::CreateCooOp::create(builder, loc, handleTp, tokenTp, token,
+                                      sz1, sz2, nseA, rowA, colA, valA);
     }
 #ifdef CUSPARSE_COO_AOS
     assert(!colA);
-    return builder.create<gpu::CreateCooAoSOp>(loc, handleTp, tokenTp, token,
-                                               sz1, sz2, nseA, rowA, valA);
+    return gpu::CreateCooAoSOp::create(builder, loc, handleTp, tokenTp, token,
+                                       sz1, sz2, nseA, rowA, valA);
 #else
     llvm_unreachable("gpu::CreateCooAoSOp is deprecated");
 #endif
   }
   assert(colA);
   if (format == CuSparseFormat::kCSR)
-    return builder.create<gpu::CreateCsrOp>(loc, handleTp, tokenTp, token, sz1,
-                                            sz2, nseA, rowA, colA, valA);
+    return gpu::CreateCsrOp::create(builder, loc, handleTp, tokenTp, token, sz1,
+                                    sz2, nseA, rowA, colA, valA);
   if (format == CuSparseFormat::kCSC)
-    return builder.create<gpu::CreateCscOp>(loc, handleTp, tokenTp, token, sz1,
-                                            sz2, nseA, rowA, colA, valA);
+    return gpu::CreateCscOp::create(builder, loc, handleTp, tokenTp, token, sz1,
+                                    sz2, nseA, rowA, colA, valA);
   // BSR requires a bit more work since we need to pass in the block size
   // and all other sizes in terms of blocks (#block-rows, #block-cols,
   // #nonzero-blocks).
@@ -549,13 +549,12 @@ static Operation *genSpMat(OpBuilder &builder, Location loc,
   assert(dims.size() == 2 && dims[0] == dims[1]);
   uint64_t b = dims[0];
   Value bSz = constantIndex(builder, loc, b);
-  Value bRows = builder.create<arith::DivUIOp>(loc, sz1, bSz);
-  Value bCols = builder.create<arith::DivUIOp>(loc, sz2, bSz);
-  Value bNum = builder.create<arith::DivUIOp>(
-      loc, nseA, constantIndex(builder, loc, b * b));
-  return builder.create<gpu::CreateBsrOp>(loc, handleTp, tokenTp, token, bRows,
-                                          bCols, bNum, bSz, bSz, rowA, colA,
-                                          valA);
+  Value bRows = arith::DivUIOp::create(builder, loc, sz1, bSz);
+  Value bCols = arith::DivUIOp::create(builder, loc, sz2, bSz);
+  Value bNum = arith::DivUIOp::create(builder, loc, nseA,
+                                      constantIndex(builder, loc, b * b));
+  return gpu::CreateBsrOp::create(builder, loc, handleTp, tokenTp, token, bRows,
+                                  bCols, bNum, bSz, bSz, rowA, colA, valA);
 }
 
 /// Match and rewrite SpMV kernel.
@@ -579,12 +578,12 @@ static LogicalResult rewriteSpMV(PatternRewriter &rewriter,
   //   a : memR/memC/memV -> rowA,colA,valA
   //   x : memX           -> vecX
   //   y : memY           -> vecY
-  Value nseA = rewriter.create<NumberOfEntriesOp>(loc, a);
+  Value nseA = NumberOfEntriesOp::create(rewriter, loc, a);
   Value szY = linalg::createOrFoldDimOp(rewriter, loc, a, 0);
   Value szX = linalg::createOrFoldDimOp(rewriter, loc, a, 1);
   Value memR = genFirstPosOrCrds(rewriter, loc, a, format, enableRT);
   Value memC = genSecondCrds(rewriter, loc, a, format, enableRT); // or empty
-  Value memV = rewriter.create<ToValuesOp>(loc, a);
+  Value memV = ToValuesOp::create(rewriter, loc, a);
   Value rowA = genAllocCopy(rewriter, loc, memR, tokens);
   Value colA = memC ? genAllocCopy(rewriter, loc, memC, tokens) : Value();
   Value valA = genAllocCopy(rewriter, loc, memV, tokens);
@@ -606,19 +605,19 @@ static LogicalResult rewriteSpMV(PatternRewriter &rewriter,
                nseA, rowA, colA, valA, format, enableRT);
   Value spMatA = spGenA->getResult(0);
   token = spGenA->getResult(1);
-  auto dvecX = rewriter.create<gpu::CreateDnTensorOp>(
-      loc, dnTensorHandleTp, tokenTp, token, vecX, szX);
+  auto dvecX = gpu::CreateDnTensorOp::create(rewriter, loc, dnTensorHandleTp,
+                                             tokenTp, token, vecX, szX);
   Value dnX = dvecX.getResult(0);
   token = dvecX.getAsyncToken();
-  auto dvecY = rewriter.create<gpu::CreateDnTensorOp>(
-      loc, dnTensorHandleTp, tokenTp, token, vecY, szY);
+  auto dvecY = gpu::CreateDnTensorOp::create(rewriter, loc, dnTensorHandleTp,
+                                             tokenTp, token, vecY, szY);
   Value dnY = dvecY.getResult(0);
   token = dvecY.getAsyncToken();
   auto dnYType = llvm::cast<ShapedType>(y.getType()).getElementType();
 
   // Precompute buffersize for SpMV.
-  auto bufferComp = rewriter.create<gpu::SpMVBufferSizeOp>(
-      loc, indexTp, tokenTp, token, spMatA, dnX, dnY,
+  auto bufferComp = gpu::SpMVBufferSizeOp::create(
+      rewriter, loc, indexTp, tokenTp, token, spMatA, dnX, dnY,
       /*computeType=*/dnYType);
   Value bufferSz = bufferComp.getResult(0);
   token = bufferComp.getAsyncToken();
@@ -627,16 +626,17 @@ static LogicalResult rewriteSpMV(PatternRewriter &rewriter,
   token = buf.getAsyncToken();
 
   // Perform the SpMV.
-  auto spmvComp = rewriter.create<gpu::SpMVOp>(
-      loc, tokenTp, token, spMatA, dnX, dnY, /*computeType=*/dnYType, buffer);
+  auto spmvComp =
+      gpu::SpMVOp::create(rewriter, loc, tokenTp, token, spMatA, dnX, dnY,
+                          /*computeType=*/dnYType, buffer);
   token = spmvComp.getAsyncToken();
 
   // Copy data back to host and free all the resources.
-  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatA)
+  token = gpu::DestroySpMatOp::create(rewriter, loc, tokenTp, token, spMatA)
               .getAsyncToken();
-  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnX)
+  token = gpu::DestroyDnTensorOp::create(rewriter, loc, tokenTp, token, dnX)
               .getAsyncToken();
-  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnY)
+  token = gpu::DestroyDnTensorOp::create(rewriter, loc, tokenTp, token, dnY)
               .getAsyncToken();
   token = genDeallocMemRef(rewriter, loc, rowA, token);
   if (colA)
@@ -676,13 +676,13 @@ static LogicalResult rewriteSpMM(PatternRewriter &rewriter,
   //   a : memR/memC/memV -> rowA,colA,valA
   //   b : bufB           -> matB
   //   c : bufC           -> matC
-  Value nseA = rewriter.create<NumberOfEntriesOp>(loc, a);
+  Value nseA = NumberOfEntriesOp::create(rewriter, loc, a);
   Value szm = linalg::createOrFoldDimOp(rewriter, loc, a, 0);
   Value szk = linalg::createOrFoldDimOp(rewriter, loc, a, 1);
   Value szn = linalg::createOrFoldDimOp(rewriter, loc, b, 1);
   Value memR = genFirstPosOrCrds(rewriter, loc, a, format, enableRT);
   Value memC = genSecondCrds(rewriter, loc, a, format, enableRT); // or empty
-  Value memV = rewriter.create<ToValuesOp>(loc, a);
+  Value memV = ToValuesOp::create(rewriter, loc, a);
   Value rowA = genAllocCopy(rewriter, loc, memR, tokens);
   Value colA = memC ? genAllocCopy(rewriter, loc, memC, tokens) : Value();
   Value valA = genAllocCopy(rewriter, loc, memV, tokens);
@@ -704,21 +704,21 @@ static LogicalResult rewriteSpMM(PatternRewriter &rewriter,
                nseA, rowA, colA, valA, format, enableRT);
   Value spMatA = spGenA->getResult(0);
   token = spGenA->getResult(1);
-  auto dmatB = rewriter.create<gpu::CreateDnTensorOp>(
-      loc, dnTensorHandleTp, tokenTp, token, matB,
-      SmallVector<Value>{szk, szn});
+  auto dmatB =
+      gpu::CreateDnTensorOp::create(rewriter, loc, dnTensorHandleTp, tokenTp,
+                                    token, matB, SmallVector<Value>{szk, szn});
   Value dnB = dmatB.getResult(0);
   token = dmatB.getAsyncToken();
-  auto dmatC = rewriter.create<gpu::CreateDnTensorOp>(
-      loc, dnTensorHandleTp, tokenTp, token, matC,
-      SmallVector<Value>{szm, szn});
+  auto dmatC =
+      gpu::CreateDnTensorOp::create(rewriter, loc, dnTensorHandleTp, tokenTp,
+                                    token, matC, SmallVector<Value>{szm, szn});
   Value dnC = dmatC.getResult(0);
   token = dmatC.getAsyncToken();
   auto dmatCType = llvm::cast<ShapedType>(c.getType()).getElementType();
 
   // Precompute buffersize for SpMM.
-  auto bufferComp = rewriter.create<gpu::SpMMBufferSizeOp>(
-      loc, indexTp, tokenTp, token, spMatA, dnB, dnC,
+  auto bufferComp = gpu::SpMMBufferSizeOp::create(
+      rewriter, loc, indexTp, tokenTp, token, spMatA, dnB, dnC,
       /*computeType=*/dmatCType);
   Value bufferSz = bufferComp.getResult(0);
   token = bufferComp.getAsyncToken();
@@ -728,16 +728,17 @@ static LogicalResult rewriteSpMM(PatternRewriter &rewriter,
   auto dnCType = llvm::cast<ShapedType>(c.getType()).getElementType();
 
   // Perform the SpMM.
-  auto spmmComp = rewriter.create<gpu::SpMMOp>(
-      loc, tokenTp, token, spMatA, dnB, dnC, /*computeType=*/dnCType, buffer);
+  auto spmmComp =
+      gpu::SpMMOp::create(rewriter, loc, tokenTp, token, spMatA, dnB, dnC,
+                          /*computeType=*/dnCType, buffer);
   token = spmmComp.getAsyncToken();
 
   // Copy data back to host and free all the resources.
-  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatA)
+  token = gpu::DestroySpMatOp::create(rewriter, loc, tokenTp, token, spMatA)
               .getAsyncToken();
-  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnB)
+  token = gpu::DestroyDnTensorOp::create(rewriter, loc, tokenTp, token, dnB)
               .getAsyncToken();
-  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnC)
+  token = gpu::DestroyDnTensorOp::create(rewriter, loc, tokenTp, token, dnC)
               .getAsyncToken();
   token = genDeallocMemRef(rewriter, loc, rowA, token);
   if (colA)
@@ -778,17 +779,17 @@ static LogicalResult rewriteSpGEMM(PatternRewriter &rewriter,
   //   b : bmemR/bmemC/bmemV -> rowB,colB,valB
   //   c : materializes
   auto dnCType = cTp.getElementType();
-  Value nseA = rewriter.create<NumberOfEntriesOp>(loc, a);
-  Value nseB = rewriter.create<NumberOfEntriesOp>(loc, b);
+  Value nseA = NumberOfEntriesOp::create(rewriter, loc, a);
+  Value nseB = NumberOfEntriesOp::create(rewriter, loc, b);
   Value szm = linalg::createOrFoldDimOp(rewriter, loc, a, 0);
   Value szk = linalg::createOrFoldDimOp(rewriter, loc, a, 1);
   Value szn = linalg::createOrFoldDimOp(rewriter, loc, b, 1);
   Value amemR = genFirstPosOrCrds(rewriter, loc, a, format, enableRT);
   Value amemC = genSecondCrds(rewriter, loc, a, format, enableRT); // not empty
-  Value amemV = rewriter.create<ToValuesOp>(loc, a);
+  Value amemV = ToValuesOp::create(rewriter, loc, a);
   Value bmemR = genFirstPosOrCrds(rewriter, loc, b, format, enableRT);
   Value bmemC = genSecondCrds(rewriter, loc, b, format, enableRT); // not empty
-  Value bmemV = rewriter.create<ToValuesOp>(loc, b);
+  Value bmemV = ToValuesOp::create(rewriter, loc, b);
   Value rowA = genAllocCopy(rewriter, loc, amemR, tokens);
   Value colA = genAllocCopy(rewriter, loc, amemC, tokens);
   Value valA = genAllocCopy(rewriter, loc, amemV, tokens);
@@ -818,7 +819,7 @@ static LogicalResult rewriteSpGEMM(PatternRewriter &rewriter,
   // Sparse matrix C materializes (also assumes beta == 0).
   Value zero = constantIndex(rewriter, loc, 0);
   Value one = constantIndex(rewriter, loc, 1);
-  Value mplus1 = rewriter.create<arith::AddIOp>(loc, szm, one);
+  Value mplus1 = arith::AddIOp::create(rewriter, loc, szm, one);
   auto e1 = genAllocBuffer(rewriter, loc, cTp.getPosType(), mplus1, token);
   Value rowC = e1.getResult(0);
   token = e1.getAsyncToken();
@@ -836,44 +837,47 @@ static LogicalResult rewriteSpGEMM(PatternRewriter &rewriter,
 
   // Precompute buffersizes for SpGEMM.
   Operation *descOp =
-      rewriter.create<gpu::SpGEMMCreateDescrOp>(loc, descTp, tokenTp, token);
+      gpu::SpGEMMCreateDescrOp::create(rewriter, loc, descTp, tokenTp, token);
   Value desc = descOp->getResult(0);
   token = descOp->getResult(1);
-  Operation *work1 = rewriter.create<gpu::SpGEMMWorkEstimationOrComputeOp>(
-      loc, indexTp, tokenTp, token, desc, gpu::TransposeMode::NON_TRANSPOSE,
-      gpu::TransposeMode::NON_TRANSPOSE, spMatA, spMatB, spMatC, dnCType, zero,
-      valC, gpu::SpGEMMWorkEstimationOrComputeKind::WORK_ESTIMATION);
+  Operation *work1 = gpu::SpGEMMWorkEstimationOrComputeOp::create(
+      rewriter, loc, indexTp, tokenTp, token, desc,
+      gpu::TransposeMode::NON_TRANSPOSE, gpu::TransposeMode::NON_TRANSPOSE,
+      spMatA, spMatB, spMatC, dnCType, zero, valC,
+      gpu::SpGEMMWorkEstimationOrComputeKind::WORK_ESTIMATION);
   Value bufferSz1 = work1->getResult(0);
   token = work1->getResult(1);
   auto buf1 = genAllocBuffer(rewriter, loc, bufferSz1, token);
   Value buffer1 = buf1.getResult(0);
   token = buf1.getAsyncToken();
-  Operation *work2 = rewriter.create<gpu::SpGEMMWorkEstimationOrComputeOp>(
-      loc, indexTp, tokenTp, token, desc, gpu::TransposeMode::NON_TRANSPOSE,
-      gpu::TransposeMode::NON_TRANSPOSE, spMatA, spMatB, spMatC, dnCType,
-      bufferSz1, buffer1,
+  Operation *work2 = gpu::SpGEMMWorkEstimationOrComputeOp::create(
+      rewriter, loc, indexTp, tokenTp, token, desc,
+      gpu::TransposeMode::NON_TRANSPOSE, gpu::TransposeMode::NON_TRANSPOSE,
+      spMatA, spMatB, spMatC, dnCType, bufferSz1, buffer1,
       gpu::SpGEMMWorkEstimationOrComputeKind::WORK_ESTIMATION);
   token = work2->getResult(1);
 
   // Compute step.
-  Operation *compute1 = rewriter.create<gpu::SpGEMMWorkEstimationOrComputeOp>(
-      loc, indexTp, tokenTp, token, desc, gpu::TransposeMode::NON_TRANSPOSE,
-      gpu::TransposeMode::NON_TRANSPOSE, spMatA, spMatB, spMatC, dnCType, zero,
-      valC, gpu::SpGEMMWorkEstimationOrComputeKind::COMPUTE);
+  Operation *compute1 = gpu::SpGEMMWorkEstimationOrComputeOp::create(
+      rewriter, loc, indexTp, tokenTp, token, desc,
+      gpu::TransposeMode::NON_TRANSPOSE, gpu::TransposeMode::NON_TRANSPOSE,
+      spMatA, spMatB, spMatC, dnCType, zero, valC,
+      gpu::SpGEMMWorkEstimationOrComputeKind::COMPUTE);
   Value bufferSz2 = compute1->getResult(0);
   token = compute1->getResult(1);
   auto buf2 = genAllocBuffer(rewriter, loc, bufferSz2, token);
   Value buffer2 = buf2.getResult(0);
   token = buf2.getAsyncToken();
-  Operation *compute2 = rewriter.create<gpu::SpGEMMWorkEstimationOrComputeOp>(
-      loc, indexTp, tokenTp, token, desc, gpu::TransposeMode::NON_TRANSPOSE,
-      gpu::TransposeMode::NON_TRANSPOSE, spMatA, spMatB, spMatC, dnCType,
-      bufferSz2, buffer2, gpu::SpGEMMWorkEstimationOrComputeKind::COMPUTE);
+  Operation *compute2 = gpu::SpGEMMWorkEstimationOrComputeOp::create(
+      rewriter, loc, indexTp, tokenTp, token, desc,
+      gpu::TransposeMode::NON_TRANSPOSE, gpu::TransposeMode::NON_TRANSPOSE,
+      spMatA, spMatB, spMatC, dnCType, bufferSz2, buffer2,
+      gpu::SpGEMMWorkEstimationOrComputeKind::COMPUTE);
   token = compute2->getResult(1);
 
   // Get sizes.
-  Operation *sizes = rewriter.create<gpu::SpMatGetSizeOp>(
-      loc, indexTp, indexTp, indexTp, tokenTp, token, spMatC);
+  Operation *sizes = gpu::SpMatGetSizeOp::create(
+      rewriter, loc, indexTp, indexTp, indexTp, tokenTp, token, spMatC);
   Value nnz = sizes->getResult(2);
   token = sizes->getResult(3);
   auto a2 = genAllocBuffer(rewriter, loc, cTp.getCrdType(), nnz, token);
@@ -884,11 +888,11 @@ static LogicalResult rewriteSpGEMM(PatternRewriter &rewriter,
   token = a3.getAsyncToken();
 
   // Update C with new pointers and copy final product back into C.
-  Operation *update = rewriter.create<gpu::SetCsrPointersOp>(
-      loc, tokenTp, token, spMatC, rowC, colC, valC);
+  Operation *update = gpu::SetCsrPointersOp::create(
+      rewriter, loc, tokenTp, token, spMatC, rowC, colC, valC);
   token = update->getResult(0);
-  Operation *copy = rewriter.create<gpu::SpGEMMCopyOp>(
-      loc, tokenTp, token, desc, gpu::TransposeMode::NON_TRANSPOSE,
+  Operation *copy = gpu::SpGEMMCopyOp::create(
+      rewriter, loc, tokenTp, token, desc, gpu::TransposeMode::NON_TRANSPOSE,
       gpu::TransposeMode::NON_TRANSPOSE, spMatA, spMatB, spMatC, dnCType);
   token = copy->getResult(0);
 
@@ -898,13 +902,13 @@ static LogicalResult rewriteSpGEMM(PatternRewriter &rewriter,
   Value valH = genHostBuffer(rewriter, loc, dnCType, nnz);
 
   // Copy data back to host and free all the resources.
-  token = rewriter.create<gpu::SpGEMMDestroyDescrOp>(loc, tokenTp, token, desc)
+  token = gpu::SpGEMMDestroyDescrOp::create(rewriter, loc, tokenTp, token, desc)
               .getAsyncToken();
-  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatA)
+  token = gpu::DestroySpMatOp::create(rewriter, loc, tokenTp, token, spMatA)
               .getAsyncToken();
-  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatB)
+  token = gpu::DestroySpMatOp::create(rewriter, loc, tokenTp, token, spMatB)
               .getAsyncToken();
-  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatC)
+  token = gpu::DestroySpMatOp::create(rewriter, loc, tokenTp, token, spMatC)
               .getAsyncToken();
   token = genCopyMemRef(rewriter, loc, rowH, rowC, token);
   token = genCopyMemRef(rewriter, loc, colH, colC, token);
@@ -925,12 +929,12 @@ static LogicalResult rewriteSpGEMM(PatternRewriter &rewriter,
   tokens.clear();
 
   // Done.
-  Value vt = rewriter.create<bufferization::ToTensorOp>(
-      loc, memref::getTensorTypeFromMemRefType(valH.getType()), valH);
-  Value rt = rewriter.create<bufferization::ToTensorOp>(
-      loc, memref::getTensorTypeFromMemRefType(rowH.getType()), rowH);
-  Value ct = rewriter.create<bufferization::ToTensorOp>(
-      loc, memref::getTensorTypeFromMemRefType(colH.getType()), colH);
+  Value vt = bufferization::ToTensorOp::create(
+      rewriter, loc, memref::getTensorTypeFromMemRefType(valH.getType()), valH);
+  Value rt = bufferization::ToTensorOp::create(
+      rewriter, loc, memref::getTensorTypeFromMemRefType(rowH.getType()), rowH);
+  Value ct = bufferization::ToTensorOp::create(
+      rewriter, loc, memref::getTensorTypeFromMemRefType(colH.getType()), colH);
   rewriter.replaceOpWithNewOp<AssembleOp>(op, c.getType(), ValueRange{rt, ct},
                                           vt);
   return success();
@@ -980,19 +984,19 @@ static LogicalResult rewrite2To4SpMM(PatternRewriter &rewriter,
   Type spMatHandleTp = rewriter.getType<gpu::SparseSpMatHandleType>();
   Type tokenTp = rewriter.getType<gpu::AsyncTokenType>();
   Value token = genFirstWait(rewriter, loc);
-  Operation *spGenA = rewriter.create<gpu::Create2To4SpMatOp>(
-      loc, spMatHandleTp, tokenTp, token, szm, szk,
+  Operation *spGenA = gpu::Create2To4SpMatOp::create(
+      rewriter, loc, spMatHandleTp, tokenTp, token, szm, szk,
       gpu::Prune2To4SpMatFlag::PRUNE_AND_CHECK, matA);
   Value spMatA = spGenA->getResult(0);
   token = spGenA->getResult(1);
-  auto dmatB = rewriter.create<gpu::CreateDnTensorOp>(
-      loc, dnTensorHandleTp, tokenTp, token, matB,
-      SmallVector<Value>{szk, szn});
+  auto dmatB =
+      gpu::CreateDnTensorOp::create(rewriter, loc, dnTensorHandleTp, tokenTp,
+                                    token, matB, SmallVector<Value>{szk, szn});
   Value dnB = dmatB.getResult(0);
   token = dmatB.getAsyncToken();
-  auto dmatC = rewriter.create<gpu::CreateDnTensorOp>(
-      loc, dnTensorHandleTp, tokenTp, token, matC,
-      SmallVector<Value>{szm, szn});
+  auto dmatC =
+      gpu::CreateDnTensorOp::create(rewriter, loc, dnTensorHandleTp, tokenTp,
+                                    token, matC, SmallVector<Value>{szm, szn});
   Value dnC = dmatC.getResult(0);
   token = dmatC.getAsyncToken();
   auto dmatCType = llvm::cast<ShapedType>(matC.getType()).getElementType();
@@ -1000,9 +1004,10 @@ static LogicalResult rewrite2To4SpMM(PatternRewriter &rewriter,
   // Precompute buffersize for SpMM.
   SmallVector<Type> bufferTypes_{indexTp, indexTp, indexTp};
   TypeRange bufferTypes(bufferTypes_);
-  auto bufferComp = rewriter.create<gpu::SpMMBufferSizeOp>(
-      loc, bufferTypes, tokenTp, token, gpu::TransposeMode::NON_TRANSPOSE,
-      gpu::TransposeMode::NON_TRANSPOSE, spMatA, dnB, dnC,
+  auto bufferComp = gpu::SpMMBufferSizeOp::create(
+      rewriter, loc, bufferTypes, tokenTp, token,
+      gpu::TransposeMode::NON_TRANSPOSE, gpu::TransposeMode::NON_TRANSPOSE,
+      spMatA, dnB, dnC,
       /*computeType=*/dmatCType);
   token = bufferComp.getAsyncToken();
 
@@ -1022,17 +1027,17 @@ static LogicalResult rewrite2To4SpMM(PatternRewriter &rewriter,
 
   // Perform the SpMM.
   auto dnCType = llvm::cast<ShapedType>(matC.getType()).getElementType();
-  auto spmmComp = rewriter.create<gpu::SpMMOp>(
-      loc, tokenTp, token, spMatA, dnB, dnC, /*computeType=*/dnCType,
+  auto spmmComp = gpu::SpMMOp::create(
+      rewriter, loc, tokenTp, token, spMatA, dnB, dnC, /*computeType=*/dnCType,
       SmallVector<Value>{buffer1, buffer2, buffer3});
   token = spmmComp.getAsyncToken();
 
   // Copy data back to host and free all the resources.
-  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatA)
+  token = gpu::DestroySpMatOp::create(rewriter, loc, tokenTp, token, spMatA)
               .getAsyncToken();
-  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnB)
+  token = gpu::DestroyDnTensorOp::create(rewriter, loc, tokenTp, token, dnB)
               .getAsyncToken();
-  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnC)
+  token = gpu::DestroyDnTensorOp::create(rewriter, loc, tokenTp, token, dnC)
               .getAsyncToken();
   token = genDeallocMemRef(rewriter, loc, buffer1, token);
   token = genDeallocMemRef(rewriter, loc, buffer2, token);
@@ -1073,7 +1078,7 @@ static LogicalResult rewriteSDDMM(PatternRewriter &rewriter,
   //   a : bufA           -> matA
   //   b : bufB           -> matB
   //   c : memR/memC/memV -> rowC,colC,valC
-  Value nseC = rewriter.create<NumberOfEntriesOp>(loc, c);
+  Value nseC = NumberOfEntriesOp::create(rewriter, loc, c);
   Value szm = linalg::createOrFoldDimOp(rewriter, loc, a, 0);
   Value szk = linalg::createOrFoldDimOp(rewriter, loc, a, 1);
   Value szn = linalg::createOrFoldDimOp(rewriter, loc, b, 1);
@@ -1083,7 +1088,7 @@ static LogicalResult rewriteSDDMM(PatternRewriter &rewriter,
   Value matB = genAllocCopy(rewriter, loc, bufB, tokens);
   Value memR = genFirstPosOrCrds(rewriter, loc, c, format, enableRT);
   Value memC = genSecondCrds(rewriter, loc, c, format, enableRT); // or empty
-  Value memV = rewriter.create<ToValuesOp>(loc, c);
+  Value memV = ToValuesOp::create(rewriter, loc, c);
   Value rowC = genAllocCopy(rewriter, loc, memR, tokens);
   Value colC = memC ? genAllocCopy(rewriter, loc, memC, tokens) : Value();
   Value valC = genAllocCopy(rewriter, loc, memV, tokens);
@@ -1096,12 +1101,14 @@ static LogicalResult rewriteSDDMM(PatternRewriter &rewriter,
   Type spMatHandleTp = rewriter.getType<gpu::SparseSpMatHandleType>();
   Type tokenTp = rewriter.getType<gpu::AsyncTokenType>();
   Value token = genFirstWait(rewriter, loc);
-  auto dmatA = rewriter.create<gpu::CreateDnTensorOp>(
-      loc, dnMatHandleTp, tokenTp, token, matA, SmallVector<Value>{szm, szk});
+  auto dmatA =
+      gpu::CreateDnTensorOp::create(rewriter, loc, dnMatHandleTp, tokenTp,
+                                    token, matA, SmallVector<Value>{szm, szk});
   Value dnA = dmatA.getResult(0);
   token = dmatA.getAsyncToken();
-  auto dmatB = rewriter.create<gpu::CreateDnTensorOp>(
-      loc, dnMatHandleTp, tokenTp, token, matB, SmallVector<Value>{szk, szn});
+  auto dmatB =
+      gpu::CreateDnTensorOp::create(rewriter, loc, dnMatHandleTp, tokenTp,
+                                    token, matB, SmallVector<Value>{szk, szn});
   Value dnB = dmatB.getResult(0);
   token = dmatB.getAsyncToken();
   Operation *spGenC =
@@ -1112,8 +1119,8 @@ static LogicalResult rewriteSDDMM(PatternRewriter &rewriter,
   auto dnCType = llvm::cast<ShapedType>(c.getType()).getElementType();
 
   // Precompute buffersize for SDDMM.
-  auto bufferComp = rewriter.create<gpu::SDDMMBufferSizeOp>(
-      loc, indexTp, tokenTp, token, dnA, dnB, spMatC, dnCType);
+  auto bufferComp = gpu::SDDMMBufferSizeOp::create(
+      rewriter, loc, indexTp, tokenTp, token, dnA, dnB, spMatC, dnCType);
   Value bufferSz = bufferComp.getResult(0);
   token = bufferComp.getAsyncToken();
   auto buf = genAllocBuffer(rewriter, loc, bufferSz, token);
@@ -1121,16 +1128,16 @@ static LogicalResult rewriteSDDMM(PatternRewriter &rewriter,
   token = buf.getAsyncToken();
 
   // Perform the SDDMM.
-  auto sddmmComp = rewriter.create<gpu::SDDMMOp>(loc, tokenTp, token, dnA, dnB,
-                                                 spMatC, dnCType, buffer);
+  auto sddmmComp = gpu::SDDMMOp::create(rewriter, loc, tokenTp, token, dnA, dnB,
+                                        spMatC, dnCType, buffer);
   token = sddmmComp.getAsyncToken();
 
   // Copy data back to host and free all the resources.
-  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnA)
+  token = gpu::DestroyDnTensorOp::create(rewriter, loc, tokenTp, token, dnA)
               .getAsyncToken();
-  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnB)
+  token = gpu::DestroyDnTensorOp::create(rewriter, loc, tokenTp, token, dnB)
               .getAsyncToken();
-  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatC)
+  token = gpu::DestroySpMatOp::create(rewriter, loc, tokenTp, token, spMatC)
               .getAsyncToken();
   token = genDeallocMemRef(rewriter, loc, buffer, token);
   token = genDeallocMemRef(rewriter, loc, matA, token);

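The GPU rewrites in the file above all share one token-threading discipline: a first non-blocking `gpu.wait` opens the async chain, every async op consumes the current token and yields the next one via `getAsyncToken()`, and a final blocking `gpu.wait` (built with a null result type) joins the chain on the host. A condensed sketch in the new spelling; the single dealloc stands in for the longer alloc/copy/compute sequences, and `mem` is assumed to be a device buffer already in scope:

  #include "mlir/Dialect/GPU/IR/GPUDialect.h"
  #include "mlir/IR/Builders.h"

  using namespace mlir;

  static void asyncChainSketch(OpBuilder &builder, Location loc, Value mem) {
    Type tokenTp = builder.getType<gpu::AsyncTokenType>();
    // First wait produces the initial token of the chain.
    Value token =
        gpu::WaitOp::create(builder, loc, tokenTp, ValueRange()).getAsyncToken();
    // Each async op threads the token: consume the current one, return the next.
    token = gpu::DeallocOp::create(builder, loc, token.getType(), token, mem)
                .getAsyncToken();
    // Blocking wait: no token result, just joins on the listed operands.
    gpu::WaitOp::create(builder, loc, Type(), ValueRange{token});
  }
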
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseIterationToScf.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseIterationToScf.cpp
index 2f68008e68b5f..dfb127444e281 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseIterationToScf.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseIterationToScf.cpp
@@ -67,12 +67,12 @@ genCoIterateBranchNest(PatternRewriter &rewriter, Location loc, CoIterateOp op,
       op.getRegionDefinedSpace(newBlock->getParent()->getRegionNumber());
   for (unsigned i : caseBits.bits()) {
     SparseIterator *it = iters[i].get();
-    Value pred = rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
-                                                it->getCrd(), loopCrd);
-    casePred = rewriter.create<arith::AndIOp>(loc, casePred, pred);
+    Value pred = arith::CmpIOp::create(rewriter, loc, arith::CmpIPredicate::eq,
+                                       it->getCrd(), loopCrd);
+    casePred = arith::AndIOp::create(rewriter, loc, casePred, pred);
   }
-  scf::IfOp ifOp = rewriter.create<scf::IfOp>(
-      loc, ValueRange(userReduc).getTypes(), casePred, /*else=*/true);
+  scf::IfOp ifOp = scf::IfOp::create(
+      rewriter, loc, ValueRange(userReduc).getTypes(), casePred, /*else=*/true);
   rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
 
   // Erase the empty block.
@@ -103,7 +103,7 @@ genCoIterateBranchNest(PatternRewriter &rewriter, Location loc, CoIterateOp op,
   ValueRange yields = spY.getResults();
   rewriter.eraseOp(spY);
   rewriter.setInsertionPointToEnd(&ifOp.getThenRegion().front());
-  rewriter.create<scf::YieldOp>(loc, yields);
+  scf::YieldOp::create(rewriter, loc, yields);
 
   // Generates remaining case recursively.
   rewriter.setInsertionPointToStart(&ifOp.getElseRegion().front());
@@ -111,7 +111,7 @@ genCoIterateBranchNest(PatternRewriter &rewriter, Location loc, CoIterateOp op,
                                           newBlocks.drop_front(),
                                           oldBlocks.drop_front(), userReduc);
   if (!res.empty())
-    rewriter.create<scf::YieldOp>(loc, res);
+    scf::YieldOp::create(rewriter, loc, res);
 
   rewriter.setInsertionPointAfter(ifOp);
   return ifOp.getResults();
@@ -127,8 +127,8 @@ static ValueRange genLoopWithIterator(
   if (it->iteratableByFor()) {
     auto [lo, hi] = it->genForCond(rewriter, loc);
     Value step = constantIndex(rewriter, loc, 1);
-    scf::ForOp forOp = rewriter.create<scf::ForOp>(
-        loc, lo, hi, step, reduc,
+    scf::ForOp forOp = scf::ForOp::create(
+        rewriter, loc, lo, hi, step, reduc,
         [&](OpBuilder &b, Location loc, Value iv, ValueRange iterArgs) {
           // Empty builder function to ensure that no terminator is created.
         });
@@ -140,7 +140,7 @@ static ValueRange genLoopWithIterator(
                                            it, forOp.getRegionIterArgs());
 
       rewriter.setInsertionPointToEnd(forOp.getBody());
-      rewriter.create<scf::YieldOp>(loc, ret);
+      scf::YieldOp::create(rewriter, loc, ret);
     }
     return forOp.getResults();
   }
@@ -149,7 +149,7 @@ static ValueRange genLoopWithIterator(
   llvm::append_range(ivs, it->getCursor());
 
   TypeRange types = ValueRange(ivs).getTypes();
-  auto whileOp = rewriter.create<scf::WhileOp>(loc, types, ivs);
+  auto whileOp = scf::WhileOp::create(rewriter, loc, types, ivs);
   {
     OpBuilder::InsertionGuard guard(rewriter);
     // Generates loop conditions.
@@ -158,7 +158,7 @@ static ValueRange genLoopWithIterator(
     rewriter.setInsertionPointToStart(before);
     ValueRange bArgs = before->getArguments();
     auto [whileCond, remArgs] = it->genWhileCond(rewriter, loc, bArgs);
-    rewriter.create<scf::ConditionOp>(loc, whileCond, before->getArguments());
+    scf::ConditionOp::create(rewriter, loc, whileCond, before->getArguments());
 
     // Delegates loop body generation.
     Region &dstRegion = whileOp.getAfter();
@@ -175,7 +175,7 @@ static ValueRange genLoopWithIterator(
     SmallVector<Value> yields;
     llvm::append_range(yields, ret);
     llvm::append_range(yields, it->forward(rewriter, loc));
-    rewriter.create<scf::YieldOp>(loc, yields);
+    scf::YieldOp::create(rewriter, loc, yields);
   }
   return whileOp.getResults().drop_front(it->getCursor().size());
 }
@@ -212,8 +212,8 @@ class ExtractValOpConverter : public OpConversionPattern<ExtractValOp> {
                   ConversionPatternRewriter &rewriter) const override {
     Location loc = op.getLoc();
     Value pos = adaptor.getIterator().back();
-    Value valBuf = rewriter.create<ToValuesOp>(
-        loc, llvm::getSingleElement(adaptor.getTensor()));
+    Value valBuf = ToValuesOp::create(
+        rewriter, loc, llvm::getSingleElement(adaptor.getTensor()));
     rewriter.replaceOpWithNewOp<memref::LoadOp>(op, valBuf, pos);
     return success();
   }
@@ -385,12 +385,12 @@ class SparseCoIterateOpConverter : public OpConversionPattern<CoIterateOp> {
         SmallVector<Value> nextIterYields(res);
         // 2nd. forward the loop.
         for (SparseIterator *it : validIters) {
-          Value cmp = rewriter.create<arith::CmpIOp>(
-              loc, arith::CmpIPredicate::eq, it->getCrd(), loopCrd);
+          Value cmp = arith::CmpIOp::create(
+              rewriter, loc, arith::CmpIPredicate::eq, it->getCrd(), loopCrd);
           it->forwardIf(rewriter, loc, cmp);
           llvm::append_range(nextIterYields, it->getCursor());
         }
-        rewriter.create<scf::YieldOp>(loc, nextIterYields);
+        scf::YieldOp::create(rewriter, loc, nextIterYields);
 
         // Exit the loop, relink the iterator SSA value.
         rewriter.setInsertionPointAfter(loop);

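The while-loop path above follows a fixed recipe: create an empty `scf.while` over the carried values, materialize the before block and terminate it with `scf.condition`, then build the after block terminated by `scf.yield`. A skeleton of that recipe in the new spelling; the trivially-true condition (via the sparse tensor `constantI1` helper) is a placeholder for the real iterator condition, and the body just forwards the carried values:

  #include "mlir/Dialect/SCF/IR/SCF.h"
  #include "mlir/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h"
  #include "mlir/IR/Builders.h"

  using namespace mlir;
  using namespace mlir::sparse_tensor;

  static scf::WhileOp whileSkeleton(OpBuilder &builder, Location loc,
                                    ValueRange ivs) {
    TypeRange types = ivs.getTypes();
    auto whileOp = scf::WhileOp::create(builder, loc, types, ivs);
    SmallVector<Location> locs(types.size(), loc);
    // Before region: compute and report the continuation condition.
    Block *before = builder.createBlock(&whileOp.getBefore(), {}, types, locs);
    builder.setInsertionPointToStart(before);
    Value cond = constantI1(builder, loc, true); // placeholder condition
    scf::ConditionOp::create(builder, loc, cond, before->getArguments());
    // After region: the loop body, here forwarding the values unchanged.
    Block *after = builder.createBlock(&whileOp.getAfter(), {}, types, locs);
    builder.setInsertionPointToStart(after);
    scf::YieldOp::create(builder, loc, after->getArguments());
    return whileOp;
  }
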
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseReinterpretMap.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseReinterpretMap.cpp
index 4f554756b3dd2..df9b6cf040efa 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseReinterpretMap.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseReinterpretMap.cpp
@@ -43,7 +43,8 @@ struct DemapInsRewriter : public OpRewritePattern<SourceOp> {
     SmallVector<Value> deMappedIns(op->getOperands());
     for (Value &in : deMappedIns) {
       if (auto stt = tryGetSparseTensorType(in); stt && !stt->isIdentity()) {
-        in = rewriter.create<ReinterpretMapOp>(loc, stt->getDemappedType(), in);
+        in =
+            ReinterpretMapOp::create(rewriter, loc, stt->getDemappedType(), in);
         changed = true;
       }
     }
@@ -337,14 +338,14 @@ translateMap(linalg::GenericOp op, PatternRewriter &rewriter) {
 // Generates a "de"mapping reinterpretation of the map.
 static Value genDemap(OpBuilder &builder, SparseTensorEncodingAttr enc,
                       Value val) {
-  return builder.create<ReinterpretMapOp>(val.getLoc(), enc.withoutDimToLvl(),
-                                          val);
+  return ReinterpretMapOp::create(builder, val.getLoc(), enc.withoutDimToLvl(),
+                                  val);
 }
 
 // Generates a "re"mapping reinterpretation of the map.
 static Value genRemap(OpBuilder &builder, SparseTensorEncodingAttr enc,
                       Value val) {
-  return builder.create<ReinterpretMapOp>(val.getLoc(), enc, val);
+  return ReinterpretMapOp::create(builder, val.getLoc(), enc, val);
 }
 
 static SmallVector<Value> remapValueRange(OpBuilder &rewriter, TypeRange types,
@@ -353,7 +354,7 @@ static SmallVector<Value> remapValueRange(OpBuilder &rewriter, TypeRange types,
   assert(outs.size() == types.size());
   for (auto [r, t] : llvm::zip(ret, types))
     if (r.getType() != t)
-      r = rewriter.create<ReinterpretMapOp>(r.getLoc(), t, r);
+      r = ReinterpretMapOp::create(rewriter, r.getLoc(), t, r);
   return ret;
 }
 
@@ -566,7 +567,7 @@ struct GenericOpScheduler : public OpRewritePattern<linalg::GenericOp> {
       // Inserting the transpose
       rewriter.setInsertionPoint(linalgOp);
       RankedTensorType dstTp = stt.withDimToLvl(dimToLvl).getRankedTensorType();
-      Value dst = rewriter.create<ConvertOp>(tval.getLoc(), dstTp, tval);
+      Value dst = ConvertOp::create(rewriter, tval.getLoc(), dstTp, tval);
       rewriter.modifyOpInPlace(linalgOp, [&]() {
         linalgOp->setOperand(t->getOperandNumber(), dst);
       });
@@ -574,7 +575,7 @@ struct GenericOpScheduler : public OpRewritePattern<linalg::GenericOp> {
       // Release the transposed form afterwards.
       // TODO: CSE when used in more than one following op?
       rewriter.setInsertionPointAfter(linalgOp);
-      rewriter.create<bufferization::DeallocTensorOp>(dst.getLoc(), dst);
+      bufferization::DeallocTensorOp::create(rewriter, dst.getLoc(), dst);
 
       return success();
     }
@@ -604,8 +605,8 @@ struct TensorAllocDemapper : public OpRewritePattern<AllocOp> {
     ValueRange dynSz = op.getDynamicSizes();
     for (int64_t dimSz : stt.getDimShape()) {
       if (ShapedType::isDynamic(dimSz)) {
-        Value maxCrd = rewriter.create<arith::SubIOp>(
-            loc, dynSz.front(), constantIndex(rewriter, loc, 1));
+        Value maxCrd = arith::SubIOp::create(rewriter, loc, dynSz.front(),
+                                             constantIndex(rewriter, loc, 1));
         maxDimCrds.push_back(maxCrd);
         dynSz = dynSz.drop_front();
       } else {
@@ -619,8 +620,8 @@ struct TensorAllocDemapper : public OpRewritePattern<AllocOp> {
     SmallVector<Value> dynLvlSzs;
     for (unsigned i = 0, e = lvlShape.size(); i < e; i++) {
       if (ShapedType::isDynamic(lvlShape[i])) {
-        Value sz = rewriter.create<arith::AddIOp>(
-            loc, maxLvlCrds[i], constantIndex(rewriter, loc, 1));
+        Value sz = arith::AddIOp::create(rewriter, loc, maxLvlCrds[i],
+                                         constantIndex(rewriter, loc, 1));
         dynLvlSzs.push_back(sz);
       }
     }
@@ -650,8 +651,8 @@ struct TensorInsertDemapper
     auto stt = getSparseTensorType(op.getResult());
     ValueRange lvlCrd = stt.translateCrds(rewriter, loc, op.getIndices(),
                                           CrdTransDirectionKind::dim2lvl);
-    auto insertOp = rewriter.create<tensor::InsertOp>(
-        loc, op.getScalar(), adaptor.getDest(), lvlCrd);
+    auto insertOp = tensor::InsertOp::create(rewriter, loc, op.getScalar(),
+                                             adaptor.getDest(), lvlCrd);
 
     Value out = genRemap(rewriter, stt.getEncoding(), insertOp.getResult());
     rewriter.replaceOp(op, out);
@@ -765,7 +766,7 @@ struct ForeachOpDemapper
           stt && !stt->isIdentity()) {
         Value y =
             genDemap(rewriter, stt->getEncoding(), yield.getSingleResult());
-        rewriter.create<YieldOp>(loc, y);
+        YieldOp::create(rewriter, loc, y);
         rewriter.eraseOp(yield);
       }
     }

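The demap/remap helpers above always come in pairs that bracket the rewritten computation: `genDemap` strips the `dimToLvl` map with a `sparse_tensor.reinterpret_map`, and `genRemap` reapplies it on the result. A minimal sketch of that round trip (the pass-through middle is a stand-in for the actual rewritten op):

  #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
  #include "mlir/IR/Builders.h"

  using namespace mlir;
  using namespace mlir::sparse_tensor;

  static Value demapRemapRoundTrip(OpBuilder &builder, Location loc,
                                   SparseTensorEncodingAttr enc, Value val) {
    // Strip the dimToLvl map: downstream code sees an identity ordering.
    Value demapped =
        ReinterpretMapOp::create(builder, loc, enc.withoutDimToLvl(), val);
    // ... the rewritten computation would operate on `demapped` here ...
    // Reapply the original encoding before handing the value back.
    return ReinterpretMapOp::create(builder, loc, enc, demapped);
  }
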
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseSpaceCollapse.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseSpaceCollapse.cpp
index f85c4761a8d52..81cd3296de294 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseSpaceCollapse.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseSpaceCollapse.cpp
@@ -126,8 +126,8 @@ void collapseSparseSpace(MutableArrayRef<CollapseSpaceInfo> toCollapse) {
   OpBuilder builder(root);
 
   // Construct the collapsed iteration space.
-  auto collapsedSpace = builder.create<ExtractIterSpaceOp>(
-      loc, root.getTensor(), root.getParentIter(), root.getLoLvl(),
+  auto collapsedSpace = ExtractIterSpaceOp::create(
+      builder, loc, root.getTensor(), root.getParentIter(), root.getLoLvl(),
       leaf.getHiLvl());
 
   auto rItOp = llvm::cast<IterateOp>(*root->getUsers().begin());

diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp
index 01028f71c20bb..6dfffbb6e7442 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp
@@ -69,15 +69,15 @@ class SpecifierStructBuilder : public StructBuilder {
   Value extractField(OpBuilder &builder, Location loc,
                      ArrayRef<int64_t> indices) const {
     return genCast(builder, loc,
-                   builder.create<LLVM::ExtractValueOp>(loc, value, indices),
+                   LLVM::ExtractValueOp::create(builder, loc, value, indices),
                    builder.getIndexType());
   }
 
   void insertField(OpBuilder &builder, Location loc, ArrayRef<int64_t> indices,
                    Value v) {
-    value = builder.create<LLVM::InsertValueOp>(
-        loc, value, genCast(builder, loc, v, builder.getIntegerType(64)),
-        indices);
+    value = LLVM::InsertValueOp::create(
+        builder, loc, value,
+        genCast(builder, loc, v, builder.getIntegerType(64)), indices);
   }
 
 public:
@@ -110,7 +110,7 @@ class SpecifierStructBuilder : public StructBuilder {
 
 Value SpecifierStructBuilder::getInitValue(OpBuilder &builder, Location loc,
                                            Type structType, Value source) {
-  Value metaData = builder.create<LLVM::PoisonOp>(loc, structType);
+  Value metaData = LLVM::PoisonOp::create(builder, loc, structType);
   SpecifierStructBuilder md(metaData);
   if (!source) {
     auto memSizeArrayType =
@@ -204,15 +204,15 @@ void SpecifierStructBuilder::setMemSize(OpBuilder &builder, Location loc,
 /// Builds IR extracting the memory size array from the descriptor.
 Value SpecifierStructBuilder::memSizeArray(OpBuilder &builder,
                                            Location loc) const {
-  return builder.create<LLVM::ExtractValueOp>(loc, value,
-                                              kMemSizePosInSpecifier);
+  return LLVM::ExtractValueOp::create(builder, loc, value,
+                                      kMemSizePosInSpecifier);
 }
 
 /// Builds IR inserting the memory size array into the descriptor.
 void SpecifierStructBuilder::setMemSizeArray(OpBuilder &builder, Location loc,
                                              Value array) {
-  value = builder.create<LLVM::InsertValueOp>(loc, value, array,
-                                              kMemSizePosInSpecifier);
+  value = LLVM::InsertValueOp::create(builder, loc, value, array,
+                                      kMemSizePosInSpecifier);
 }
 
 } // namespace

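Since `llvm.insertvalue` is purely functional, the specifier builder above must re-assign its `value` member after every field update and read fields back with `llvm.extractvalue` over a constant index path. A small sketch of that idiom in the new spelling (the single-field path {0} is illustrative; the real specifier uses index paths like `kMemSizePosInSpecifier`):

  #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
  #include "mlir/IR/Builders.h"

  using namespace mlir;

  static Value roundTripStructField(OpBuilder &builder, Location loc,
                                    Type structTp, Value v) {
    // Start from a poison struct, as getInitValue does above.
    Value s = LLVM::PoisonOp::create(builder, loc, structTp);
    // insertvalue returns a *new* struct SSA value; thread it forward.
    s = LLVM::InsertValueOp::create(builder, loc, s, v, ArrayRef<int64_t>{0});
    // Read the field back out of the updated struct value.
    return LLVM::ExtractValueOp::create(builder, loc, s, ArrayRef<int64_t>{0});
  }
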
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 001ea62b07360..70795e2eb211b 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -50,7 +50,7 @@ static SmallVector<Value> flattenValues(ArrayRef<ValueRange> values) {
 /// Generates a load with proper `index` typing.
 static Value genLoad(OpBuilder &builder, Location loc, Value mem, Value idx) {
   idx = genCast(builder, loc, idx, builder.getIndexType());
-  return builder.create<memref::LoadOp>(loc, mem, idx);
+  return memref::LoadOp::create(builder, loc, mem, idx);
 }
 
 /// Generates a store with proper `index` typing and proper value.
@@ -59,7 +59,7 @@ static void genStore(OpBuilder &builder, Location loc, Value val, Value mem,
   idx = genCast(builder, loc, idx, builder.getIndexType());
   val = genCast(builder, loc, val,
                 cast<ShapedType>(mem.getType()).getElementType());
-  builder.create<memref::StoreOp>(loc, val, mem, idx);
+  memref::StoreOp::create(builder, loc, val, mem, idx);
 }
 
 /// Creates a straightforward counting for-loop.
@@ -70,7 +70,8 @@ static scf::ForOp createFor(OpBuilder &builder, Location loc, Value upper,
   if (!lower)
     lower = constantZero(builder, loc, indexType);
   Value one = constantOne(builder, loc, indexType);
-  scf::ForOp forOp = builder.create<scf::ForOp>(loc, lower, upper, one, fields);
+  scf::ForOp forOp =
+      scf::ForOp::create(builder, loc, lower, upper, one, fields);
   for (unsigned i = 0, e = fields.size(); i < e; i++)
     fields[i] = forOp.getRegionIterArg(i);
   builder.setInsertionPointToStart(forOp.getBody());
@@ -86,9 +87,9 @@ static void createPushback(OpBuilder &builder, Location loc,
   Value field = desc.getMemRefField(kind, lvl);
   StorageSpecifierKind specFieldKind = toSpecifierKind(kind);
 
-  auto pushBackOp = builder.create<PushBackOp>(
-      loc, desc.getSpecifierField(builder, loc, specFieldKind, lvl), field,
-      genCast(builder, loc, value, etp), repeat);
+  auto pushBackOp = PushBackOp::create(
+      builder, loc, desc.getSpecifierField(builder, loc, specFieldKind, lvl),
+      field, genCast(builder, loc, value, etp), repeat);
 
   desc.setMemRefField(kind, lvl, pushBackOp.getOutBuffer());
   desc.setSpecifierField(builder, loc, specFieldKind, lvl,
@@ -112,7 +113,7 @@ static void allocSchemeForRank(OpBuilder &builder, Location loc,
       Value posZero = constantZero(builder, loc, stt.getPosType());
       if (isLooseCompressedLT(lt)) {
         Value two = constantIndex(builder, loc, 2);
-        linear = builder.create<arith::MulIOp>(loc, linear, two);
+        linear = arith::MulIOp::create(builder, loc, linear, two);
       }
       createPushback(builder, loc, desc, SparseTensorFieldKind::PosMemRef, lvl,
                      /*value=*/posZero, /*repeat=*/linear);
@@ -125,7 +126,7 @@ static void allocSchemeForRank(OpBuilder &builder, Location loc,
     // otherwise the values array for the from-here "all-dense" case.
     assert(isDenseLT(lt));
     Value size = desc.getLvlSize(builder, loc, lvl);
-    linear = builder.create<arith::MulIOp>(loc, linear, size);
+    linear = arith::MulIOp::create(builder, loc, linear, size);
   }
   // Reached values array so prepare for an insertion.
   Value valZero = constantZero(builder, loc, stt.getElementType());
@@ -137,11 +138,11 @@ static void allocSchemeForRank(OpBuilder &builder, Location loc,
 static Value createAllocation(OpBuilder &builder, Location loc,
                               MemRefType memRefType, Value sz,
                               bool enableInit) {
-  Value buffer = builder.create<memref::AllocOp>(loc, memRefType, sz);
+  Value buffer = memref::AllocOp::create(builder, loc, memRefType, sz);
   Type elemType = memRefType.getElementType();
   if (enableInit) {
     Value fillValue = constantZero(builder, loc, elemType);
-    builder.create<linalg::FillOp>(loc, fillValue, buffer);
+    linalg::FillOp::create(builder, loc, fillValue, buffer);
   }
   return buffer;
 }
@@ -178,16 +179,16 @@ static void createAllocFields(OpBuilder &builder, Location loc,
   if (stt.isAllDense()) {
     valHeuristic = lvlSizesValues[0];
     for (Level lvl = 1; lvl < lvlRank; lvl++)
-      valHeuristic =
-          builder.create<arith::MulIOp>(loc, valHeuristic, lvlSizesValues[lvl]);
+      valHeuristic = arith::MulIOp::create(builder, loc, valHeuristic,
+                                           lvlSizesValues[lvl]);
   } else if (sizeHint) {
     if (stt.getAoSCOOStart() == 0) {
       posHeuristic = constantIndex(builder, loc, 2);
-      crdHeuristic = builder.create<arith::MulIOp>(
-          loc, constantIndex(builder, loc, lvlRank), sizeHint); // AOS
+      crdHeuristic = arith::MulIOp::create(
+          builder, loc, constantIndex(builder, loc, lvlRank), sizeHint); // AOS
     } else if (lvlRank == 2 && stt.isDenseLvl(0) && stt.isCompressedLvl(1)) {
-      posHeuristic = builder.create<arith::AddIOp>(
-          loc, sizeHint, constantIndex(builder, loc, 1));
+      posHeuristic = arith::AddIOp::create(builder, loc, sizeHint,
+                                           constantIndex(builder, loc, 1));
       crdHeuristic = sizeHint;
     } else {
       posHeuristic = crdHeuristic = constantIndex(builder, loc, 16);
@@ -280,7 +281,7 @@ static Value genCompressed(OpBuilder &builder, Location loc,
   unsigned crdStride;
   std::tie(crdFidx, crdStride) = desc.getCrdMemRefIndexAndStride(lvl);
   const Value one = constantIndex(builder, loc, 1);
-  const Value pp1 = builder.create<arith::AddIOp>(loc, parentPos, one);
+  const Value pp1 = arith::AddIOp::create(builder, loc, parentPos, one);
   const Value positionsAtLvl = desc.getPosMemRef(lvl);
   const Value pstart = genLoad(builder, loc, positionsAtLvl, parentPos);
   const Value pstop = genLoad(builder, loc, positionsAtLvl, pp1);
@@ -288,29 +289,29 @@ static Value genCompressed(OpBuilder &builder, Location loc,
   const Value crdStrideC =
       crdStride > 1 ? constantIndex(builder, loc, crdStride) : Value();
   const Value msz =
-      crdStrideC ? builder.create<arith::DivUIOp>(loc, crdMsz, crdStrideC)
+      crdStrideC ? arith::DivUIOp::create(builder, loc, crdMsz, crdStrideC)
                  : crdMsz;
-  const Value plast = builder.create<arith::SubIOp>(
-      loc, genCast(builder, loc, pstop, indexType), one);
+  const Value plast = arith::SubIOp::create(
+      builder, loc, genCast(builder, loc, pstop, indexType), one);
   // Conditional expression.
-  Value lt = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult,
-                                           pstart, pstop);
+  Value lt = arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ult,
+                                   pstart, pstop);
   types.push_back(boolType);
-  scf::IfOp ifOp1 = builder.create<scf::IfOp>(loc, types, lt, /*else*/ true);
+  scf::IfOp ifOp1 = scf::IfOp::create(builder, loc, types, lt, /*else*/ true);
   types.pop_back();
   builder.setInsertionPointToStart(&ifOp1.getThenRegion().front());
-  Value crd =
-      genLoad(builder, loc, desc.getMemRefField(crdFidx),
-              crdStrideC ? builder.create<arith::MulIOp>(loc, plast, crdStrideC)
-                         : plast);
-  Value eq = builder.create<arith::CmpIOp>(
-      loc, arith::CmpIPredicate::eq, genCast(builder, loc, crd, indexType),
-      lvlCoords[lvl]);
-  builder.create<scf::YieldOp>(loc, eq);
+  Value crd = genLoad(
+      builder, loc, desc.getMemRefField(crdFidx),
+      crdStrideC ? arith::MulIOp::create(builder, loc, plast, crdStrideC)
+                 : plast);
+  Value eq = arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::eq,
+                                   genCast(builder, loc, crd, indexType),
+                                   lvlCoords[lvl]);
+  scf::YieldOp::create(builder, loc, eq);
   builder.setInsertionPointToStart(&ifOp1.getElseRegion().front());
   if (lvl > 0)
     genStore(builder, loc, msz, positionsAtLvl, parentPos);
-  builder.create<scf::YieldOp>(loc, constantI1(builder, loc, false));
+  scf::YieldOp::create(builder, loc, constantI1(builder, loc, false));
   builder.setInsertionPointAfter(ifOp1);
   // If present construct. Note that for a non-unique dimension level, we
   // simply set the condition to false and rely on CSE/DCE to clean up the IR.
@@ -322,19 +323,19 @@ static Value genCompressed(OpBuilder &builder, Location loc,
   types.push_back(indexType);
   const Value p = stt.isUniqueLvl(lvl) ? ifOp1.getResult(0)
                                        : constantI1(builder, loc, false);
-  scf::IfOp ifOp2 = builder.create<scf::IfOp>(loc, types, p, /*else*/ true);
+  scf::IfOp ifOp2 = scf::IfOp::create(builder, loc, types, p, /*else*/ true);
   // If present (fields unaffected, update pnext to plast).
   builder.setInsertionPointToStart(&ifOp2.getThenRegion().front());
 
   // FIXME: This does not look like a clean way, but it is probably the most
   // efficient way.
   desc.getFields().push_back(plast);
-  builder.create<scf::YieldOp>(loc, desc.getFields());
+  scf::YieldOp::create(builder, loc, desc.getFields());
   desc.getFields().pop_back();
 
   // If !present (changes fields, update pnext).
   builder.setInsertionPointToStart(&ifOp2.getElseRegion().front());
-  Value mszp1 = builder.create<arith::AddIOp>(loc, msz, one);
+  Value mszp1 = arith::AddIOp::create(builder, loc, msz, one);
   genStore(builder, loc, mszp1, positionsAtLvl, pp1);
   createPushback(builder, loc, desc, SparseTensorFieldKind::CrdMemRef, lvl,
                  /*value=*/lvlCoords[lvl]);
@@ -343,7 +344,7 @@ static Value genCompressed(OpBuilder &builder, Location loc,
     allocSchemeForRank(builder, loc, desc, lvl + 1);
 
   desc.getFields().push_back(msz);
-  builder.create<scf::YieldOp>(loc, desc.getFields());
+  scf::YieldOp::create(builder, loc, desc.getFields());
   desc.getFields().pop_back();
 
   // Update fields and return next pos.
@@ -381,17 +382,17 @@ static void genEndInsert(OpBuilder &builder, Location loc,
         Value oldv = loop.getRegionIterArg(0);
         Value newv = genLoad(builder, loc, posMemRef, i);
         Value posZero = constantZero(builder, loc, posType);
-        Value cond = builder.create<arith::CmpIOp>(
-            loc, arith::CmpIPredicate::eq, newv, posZero);
-        scf::IfOp ifOp = builder.create<scf::IfOp>(loc, TypeRange(posType),
-                                                   cond, /*else*/ true);
+        Value cond = arith::CmpIOp::create(
+            builder, loc, arith::CmpIPredicate::eq, newv, posZero);
+        scf::IfOp ifOp = scf::IfOp::create(builder, loc, TypeRange(posType),
+                                           cond, /*else*/ true);
         builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
         genStore(builder, loc, oldv, posMemRef, i);
-        builder.create<scf::YieldOp>(loc, oldv);
+        scf::YieldOp::create(builder, loc, oldv);
         builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
-        builder.create<scf::YieldOp>(loc, newv);
+        scf::YieldOp::create(builder, loc, newv);
         builder.setInsertionPointAfter(ifOp);
-        builder.create<scf::YieldOp>(loc, ifOp.getResult(0));
+        scf::YieldOp::create(builder, loc, ifOp.getResult(0));
         builder.setInsertionPointAfter(loop);
       }
     } else {
@@ -484,7 +485,7 @@ class SparseInsertGenerator
         //   <insert @ positions[lvl] at next level lvl + 1>
         if (isLooseCompressedLT(lt)) {
           Value two = constantIndex(builder, loc, 2);
-          parentPos = builder.create<arith::MulIOp>(loc, parentPos, two);
+          parentPos = arith::MulIOp::create(builder, loc, parentPos, two);
         }
         parentPos =
             genCompressed(builder, loc, desc, coords, value, parentPos, lvl);
@@ -501,8 +502,8 @@ class SparseInsertGenerator
         //   positions[lvl] = size * positions[lvl-1] + coords[lvl]
         //   <insert @ positions[lvl] at next level lvl + 1>
         Value size = desc.getLvlSize(builder, loc, lvl);
-        Value mult = builder.create<arith::MulIOp>(loc, size, parentPos);
-        parentPos = builder.create<arith::AddIOp>(loc, mult, coords[lvl]);
+        Value mult = arith::MulIOp::create(builder, loc, size, parentPos);
+        parentPos = arith::AddIOp::create(builder, loc, mult, coords[lvl]);
       }
     }
     // Reached the actual value append/insert.
@@ -582,8 +583,9 @@ class SparseCallConverter : public OpConversionPattern<func::CallOp> {
       return failure();
 
     // (1) Generates new call with flattened return value.
-    auto newCall = rewriter.create<func::CallOp>(
-        loc, op.getCallee(), finalRetTy, flattenValues(adaptor.getOperands()));
+    auto newCall =
+        func::CallOp::create(rewriter, loc, op.getCallee(), finalRetTy,
+                             flattenValues(adaptor.getOperands()));
     // (2) Gather sparse tensor returns.
     SmallVector<SmallVector<Value>> packedResultVals;
     // Tracks the offset of current return value (of the original call)
@@ -671,8 +673,8 @@ struct SparseReorderCOOConverter : public OpConversionPattern<ReorderCOOOp> {
 
     auto id = AffineMap::getMultiDimIdentityMap(srcStt.getLvlRank(), ctx);
 
-    rewriter.create<SortOp>(loc, nnz, crd, ValueRange{val}, id,
-                            rewriter.getIndexAttr(0), op.getAlgorithm());
+    SortOp::create(rewriter, loc, nnz, crd, ValueRange{val}, id,
+                   rewriter.getIndexAttr(0), op.getAlgorithm());
 
     // Since we do in-place sorting, the destination tensor will have the same set
     // of memrefs as the source tensor.
@@ -757,10 +759,10 @@ class SparseTensorAllocConverter
       // Memcpy on memref fields.
       for (auto field : desc.getMemRefFields()) {
         auto memrefTp = cast<MemRefType>(field.getType());
-        auto size = rewriter.create<memref::DimOp>(loc, field, 0);
+        auto size = memref::DimOp::create(rewriter, loc, field, 0);
         auto copied =
-            rewriter.create<memref::AllocOp>(loc, memrefTp, ValueRange{size});
-        rewriter.create<memref::CopyOp>(loc, field, copied);
+            memref::AllocOp::create(rewriter, loc, memrefTp, ValueRange{size});
+        memref::CopyOp::create(rewriter, loc, field, copied);
         fields.push_back(copied);
       }
       // Reuses specifier.
@@ -863,7 +865,7 @@ class SparseTensorDeallocConverter
           cast<RankedTensorType>(op.getTensor().getType()));
       for (auto input : desc.getMemRefFields())
         // Deallocate every buffer used to store the sparse tensor handler.
-        rewriter.create<memref::DeallocOp>(loc, input);
+        memref::DeallocOp::create(rewriter, loc, input);
     }
     rewriter.eraseOp(op);
     return success();
@@ -917,7 +919,7 @@ class SparseExpandConverter : public OpConversionPattern<ExpandOp> {
     // Generate a memref for `sz` elements of type `t`.
     const auto genAlloc = [&](Type t) {
       const auto memTp = MemRefType::get({ShapedType::kDynamic}, t);
-      return rewriter.create<memref::AllocOp>(loc, memTp, ValueRange{sz});
+      return memref::AllocOp::create(rewriter, loc, memTp, ValueRange{sz});
     };
     // Allocate temporary buffers for values/filled-switch and added.
     // We do not use stack buffers for this, since the expanded size may
@@ -931,12 +933,12 @@ class SparseExpandConverter : public OpConversionPattern<ExpandOp> {
     // operation is amortized over the innermost loops for the access
     // pattern expansion. As noted in the operation doc, we would like
     // to amortize this setup cost even between kernels.
-    rewriter.create<linalg::FillOp>(
-        loc, ValueRange{constantZero(rewriter, loc, eltType)},
-        ValueRange{values});
-    rewriter.create<linalg::FillOp>(
-        loc, ValueRange{constantZero(rewriter, loc, boolType)},
-        ValueRange{filled});
+    linalg::FillOp::create(rewriter, loc,
+                           ValueRange{constantZero(rewriter, loc, eltType)},
+                           ValueRange{values});
+    linalg::FillOp::create(rewriter, loc,
+                           ValueRange{constantZero(rewriter, loc, boolType)},
+                           ValueRange{filled});
     // Replace expansion op with these buffers and initial coordinate.
     assert(op.getNumResults() == 4);
     rewriter.replaceOp(op, {values, filled, added, zero});
@@ -965,9 +967,10 @@ class SparseCompressConverter : public OpConversionPattern<CompressOp> {
     // If the innermost level is ordered, we need to sort the coordinates
     // in the "added" array prior to applying the compression.
     if (dstType.isOrderedLvl(dstType.getLvlRank() - 1))
-      rewriter.create<SortOp>(
-          loc, count, added, ValueRange{}, rewriter.getMultiDimIdentityMap(1),
-          rewriter.getIndexAttr(0), SparseTensorSortKind::HybridQuickSort);
+      SortOp::create(rewriter, loc, count, added, ValueRange{},
+                     rewriter.getMultiDimIdentityMap(1),
+                     rewriter.getIndexAttr(0),
+                     SparseTensorSortKind::HybridQuickSort);
     // While performing the insertions, we also need to reset the elements
     // of the values/filled-switch by only iterating over the set elements,
     // to ensure that the runtime complexity remains proportional to the
@@ -1000,15 +1003,15 @@ class SparseCompressConverter : public OpConversionPattern<CompressOp> {
     SmallVector<Value> insertRet = insertGen.genCallOrInline(rewriter, loc);
     genStore(rewriter, loc, constantZero(rewriter, loc, eltType), values, crd);
     genStore(rewriter, loc, constantI1(rewriter, loc, false), filled, crd);
-    rewriter.create<scf::YieldOp>(loc, insertRet);
+    scf::YieldOp::create(rewriter, loc, insertRet);
 
     rewriter.setInsertionPointAfter(loop);
     // Deallocate the buffers on exit of the full loop nest.
     Operation *parent = getTop(op);
     rewriter.setInsertionPointAfter(parent);
-    rewriter.create<memref::DeallocOp>(loc, values);
-    rewriter.create<memref::DeallocOp>(loc, filled);
-    rewriter.create<memref::DeallocOp>(loc, added);
+    memref::DeallocOp::create(rewriter, loc, values);
+    memref::DeallocOp::create(rewriter, loc, filled);
+    memref::DeallocOp::create(rewriter, loc, added);
     // Replace operation with resulting memrefs.
     rewriter.replaceOpWithMultiple(op, {loop->getResults()});
     return success();
@@ -1192,8 +1195,8 @@ class SparseConvertConverter : public OpConversionPattern<ConvertOp> {
             // would require a subViewOp to avoid overflow when copying
             // values.
             Value sz = linalg::createOrFoldDimOp(rewriter, loc, srcMem, 0);
-            auto dstMem = rewriter.create<memref::AllocOp>(
-                loc, cast<MemRefType>(fTp), sz);
+            auto dstMem = memref::AllocOp::create(rewriter, loc,
+                                                  cast<MemRefType>(fTp), sz);
             if (fTp != srcMem.getType()) {
               // Converts elements type.
               scf::buildLoopNest(
@@ -1201,16 +1204,16 @@ class SparseConvertConverter : public OpConversionPattern<ConvertOp> {
                   constantIndex(rewriter, loc, 1),
                   [srcMem, &dstMem](OpBuilder &builder, Location loc,
                                     ValueRange ivs) {
-                    Value v = builder.create<memref::LoadOp>(loc, srcMem, ivs);
+                    Value v = memref::LoadOp::create(builder, loc, srcMem, ivs);
                     Value casted = genCast(builder, loc, v,
                                            dstMem.getType().getElementType());
-                    builder.create<memref::StoreOp>(loc, casted, dstMem, ivs);
+                    memref::StoreOp::create(builder, loc, casted, dstMem, ivs);
                   });
             } else {
               // TODO: We can even reuse the same memref for the new tensor,
               // but that requires `ref-counting`-based memory management
               // for shared memrefs between multiple sparse tensors.
-              rewriter.create<memref::CopyOp>(loc, srcMem, dstMem);
+              memref::CopyOp::create(rewriter, loc, srcMem, dstMem);
             }
             fields.push_back(dstMem);
           }
@@ -1242,8 +1245,9 @@ class SparseExtractSliceConverter
     auto desc = getMutDescriptorFromTensorTuple(adaptor.getSource(), fields,
                                                 op.getSource().getType());
 
-    auto newSpec = rewriter.create<StorageSpecifierInitOp>(
-        loc, StorageSpecifierType::get(ctx, dstEnc), desc.getSpecifier());
+    auto newSpec = StorageSpecifierInitOp::create(
+        rewriter, loc, StorageSpecifierType::get(ctx, dstEnc),
+        desc.getSpecifier());
     desc.setSpecifier(newSpec);
 
     // Fills in slice information.
@@ -1326,11 +1330,11 @@ struct SparseAssembleOpConverter : public OpConversionPattern<AssembleOp> {
               // Flattens the buffer to batchLvlRank.
               auto reassoc = getReassociationForFlattening(
                   mem.getType(), stt.getBatchLvlRank());
-              mem = rewriter.create<memref::CastOp>(
-                  loc, fType,
-                  rewriter.create<memref::CollapseShapeOp>(loc, mem, reassoc));
+              mem = memref::CastOp::create(
+                  rewriter, loc, fType,
+                  memref::CollapseShapeOp::create(rewriter, loc, mem, reassoc));
             } else {
-              mem = rewriter.create<memref::CastOp>(loc, fType, mem);
+              mem = memref::CastOp::create(rewriter, loc, fType, mem);
             }
             fields.push_back(mem);
           }
@@ -1362,8 +1366,8 @@ struct SparseAssembleOpConverter : public OpConversionPattern<AssembleOp> {
       LevelType lt = stt.getLvlType(lvl);
       // Simply forwards the position index when this is a dense level.
       if (lt.isa<LevelFormat::Dense>()) {
-        memSize = rewriter.create<arith::MulIOp>(loc, lvlSize, memSize);
-        posBack = rewriter.create<arith::SubIOp>(loc, memSize, c1);
+        memSize = arith::MulIOp::create(rewriter, loc, lvlSize, memSize);
+        posBack = arith::SubIOp::create(rewriter, loc, memSize, c1);
         continue;
       }
       if (lt.isa<LevelFormat::Batch>()) {
@@ -1376,12 +1380,12 @@ struct SparseAssembleOpConverter : public OpConversionPattern<AssembleOp> {
       if (isWithPosLT(lt)) {
         assert(isCompressedLT(lt) || isLooseCompressedLT(lt));
         if (isLooseCompressedLT(lt)) {
-          memSize = rewriter.create<arith::MulIOp>(loc, memSize, c2);
-          posBack = rewriter.create<arith::SubIOp>(loc, memSize, c1);
+          memSize = arith::MulIOp::create(rewriter, loc, memSize, c2);
+          posBack = arith::SubIOp::create(rewriter, loc, memSize, c1);
         } else {
           assert(isCompressedLT(lt));
           posBack = memSize;
-          memSize = rewriter.create<arith::AddIOp>(loc, memSize, c1);
+          memSize = arith::AddIOp::create(rewriter, loc, memSize, c1);
         }
         desc.setPosMemSize(rewriter, loc, lvl, memSize);
         // The last value in position array is the memory size for next level.
@@ -1391,13 +1395,13 @@ struct SparseAssembleOpConverter : public OpConversionPattern<AssembleOp> {
                                    constantIndex(rewriter, loc, 0));
         batched.push_back(posBack);
         memSize = genIndexLoad(rewriter, loc, desc.getPosMemRef(lvl), batched);
-        posBack = rewriter.create<arith::SubIOp>(loc, posBack, c1);
+        posBack = arith::SubIOp::create(rewriter, loc, posBack, c1);
       }
       assert(isWithCrdLT(lt) && lvl <= trailCOOStart);
       // FIXME: This seems to be unnecessarily complex; can we simplify it?
       if (lvl == trailCOOStart) {
-        Value cooSz = rewriter.create<arith::MulIOp>(
-            loc, memSize, constantIndex(rewriter, loc, trailCOORank));
+        Value cooSz = arith::MulIOp::create(
+            rewriter, loc, memSize, constantIndex(rewriter, loc, trailCOORank));
         desc.setCrdMemSize(rewriter, loc, lvl, cooSz);
       } else {
         desc.setCrdMemSize(rewriter, loc, lvl, memSize);
@@ -1460,19 +1464,20 @@ struct SparseDisassembleOpConverter
       if (dst.getType().getRank() > stt.getBatchLvlRank() + 1) {
         auto reassoc =
             getReassociationForFlattening(dst.getType(), stt.getBatchLvlRank());
-        flatOut = rewriter.create<memref::CollapseShapeOp>(loc, dst, reassoc);
+        flatOut = memref::CollapseShapeOp::create(rewriter, loc, dst, reassoc);
       }
       Value dstMem = genSliceToSize(rewriter, loc, flatOut, sz);
       Value srcMem = genSliceToSize(rewriter, loc, src, sz);
-      rewriter.create<memref::CopyOp>(loc, srcMem, dstMem);
+      memref::CopyOp::create(rewriter, loc, srcMem, dstMem);
       return true;
     });
 
     // Converts MemRefs back to Tensors.
     SmallVector<Value> retValues = llvm::to_vector(
         llvm::map_range(retMem, [&rewriter, loc](Value v) -> Value {
-          return rewriter.create<bufferization::ToTensorOp>(
-              loc, memref::getTensorTypeFromMemRefType(v.getType()), v);
+          return bufferization::ToTensorOp::create(
+              rewriter, loc, memref::getTensorTypeFromMemRefType(v.getType()),
+              v);
         }));
     // Appends the actual memory length used in each buffer returned.
     retValues.append(retLen.begin(), retLen.end());
@@ -1549,15 +1554,15 @@ struct SparseNewConverter : public OpConversionPattern<NewOp> {
     const Level lvlRank = dstTp.getLvlRank();
     if (dstTp.isOrderedLvl(lvlRank - 1)) {
       Value kFalse = constantI1(rewriter, loc, false);
-      Value notSorted = rewriter.create<arith::CmpIOp>(
-          loc, arith::CmpIPredicate::eq, isSorted, kFalse);
+      Value notSorted = arith::CmpIOp::create(
+          rewriter, loc, arith::CmpIPredicate::eq, isSorted, kFalse);
       scf::IfOp ifOp =
-          rewriter.create<scf::IfOp>(loc, notSorted, /*else*/ false);
+          scf::IfOp::create(rewriter, loc, notSorted, /*else*/ false);
       rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
       auto xPerm = rewriter.getMultiDimIdentityMap(lvlRank);
-      rewriter.create<SortOp>(loc, nse, xs, ValueRange{ys}, xPerm,
-                              rewriter.getIndexAttr(0),
-                              SparseTensorSortKind::HybridQuickSort);
+      SortOp::create(rewriter, loc, nse, xs, ValueRange{ys}, xPerm,
+                     rewriter.getIndexAttr(0),
+                     SparseTensorSortKind::HybridQuickSort);
       rewriter.setInsertionPointAfter(ifOp);
     }
 
@@ -1566,11 +1571,11 @@ struct SparseNewConverter : public OpConversionPattern<NewOp> {
     const Value posMemref0 = desc.getPosMemRef(0);
     const Type posTp = dstTp.getPosType();
     const Value posNse = genCast(rewriter, loc, nse, posTp);
-    rewriter.create<memref::StoreOp>(loc, posNse, posMemref0, c1);
+    memref::StoreOp::create(rewriter, loc, posNse, posMemref0, c1);
 
     // Update storage specifier.
-    Value coordinatesSize = rewriter.create<arith::MulIOp>(
-        loc, nse, constantIndex(rewriter, loc, lvlRank));
+    Value coordinatesSize = arith::MulIOp::create(
+        rewriter, loc, nse, constantIndex(rewriter, loc, lvlRank));
     desc.setSpecifierField(rewriter, loc, StorageSpecifierKind::CrdMemSize, 0,
                            coordinatesSize);
     desc.setSpecifierField(rewriter, loc, StorageSpecifierKind::ValMemSize,

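Every hunk above applies the same mechanical rewrite: op construction moves from the `OpBuilder` member-function template to the static `create` method on the op class, with the builder passed as the first argument. A minimal sketch of the pattern (using `arith::AddIOp` and placeholder values `lhs`/`rhs` purely for illustration):

    // Before: construction goes through the OpBuilder entry point.
    Value sum = builder.create<arith::AddIOp>(loc, lhs, rhs);
    // After: the op class itself exposes a static create taking the builder.
    Value sum = arith::AddIOp::create(builder, loc, lhs, rhs);

Both forms build the identical operation at the current insertion point; only the call syntax changes.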
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index 50ccb43d432b6..134aef3a6c719 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -137,7 +137,7 @@ static SmallVector<Value> getDimSizes(OpBuilder &builder, Location loc,
 /// this buffer must be explicitly deallocated by the client.
 static Value genAlloc(RewriterBase &rewriter, Location loc, Value sz, Type tp) {
   auto memTp = MemRefType::get({ShapedType::kDynamic}, tp);
-  return rewriter.create<memref::AllocOp>(loc, memTp, ValueRange{sz});
+  return memref::AllocOp::create(rewriter, loc, memTp, ValueRange{sz});
 }
 
 /// Generates a temporary buffer for the level-types of the given encoding.
@@ -154,7 +154,7 @@ static Value genLvlTypesBuffer(OpBuilder &builder, Location loc,
 static Value extractBarePtrFromTensor(OpBuilder &builder, Location loc,
                                       Value tensor) {
   auto buf = genToMemref(builder, loc, tensor);
-  return builder.create<memref::ExtractAlignedPointerAsIndexOp>(loc, buf);
+  return memref::ExtractAlignedPointerAsIndexOp::create(builder, loc, buf);
 }
 
 /// Generates a temporary buffer for the level-types of the given encoding.
@@ -168,12 +168,12 @@ static Value genLvlPtrsBuffers(OpBuilder &builder, Location loc,
 
   // Passing in value buffer pointers.
   lvlBarePtrs.push_back(extractBarePtrFromTensor(builder, loc, valTensor));
-  Value idxPtr = builder.create<memref::ExtractAlignedPointerAsIndexOp>(
-      loc, allocaBuffer(builder, loc, lvlBarePtrs));
+  Value idxPtr = memref::ExtractAlignedPointerAsIndexOp::create(
+      builder, loc, allocaBuffer(builder, loc, lvlBarePtrs));
   Value idxCast =
-      builder.create<arith::IndexCastOp>(loc, builder.getI64Type(), idxPtr);
-  return builder.create<LLVM::IntToPtrOp>(loc, getOpaquePointerType(builder),
-                                          idxCast);
+      arith::IndexCastOp::create(builder, loc, builder.getI64Type(), idxPtr);
+  return LLVM::IntToPtrOp::create(builder, loc, getOpaquePointerType(builder),
+                                  idxCast);
 }
 
 /// This class abstracts over the API of `_mlir_ciface_newSparseTensor`:
@@ -227,7 +227,7 @@ class NewCallParams final {
     assert(isInitialized() && "Must initialize before genNewCall");
     StringRef name = "newSparseTensor";
     params[kParamAction] = constantAction(builder, loc, action);
-    params[kParamPtr] = ptr ? ptr : builder.create<LLVM::ZeroOp>(loc, pTp);
+    params[kParamPtr] = ptr ? ptr : LLVM::ZeroOp::create(builder, loc, pTp);
     return createFuncCall(builder, loc, name, pTp, params, EmitCInterface::On)
         .getResult(0);
   }
@@ -539,7 +539,7 @@ class SparseTensorToCoordinatesConverter
     // Cast the MemRef type to the type expected by the users, though these
     // two types should be compatible at runtime.
     if (op.getType() != crds.getType())
-      crds = rewriter.create<memref::CastOp>(loc, op.getType(), crds);
+      crds = memref::CastOp::create(rewriter, loc, op.getType(), crds);
     rewriter.replaceOp(op, crds);
     return success();
   }
@@ -560,7 +560,7 @@ class SparseToCoordinatesBufferConverter
     // Cast the MemRef type to the type expected by the users, though these
     // two types should be compatible at runtime.
     if (op.getType() != crds.getType())
-      crds = rewriter.create<memref::CastOp>(loc, op.getType(), crds);
+      crds = memref::CastOp::create(rewriter, loc, op.getType(), crds);
     rewriter.replaceOp(op, crds);
     return success();
   }
@@ -652,7 +652,7 @@ class SparseTensorInsertConverter
       vref = genAllocaScalar(rewriter, loc, elemTp);
     }
     storeAll(rewriter, loc, lvlCoords, adaptor.getIndices());
-    rewriter.create<memref::StoreOp>(loc, adaptor.getScalar(), vref);
+    memref::StoreOp::create(rewriter, loc, adaptor.getScalar(), vref);
     SmallString<12> name{"lexInsert", primaryTypeFunctionSuffix(elemTp)};
     createFuncCall(rewriter, loc, name, {},
                    {adaptor.getDest(), lvlCoords, vref}, EmitCInterface::On);
@@ -690,12 +690,12 @@ class SparseTensorExpandConverter : public OpConversionPattern<ExpandOp> {
     // operation is amortized over the innermost loops for the access
     // pattern expansion. As noted in the operation doc, we would like
     // to amortize this setup cost even between kernels.
-    rewriter.create<linalg::FillOp>(
-        loc, ValueRange{constantZero(rewriter, loc, eltType)},
-        ValueRange{values});
-    rewriter.create<linalg::FillOp>(
-        loc, ValueRange{constantZero(rewriter, loc, boolType)},
-        ValueRange{filled});
+    linalg::FillOp::create(rewriter, loc,
+                           ValueRange{constantZero(rewriter, loc, eltType)},
+                           ValueRange{values});
+    linalg::FillOp::create(rewriter, loc,
+                           ValueRange{constantZero(rewriter, loc, boolType)},
+                           ValueRange{filled});
     // Replace expansion op with these buffers and initial coordinate.
     assert(op.getNumResults() == 4);
     rewriter.replaceOp(op, {values, filled, lastLvlCoordinates, zero});
@@ -733,9 +733,9 @@ class SparseTensorCompressConverter : public OpConversionPattern<CompressOp> {
     rewriter.replaceOp(op, adaptor.getTensor());
     // Deallocate the buffers on exit of the loop nest.
     rewriter.setInsertionPointAfter(parent);
-    rewriter.create<memref::DeallocOp>(loc, values);
-    rewriter.create<memref::DeallocOp>(loc, filled);
-    rewriter.create<memref::DeallocOp>(loc, added);
+    memref::DeallocOp::create(rewriter, loc, values);
+    memref::DeallocOp::create(rewriter, loc, filled);
+    memref::DeallocOp::create(rewriter, loc, added);
     return success();
   }
 };
@@ -837,21 +837,21 @@ class SparseTensorDisassembleConverter
                                       cooStartLvl + 1);
       auto crdLen = linalg::createOrFoldDimOp(rewriter, loc, crds0, 0);
       auto two = constantIndex(rewriter, loc, 2);
-      auto bufLen = rewriter.create<arith::MulIOp>(loc, crdLen, two);
+      auto bufLen = arith::MulIOp::create(rewriter, loc, crdLen, two);
       Type indexType = rewriter.getIndexType();
       auto zero = constantZero(rewriter, loc, indexType);
       auto one = constantOne(rewriter, loc, indexType);
-      scf::ForOp forOp = rewriter.create<scf::ForOp>(loc, zero, crdLen, one);
+      scf::ForOp forOp = scf::ForOp::create(rewriter, loc, zero, crdLen, one);
       auto idx = forOp.getInductionVar();
       rewriter.setInsertionPointToStart(forOp.getBody());
-      auto c0 = rewriter.create<memref::LoadOp>(loc, crds0, idx);
-      auto c1 = rewriter.create<memref::LoadOp>(loc, crds1, idx);
+      auto c0 = memref::LoadOp::create(rewriter, loc, crds0, idx);
+      auto c1 = memref::LoadOp::create(rewriter, loc, crds1, idx);
       SmallVector<Value> args;
       args.push_back(idx);
       args.push_back(zero);
-      rewriter.create<memref::StoreOp>(loc, c0, buf, args);
+      memref::StoreOp::create(rewriter, loc, c0, buf, args);
       args[1] = one;
-      rewriter.create<memref::StoreOp>(loc, c1, buf, args);
+      memref::StoreOp::create(rewriter, loc, c1, buf, args);
       rewriter.setInsertionPointAfter(forOp);
       auto bufLenTp = op.getLvlLens().getTypes()[retLen.size()];
       retVal.push_back(buf);
@@ -867,11 +867,11 @@ class SparseTensorDisassembleConverter
     // Converts MemRefs back to Tensors.
     assert(retVal.size() + retLen.size() == op.getNumResults());
     for (unsigned i = 0, sz = retVal.size(); i < sz; i++) {
-      auto tensor = rewriter.create<bufferization::ToTensorOp>(
-          loc, memref::getTensorTypeFromMemRefType(retVal[i].getType()),
-          retVal[i]);
+      auto tensor = bufferization::ToTensorOp::create(
+          rewriter, loc,
+          memref::getTensorTypeFromMemRefType(retVal[i].getType()), retVal[i]);
       retVal[i] =
-          rewriter.create<tensor::CastOp>(loc, op.getResultTypes()[i], tensor);
+          tensor::CastOp::create(rewriter, loc, op.getResultTypes()[i], tensor);
     }
 
     // Appends the actual memory length used in each buffer returned.

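The `genAlloc` helper above documents that its buffer must be freed by the caller. A hedged usage sketch under that contract (the element type and size below are illustrative, not taken from this patch):

    // Allocate a dynamically sized scratch buffer of 16 doubles ...
    Value sz = constantIndex(rewriter, loc, 16);
    Value buf = genAlloc(rewriter, loc, sz, rewriter.getF64Type());
    // ... use the buffer while lowering the op ...
    // ... then release it explicitly, as the converters above do on loop exit.
    memref::DeallocOp::create(rewriter, loc, buf);

This mirrors the alloc/dealloc pairing visible in the expand and compress converters.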
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index d4a02bf7a70b6..b444ac5ba1285 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -127,7 +127,7 @@ static void sizesForTensor(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
   for (const auto &d : enumerate(stp.getShape())) {
     Value dim;
     if (d.value() == ShapedType::kDynamic)
-      dim = builder.create<tensor::DimOp>(loc, tensor, d.index());
+      dim = tensor::DimOp::create(builder, loc, tensor, d.index());
     else
       dim = constantIndex(builder, loc, d.value());
     sizes.push_back(dim);
@@ -198,7 +198,7 @@ static void concatSizesFromInputs(OpBuilder &builder,
     for (const auto &src : srcs.drop_front()) {
       Value srcSz = linalg::createOrFoldDimOp(builder, loc, src, dim);
       // Sum up all the sizes.
-      sizes[dim] = builder.create<arith::AddIOp>(loc, sizes[dim], srcSz);
+      sizes[dim] = arith::AddIOp::create(builder, loc, sizes[dim], srcSz);
     }
   }
 }
@@ -405,8 +405,8 @@ struct FuseSparseMultiplyOverAdd : public OpRewritePattern<GenericOp> {
     inputOps.push_back(op.getDpsInputOperand(1 - other)->get());
     fusedIndexMaps.push_back(fusedIndexMaps.back()); // mimic other
     // Fuse producer and consumer into a new generic op.
-    auto fusedOp = rewriter.create<GenericOp>(
-        loc, op.getResult(0).getType(), inputOps, outputOps,
+    auto fusedOp = GenericOp::create(
+        rewriter, loc, op.getResult(0).getType(), inputOps, outputOps,
         rewriter.getAffineMapArrayAttr(fusedIndexMaps), prod.getIteratorTypes(),
         /*doc=*/nullptr, /*library_call=*/nullptr);
     Block &prodBlock = prod.getRegion().front();
@@ -430,7 +430,7 @@ struct FuseSparseMultiplyOverAdd : public OpRewritePattern<GenericOp> {
     mapper.map(consBlock.getArgument(other), fusedBlock->back().getResult(0));
     mapper.map(last, rewriter.clone(*sampler, mapper)->getResult(0));
     last = rewriter.clone(*acc, mapper)->getResult(0);
-    rewriter.create<linalg::YieldOp>(loc, last);
+    linalg::YieldOp::create(rewriter, loc, last);
     // Force initial value on merged allocation for dense outputs.
     // TODO: deal with non-alloc tensors here one day
     if (!getSparseTensorEncoding(op.getResult(0).getType())) {
@@ -534,7 +534,7 @@ struct GenSemiRingSelect : public OpRewritePattern<GenericOp> {
       assert(t.getType() == f.getType());
       auto selTp = t.getType();
       auto c0 = constantZero(rewriter, loc, selTp);
-      auto binOp = rewriter.create<sparse_tensor::BinaryOp>(loc, selTp, t, f);
+      auto binOp = sparse_tensor::BinaryOp::create(rewriter, loc, selTp, t, f);
       // Initializes all the blocks.
       rewriter.createBlock(&binOp.getOverlapRegion(), {}, {selTp, selTp},
                            {t.getLoc(), f.getLoc()});
@@ -564,7 +564,7 @@ struct GenSemiRingSelect : public OpRewritePattern<GenericOp> {
           irMap.map(f, b->getArgument(1));
         }
         auto y = rewriter.clone(inst, irMap)->getResult(0);
-        rewriter.create<sparse_tensor::YieldOp>(loc, y);
+        sparse_tensor::YieldOp::create(rewriter, loc, y);
       }
 
       // We successfully rewrote an operation. We cannot do the replacement here
@@ -674,29 +674,29 @@ struct GenSemiRingReduction : public OpRewritePattern<GenericOp> {
     // Identity.
     Location loc = op.getLoc();
     Value identity =
-        rewriter.create<tensor::ExtractOp>(loc, init->get(), ValueRange());
+        tensor::ExtractOp::create(rewriter, loc, init->get(), ValueRange());
     // Unary {
     //    present -> value
     //    absent  -> zero.
     // }
     Type rtp = s0.getType();
     rewriter.setInsertionPointToStart(&op.getRegion().front());
-    auto semiring = rewriter.create<sparse_tensor::UnaryOp>(loc, rtp, s0);
+    auto semiring = sparse_tensor::UnaryOp::create(rewriter, loc, rtp, s0);
     Block *present =
         rewriter.createBlock(&semiring.getPresentRegion(), {}, rtp, loc);
     rewriter.setInsertionPointToStart(&semiring.getPresentRegion().front());
-    rewriter.create<sparse_tensor::YieldOp>(loc, present->getArgument(0));
+    sparse_tensor::YieldOp::create(rewriter, loc, present->getArgument(0));
     rewriter.createBlock(&semiring.getAbsentRegion(), {}, {}, {});
     rewriter.setInsertionPointToStart(&semiring.getAbsentRegion().front());
     auto zero =
-        rewriter.create<arith::ConstantOp>(loc, rewriter.getZeroAttr(rtp));
-    rewriter.create<sparse_tensor::YieldOp>(loc, zero);
+        arith::ConstantOp::create(rewriter, loc, rewriter.getZeroAttr(rtp));
+    sparse_tensor::YieldOp::create(rewriter, loc, zero);
     rewriter.setInsertionPointAfter(semiring);
     // CustomReduce {
     //    x = x REDUC y, identity
     // }
-    auto custom = rewriter.create<sparse_tensor::ReduceOp>(
-        loc, rtp, semiring.getResult(), s1, identity);
+    auto custom = sparse_tensor::ReduceOp::create(
+        rewriter, loc, rtp, semiring.getResult(), s1, identity);
     Block *region =
         rewriter.createBlock(&custom.getRegion(), {}, {rtp, rtp}, {loc, loc});
     rewriter.setInsertionPointToStart(&custom.getRegion().front());
@@ -704,7 +704,7 @@ struct GenSemiRingReduction : public OpRewritePattern<GenericOp> {
     irMap.map(red->getOperand(0), region->getArgument(0));
     irMap.map(red->getOperand(1), region->getArgument(1));
     auto *cloned = rewriter.clone(*red, irMap);
-    rewriter.create<sparse_tensor::YieldOp>(loc, cloned->getResult(0));
+    sparse_tensor::YieldOp::create(rewriter, loc, cloned->getResult(0));
     rewriter.setInsertionPointAfter(custom);
     rewriter.replaceOp(red, custom.getResult());
     return success();
@@ -723,14 +723,15 @@ struct PrintRewriter : public OpRewritePattern<PrintOp> {
     auto tensor = op.getTensor();
     auto stt = getSparseTensorType(tensor);
     // Header with NSE.
-    auto nse = rewriter.create<NumberOfEntriesOp>(loc, tensor);
-    rewriter.create<vector::PrintOp>(
-        loc, rewriter.getStringAttr("---- Sparse Tensor ----\nnse = "));
-    rewriter.create<vector::PrintOp>(loc, nse);
+    auto nse = NumberOfEntriesOp::create(rewriter, loc, tensor);
+    vector::PrintOp::create(
+        rewriter, loc,
+        rewriter.getStringAttr("---- Sparse Tensor ----\nnse = "));
+    vector::PrintOp::create(rewriter, loc, nse);
     // Print run-time contents for dim/lvl sizes.
-    rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("dim = "));
+    vector::PrintOp::create(rewriter, loc, rewriter.getStringAttr("dim = "));
     printSizes(rewriter, loc, tensor, stt.getDimRank(), /*isDim=*/true);
-    rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("lvl = "));
+    vector::PrintOp::create(rewriter, loc, rewriter.getStringAttr("lvl = "));
     printSizes(rewriter, loc, tensor, stt.getLvlRank(), /*isDim=*/false);
     // Use the "codegen" foreach loop construct to iterate over
     // all typical sparse tensor components for printing.
@@ -744,42 +745,42 @@ struct PrintRewriter : public OpRewritePattern<PrintOp> {
       }
       case SparseTensorFieldKind::PosMemRef: {
         auto lvl = constantIndex(rewriter, loc, l);
-        rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("pos["));
-        rewriter.create<vector::PrintOp>(
-            loc, lvl, vector::PrintPunctuation::NoPunctuation);
-        rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("] : "));
-        auto pos = rewriter.create<ToPositionsOp>(loc, tensor, l);
+        vector::PrintOp::create(rewriter, loc, rewriter.getStringAttr("pos["));
+        vector::PrintOp::create(rewriter, loc, lvl,
+                                vector::PrintPunctuation::NoPunctuation);
+        vector::PrintOp::create(rewriter, loc, rewriter.getStringAttr("] : "));
+        auto pos = ToPositionsOp::create(rewriter, loc, tensor, l);
         printContents(rewriter, loc, pos);
         break;
       }
       case SparseTensorFieldKind::CrdMemRef: {
         auto lvl = constantIndex(rewriter, loc, l);
-        rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("crd["));
-        rewriter.create<vector::PrintOp>(
-            loc, lvl, vector::PrintPunctuation::NoPunctuation);
-        rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("] : "));
+        vector::PrintOp::create(rewriter, loc, rewriter.getStringAttr("crd["));
+        vector::PrintOp::create(rewriter, loc, lvl,
+                                vector::PrintPunctuation::NoPunctuation);
+        vector::PrintOp::create(rewriter, loc, rewriter.getStringAttr("] : "));
         Value crd = nullptr;
         // For COO AoS storage, we want to print a single, linear view of
         // the full coordinate storage at this level. For any other storage,
         // we show the coordinate storage for every individual level.
         if (stt.getAoSCOOStart() == l)
-          crd = rewriter.create<ToCoordinatesBufferOp>(loc, tensor);
+          crd = ToCoordinatesBufferOp::create(rewriter, loc, tensor);
         else
-          crd = rewriter.create<ToCoordinatesOp>(loc, tensor, l);
+          crd = ToCoordinatesOp::create(rewriter, loc, tensor, l);
         printContents(rewriter, loc, crd);
         break;
       }
       case SparseTensorFieldKind::ValMemRef: {
-        rewriter.create<vector::PrintOp>(loc,
-                                         rewriter.getStringAttr("values : "));
-        auto val = rewriter.create<ToValuesOp>(loc, tensor);
+        vector::PrintOp::create(rewriter, loc,
+                                rewriter.getStringAttr("values : "));
+        auto val = ToValuesOp::create(rewriter, loc, tensor);
         printContents(rewriter, loc, val);
         break;
       }
       }
       return true;
     });
-    rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("----\n"));
+    vector::PrintOp::create(rewriter, loc, rewriter.getStringAttr("----\n"));
     rewriter.eraseOp(op);
     return success();
   }
@@ -797,7 +798,7 @@ struct PrintRewriter : public OpRewritePattern<PrintOp> {
     auto shape = cast<ShapedType>(vec.getType()).getShape();
     SmallVector<Value> idxs;
     printContentsLevel(rewriter, loc, vec, 0, shape, idxs);
-    rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::NewLine);
+    vector::PrintOp::create(rewriter, loc, vector::PrintPunctuation::NewLine);
   }
 
   // Helper to the helper.
@@ -805,13 +806,13 @@ struct PrintRewriter : public OpRewritePattern<PrintOp> {
                                  Value vec, unsigned i, ArrayRef<int64_t> shape,
                                  SmallVectorImpl<Value> &idxs) {
     // Open bracket.
-    rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Open);
+    vector::PrintOp::create(rewriter, loc, vector::PrintPunctuation::Open);
     // Generate for loop.
     auto zero = constantIndex(rewriter, loc, 0);
     auto index = constantIndex(rewriter, loc, i);
-    auto size = rewriter.create<memref::DimOp>(loc, vec, index);
+    auto size = memref::DimOp::create(rewriter, loc, vec, index);
     auto step = constantIndex(rewriter, loc, 1);
-    auto forOp = rewriter.create<scf::ForOp>(loc, zero, size, step);
+    auto forOp = scf::ForOp::create(rewriter, loc, zero, size, step);
     idxs.push_back(forOp.getInductionVar());
     rewriter.setInsertionPointToStart(forOp.getBody());
     if (i < shape.size() - 1) {
@@ -819,56 +820,56 @@ struct PrintRewriter : public OpRewritePattern<PrintOp> {
       printContentsLevel(rewriter, loc, vec, i + 1, shape, idxs);
     } else {
       // Actual contents printing.
-      auto val = rewriter.create<memref::LoadOp>(loc, vec, idxs);
+      auto val = memref::LoadOp::create(rewriter, loc, vec, idxs);
       if (llvm::isa<ComplexType>(val.getType())) {
         // Since the vector dialect does not support complex types in any op,
         // we split those into (real, imag) pairs here.
-        Value real = rewriter.create<complex::ReOp>(loc, val);
-        Value imag = rewriter.create<complex::ImOp>(loc, val);
-        rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Open);
-        rewriter.create<vector::PrintOp>(loc, real,
-                                         vector::PrintPunctuation::Comma);
-        rewriter.create<vector::PrintOp>(loc, imag,
-                                         vector::PrintPunctuation::Close);
+        Value real = complex::ReOp::create(rewriter, loc, val);
+        Value imag = complex::ImOp::create(rewriter, loc, val);
+        vector::PrintOp::create(rewriter, loc, vector::PrintPunctuation::Open);
+        vector::PrintOp::create(rewriter, loc, real,
+                                vector::PrintPunctuation::Comma);
+        vector::PrintOp::create(rewriter, loc, imag,
+                                vector::PrintPunctuation::Close);
       } else {
-        rewriter.create<vector::PrintOp>(
-            loc, val, vector::PrintPunctuation::NoPunctuation);
+        vector::PrintOp::create(rewriter, loc, val,
+                                vector::PrintPunctuation::NoPunctuation);
       }
       // Terminating comma (except at end).
-      auto bound = rewriter.create<arith::AddIOp>(loc, idxs.back(), step);
-      Value cond = rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ne,
-                                                  bound, size);
-      scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, cond, /*else*/ false);
+      auto bound = arith::AddIOp::create(rewriter, loc, idxs.back(), step);
+      Value cond = arith::CmpIOp::create(rewriter, loc,
+                                         arith::CmpIPredicate::ne, bound, size);
+      scf::IfOp ifOp = scf::IfOp::create(rewriter, loc, cond, /*else*/ false);
       rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
-      rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Comma);
+      vector::PrintOp::create(rewriter, loc, vector::PrintPunctuation::Comma);
     }
     idxs.pop_back();
     rewriter.setInsertionPointAfter(forOp);
     // Close bracket.
-    rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Close);
+    vector::PrintOp::create(rewriter, loc, vector::PrintPunctuation::Close);
   }
 
   // Helper method to print run-time lvl/dim sizes.
   static void printSizes(PatternRewriter &rewriter, Location loc, Value tensor,
                          unsigned size, bool isDim) {
     // Open bracket.
-    rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Open);
+    vector::PrintOp::create(rewriter, loc, vector::PrintPunctuation::Open);
     // Print unrolled contents (the dim op requires a constant value).
     for (unsigned i = 0; i < size; i++) {
       auto idx = constantIndex(rewriter, loc, i);
       Value val;
       if (isDim)
-        val = rewriter.create<tensor::DimOp>(loc, tensor, idx);
+        val = tensor::DimOp::create(rewriter, loc, tensor, idx);
       else
-        val = rewriter.create<LvlOp>(loc, tensor, idx);
-      rewriter.create<vector::PrintOp>(
-          loc, val,
-          i != size - 1 ? vector::PrintPunctuation::Comma
-                        : vector::PrintPunctuation::NoPunctuation);
+        val = LvlOp::create(rewriter, loc, tensor, idx);
+      vector::PrintOp::create(rewriter, loc, val,
+                              i != size - 1
+                                  ? vector::PrintPunctuation::Comma
+                                  : vector::PrintPunctuation::NoPunctuation);
     }
     // Close bracket and end of line.
-    rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Close);
-    rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::NewLine);
+    vector::PrintOp::create(rewriter, loc, vector::PrintPunctuation::Close);
+    vector::PrintOp::create(rewriter, loc, vector::PrintPunctuation::NewLine);
   }
 };
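For a sense of what `PrintRewriter` emits after this change, a 4x4 CSR tensor with three stored entries would print along these lines (the concrete values, and the exact spacing produced by `vector.print` punctuation, are illustrative):

    ---- Sparse Tensor ----
    nse = 3
    dim = ( 4, 4 )
    lvl = ( 4, 4 )
    pos[1] : ( 0, 1, 2, 3, 3 )
    crd[1] : ( 0, 1, 2 )
    values : ( 1, 2, 3 )
    ----

Each memref is rendered by the bracketed-loop helper above, which emits a comma after every element except the last.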
 
@@ -896,7 +897,7 @@ struct TensorReshapeRewriter : public OpRewritePattern<tensor::ReshapeOp> {
     for (Dimension d : dstTp->getDimShape())
       dstSizes.push_back(constantIndex(rewriter, loc, d));
 
-    Value nnz = rewriter.create<NumberOfEntriesOp>(loc, srcTensor);
+    Value nnz = NumberOfEntriesOp::create(rewriter, loc, srcTensor);
     // Only need an unordered COO buffer if input and output are not sorted
     // in the same way.
     Type bufferTp = getBufferType(
@@ -920,8 +921,8 @@ struct TensorReshapeRewriter : public OpRewritePattern<tensor::ReshapeOp> {
     //   %t = sparse_tensor.cast %tmp
     // depending on whether the input/output are sorted in the same way.
     const auto encSrc = srcTp->getEncoding();
-    ForeachOp foreachOp = rewriter.create<ForeachOp>(
-        loc, srcTensor, buffer,
+    ForeachOp foreachOp = ForeachOp::create(
+        rewriter, loc, srcTensor, buffer,
         [&](OpBuilder &builder, Location loc, ValueRange srcLcvs, Value v,
             ValueRange reduc) {
           const Dimension srcRank = srcTp->getDimRank();
@@ -935,7 +936,7 @@ struct TensorReshapeRewriter : public OpRewritePattern<tensor::ReshapeOp> {
           Value collapseSize = constantIndex(builder, loc, 1);
           for (Dimension d = 0; d < srcRank; d++)
             collapseSize =
-                builder.create<arith::MulIOp>(loc, collapseSize, srcSizes[d]);
+                arith::MulIOp::create(builder, loc, collapseSize, srcSizes[d]);
           SmallVector<Value, 1> collapsedSizes = {collapseSize};
 
           ReassociationIndices collapseIdx;
@@ -955,15 +956,15 @@ struct TensorReshapeRewriter : public OpRewritePattern<tensor::ReshapeOp> {
                      dstSizes, dstDcvs);
 
           auto t =
-              builder.create<tensor::InsertOp>(loc, v, reduc.front(), dstDcvs);
-          builder.create<sparse_tensor::YieldOp>(loc, t);
+              tensor::InsertOp::create(builder, loc, v, reduc.front(), dstDcvs);
+          sparse_tensor::YieldOp::create(builder, loc, t);
         });
 
-    Value t = rewriter.create<LoadOp>(loc, foreachOp.getResult(0), true);
+    Value t = LoadOp::create(rewriter, loc, foreachOp.getResult(0), true);
     if (bufferTp != *dstTp) {
       auto dstRTT = dstTp->getRankedTensorType();
-      Value converted = rewriter.create<ConvertOp>(loc, dstRTT, t).getResult();
-      rewriter.create<DeallocTensorOp>(loc, t);
+      Value converted = ConvertOp::create(rewriter, loc, dstRTT, t).getResult();
+      DeallocTensorOp::create(rewriter, loc, t);
       t = converted;
     }
     rewriter.replaceOp(op, t);
@@ -1004,7 +1005,7 @@ struct Sparse2SparseReshapeRewriter : public OpRewritePattern<ReshapeOp> {
           dstDynSizes.push_back(dstSizes[idx]);
       }
     }
-    Value nnz = rewriter.create<NumberOfEntriesOp>(loc, srcTensor);
+    Value nnz = NumberOfEntriesOp::create(rewriter, loc, srcTensor);
     // Only need an unordered COO buffer if input and output are not sorted
     // in the same way.
     Type bufferTp = getBufferType(
@@ -1025,8 +1026,8 @@ struct Sparse2SparseReshapeRewriter : public OpRewritePattern<ReshapeOp> {
     //   %t = sparse_tensor.cast %tmp
     // depending on whether the input/output are sorted in the same way.
     const auto encSrc = srcTp.getEncoding();
-    ForeachOp foreachOp = rewriter.create<ForeachOp>(
-        loc, srcTensor, buffer,
+    ForeachOp foreachOp = ForeachOp::create(
+        rewriter, loc, srcTensor, buffer,
         [&](OpBuilder &builder, Location loc, ValueRange srcLcvs, Value v,
             ValueRange reduc) {
           const Dimension dimRank = srcTp.getDimRank();
@@ -1040,15 +1041,15 @@ struct Sparse2SparseReshapeRewriter : public OpRewritePattern<ReshapeOp> {
           reshapeCvs(builder, loc, op.getReassociationIndices(), srcSizes,
                      srcDcvs, dstSizes, dstDcvs);
           auto t =
-              builder.create<tensor::InsertOp>(loc, v, reduc.front(), dstDcvs);
-          builder.create<sparse_tensor::YieldOp>(loc, t);
+              tensor::InsertOp::create(builder, loc, v, reduc.front(), dstDcvs);
+          sparse_tensor::YieldOp::create(builder, loc, t);
         });
 
-    Value t = rewriter.create<LoadOp>(loc, foreachOp.getResult(0), true);
+    Value t = LoadOp::create(rewriter, loc, foreachOp.getResult(0), true);
     if (bufferTp != dstTp) {
       auto dstRTT = dstTp.getRankedTensorType();
-      Value converted = rewriter.create<ConvertOp>(loc, dstRTT, t).getResult();
-      rewriter.create<DeallocTensorOp>(loc, t);
+      Value converted = ConvertOp::create(rewriter, loc, dstRTT, t).getResult();
+      DeallocTensorOp::create(rewriter, loc, t);
       t = converted;
     }
     rewriter.replaceOp(op, t);
@@ -1079,7 +1080,7 @@ struct ReshapeRewriter : public OpRewritePattern<ReshapeOp> {
       auto rtp = getRankedTensorType(op.getSrc());
       auto denseTp =
           RankedTensorType::get(rtp.getShape(), rtp.getElementType());
-      auto convert = rewriter.create<ConvertOp>(loc, denseTp, op.getSrc());
+      auto convert = ConvertOp::create(rewriter, loc, denseTp, op.getSrc());
       rewriter.modifyOpInPlace(op, [&]() { op->setOperand(0, convert); });
       return success();
     }
@@ -1089,14 +1090,14 @@ struct ReshapeRewriter : public OpRewritePattern<ReshapeOp> {
           RankedTensorType::get(rtp.getShape(), rtp.getElementType());
       ReshapeOp reshape;
       if constexpr (std::is_same<ReshapeOp, tensor::ExpandShapeOp>::value) {
-        reshape = rewriter.create<ReshapeOp>(
-            loc, denseTp, op.getSrc(), op.getReassociation(),
-            op.getOutputShape(), op.getStaticOutputShape());
+        reshape = ReshapeOp::create(rewriter, loc, denseTp, op.getSrc(),
+                                    op.getReassociation(), op.getOutputShape(),
+                                    op.getStaticOutputShape());
       } else {
-        reshape = rewriter.create<ReshapeOp>(loc, denseTp, op.getSrc(),
-                                             op.getReassociation());
+        reshape = ReshapeOp::create(rewriter, loc, denseTp, op.getSrc(),
+                                    op.getReassociation());
       }
-      Value convert = rewriter.create<ConvertOp>(loc, rtp, reshape);
+      Value convert = ConvertOp::create(rewriter, loc, rtp, reshape);
       rewriter.replaceOp(op, convert);
       return success();
     }
@@ -1112,20 +1113,20 @@ struct TensorLike {
     SmallVector<Value> dynSzs;
     getDynamicSizes(rtt, sizes, dynSzs);
 
-    val = builder.create<AllocTensorOp>(loc, rtt, dynSzs);
+    val = AllocTensorOp::create(builder, loc, rtt, dynSzs);
     if (!isSparse()) {
       Value c0 = constantZero(builder, loc, rtt.getElementType());
-      val = builder.create<linalg::FillOp>(loc, c0, val).getResult(0);
+      val = linalg::FillOp::create(builder, loc, c0, val).getResult(0);
     }
   }
 
   void insert(OpBuilder &builder, Location loc, Value v, ValueRange crds) {
-    val = builder.create<tensor::InsertOp>(loc, v, val, crds);
+    val = tensor::InsertOp::create(builder, loc, v, val, crds);
   }
 
   Value finalize(OpBuilder &builder, Location loc, RankedTensorType rtp) const {
     if (isSparse())
-      return builder.create<LoadOp>(loc, val, true);
+      return LoadOp::create(builder, loc, val, true);
     return val;
   }
 
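The `TensorLike` helper above wraps the allocate/insert/finalize sequence shared by the rewriters that follow. A hedged usage sketch (`rtt`, `sizes`, `v`, and `crds` stand in for values available at the call site):

    // Allocate the destination; dense outputs are additionally zero-filled.
    TensorLike dstBuf(rewriter, loc, rtt, sizes);
    // Thread the SSA value through each insertion.
    dstBuf.insert(rewriter, loc, v, crds);
    // For sparse outputs this issues the finalizing sparse_tensor.load.
    Value result = dstBuf.finalize(rewriter, loc, rtt);

The ConcatenateRewriter and DirectConvertRewriter below follow exactly this shape, re-reading `dstBuf.val` from the loop-carried argument inside each foreach body.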
@@ -1160,19 +1161,21 @@ struct SparseTensorDimOpRewriter : public OpRewritePattern<tensor::DimOp> {
     Location loc = op.getLoc();
     SmallVector<Value> maxLvlCrds;
     for (Level l = 0; l < stt->getLvlRank(); l++) {
-      Value lvlSz = rewriter.create<LvlOp>(loc, op.getSource(), l);
-      Value maxLvlCrd = rewriter.create<arith::SubIOp>(
-          loc, lvlSz, constantOne(rewriter, loc, rewriter.getIndexType()));
+      Value lvlSz = LvlOp::create(rewriter, loc, op.getSource(), l);
+      Value maxLvlCrd = arith::SubIOp::create(
+          rewriter, loc, lvlSz,
+          constantOne(rewriter, loc, rewriter.getIndexType()));
       maxLvlCrds.push_back(maxLvlCrd);
     }
 
     AffineExpr lvl2DimExp = stt->getLvlToDim().getResult(*dim);
-    Value maxDimCrd = rewriter.create<affine::AffineApplyOp>(
-        op.getLoc(), AffineMap::get(stt->getLvlRank(), 0, lvl2DimExp),
+    Value maxDimCrd = affine::AffineApplyOp::create(
+        rewriter, op.getLoc(), AffineMap::get(stt->getLvlRank(), 0, lvl2DimExp),
         maxLvlCrds);
 
-    Value dimSz = rewriter.create<arith::AddIOp>(
-        loc, maxDimCrd, constantOne(rewriter, loc, rewriter.getIndexType()));
+    Value dimSz = arith::AddIOp::create(
+        rewriter, loc, maxDimCrd,
+        constantOne(rewriter, loc, rewriter.getIndexType()));
     rewriter.replaceOp(op, dimSz);
     return success();
   }
@@ -1212,26 +1215,27 @@ struct ConcatenateRewriter : public OpRewritePattern<ConcatenateOp> {
     for (Value input : op.getInputs()) {
       // Builds a for op for each input tensor to append new values into the
       // output tensor.
-      foreachOp = rewriter.create<ForeachOp>(
-          loc, input, iterArg,
+      foreachOp = ForeachOp::create(
+          rewriter, loc, input, iterArg,
           [&](OpBuilder &builder, Location loc, ValueRange dcvs, Value v,
               ValueRange reduc) {
             SmallVector<Value> offDimCrd(dcvs);
             offDimCrd[conDim] =
-                builder.create<arith::AddIOp>(loc, offDimCrd[conDim], offset);
+                arith::AddIOp::create(builder, loc, offDimCrd[conDim], offset);
 
             // Enters foreach, updates the SSA chain.
             dstBuf.val = reduc.front();
             if (!dstTp.isAllDense()) {
               Value cond = genIsNonzero(builder, loc, v);
-              auto ifOp = builder.create<scf::IfOp>(loc, reduc.getTypes(), cond,
-                                                    /*else*/ true);
+              auto ifOp =
+                  scf::IfOp::create(builder, loc, reduc.getTypes(), cond,
+                                    /*else*/ true);
               builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
-              builder.create<scf::YieldOp>(loc, dstBuf.val);
+              scf::YieldOp::create(builder, loc, dstBuf.val);
 
               builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
               dstBuf.insert(builder, loc, v, offDimCrd);
-              builder.create<scf::YieldOp>(loc, dstBuf.val);
+              scf::YieldOp::create(builder, loc, dstBuf.val);
 
               // Exits the ifOp and updates the sparse tensor SSA value.
               builder.setInsertionPointAfter(ifOp);
@@ -1239,15 +1243,15 @@ struct ConcatenateRewriter : public OpRewritePattern<ConcatenateOp> {
             } else {
               dstBuf.insert(builder, loc, v, offDimCrd);
             }
-            builder.create<sparse_tensor::YieldOp>(loc, dstBuf.val);
+            sparse_tensor::YieldOp::create(builder, loc, dstBuf.val);
           });
       // Accumulates the offset. Note that only static-shaped inputs are allowed
       // by the concatenate op verifier, which saves us from computing the offset
       // dynamically.
       const Size sz = getSparseTensorType(input).getDynamicDimSize(conDim);
       assert(ShapedType::isStatic(sz));
-      offset = rewriter.create<arith::AddIOp>(loc, offset,
-                                              constantIndex(rewriter, loc, sz));
+      offset = arith::AddIOp::create(rewriter, loc, offset,
+                                     constantIndex(rewriter, loc, sz));
       iterArg = foreachOp.getResult(0);
       dstBuf.val = iterArg;
     }
@@ -1299,22 +1303,22 @@ struct DirectConvertRewriter : public OpRewritePattern<ConvertOp> {
     ValueRange vs;
     TensorLike dstBuf(rewriter, loc, dstStt.getRankedTensorType(), sizes);
 
-    auto foreachOp = rewriter.create<ForeachOp>(
-        loc, src, dstBuf.val, foreachOrder,
+    auto foreachOp = ForeachOp::create(
+        rewriter, loc, src, dstBuf.val, foreachOrder,
         [&](OpBuilder &builder, Location loc, ValueRange dcvs, Value v,
             ValueRange reduc) {
           // Enters the loop and updates the SSA value for the insertion chain.
           dstBuf.val = reduc.front();
           if (!skipZeroCheck) {
             Value cond = genIsNonzero(builder, loc, v);
-            auto ifOp = builder.create<scf::IfOp>(loc, reduc.getTypes(), cond,
-                                                  /*else*/ true);
+            auto ifOp = scf::IfOp::create(builder, loc, reduc.getTypes(), cond,
+                                          /*else*/ true);
             builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
-            builder.create<scf::YieldOp>(loc, dstBuf.val);
+            scf::YieldOp::create(builder, loc, dstBuf.val);
 
             builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
             dstBuf.insert(builder, loc, v, dcvs);
-            builder.create<scf::YieldOp>(loc, dstBuf.val);
+            scf::YieldOp::create(builder, loc, dstBuf.val);
 
            // Exits the ifOp, updates the sparse tensor SSA value.
             builder.setInsertionPointAfter(ifOp);
@@ -1322,7 +1326,7 @@ struct DirectConvertRewriter : public OpRewritePattern<ConvertOp> {
           } else {
             dstBuf.insert(builder, loc, v, dcvs);
           }
-          builder.create<sparse_tensor::YieldOp>(loc, dstBuf.val);
+          sparse_tensor::YieldOp::create(builder, loc, dstBuf.val);
         });
 
     rewriter.setInsertionPointAfter(foreachOp);
@@ -1349,8 +1353,8 @@ struct CrdTranslateRewriter : public OpRewritePattern<CrdTranslateOp> {
       // TODO: we should probably expand the affine map to IR using our own
      // rules, since affine.apply assumes signed values, while the coordinates
      // we provide must always be signless.
-      Value trans = rewriter.create<affine::AffineApplyOp>(
-          op.getLoc(), AffineMap::get(map.getNumDims(), 0, result),
+      Value trans = affine::AffineApplyOp::create(
+          rewriter, op.getLoc(), AffineMap::get(map.getNumDims(), 0, result),
           op.getInCrds());
       outCrds.push_back(trans);
     }
@@ -1412,8 +1416,8 @@ struct ForeachRewriter : public OpRewritePattern<ForeachOp> {
     SmallVector<Value> pos = loopEmitter.getValPosits(0);
    // Loads the value from the sparse tensor using the position index;
    // loads the value from the dense tensor using coords.
-    Value val = enc ? rewriter.create<memref::LoadOp>(loc, vals, pos)
-                    : rewriter.create<memref::LoadOp>(loc, vals, lcvs);
+    Value val = enc ? memref::LoadOp::create(rewriter, loc, vals, pos)
+                    : memref::LoadOp::create(rewriter, loc, vals, lcvs);
 
     // 2. Inline the block in the foreach operator.
     Block *srcBlock = op.getBody();
@@ -1472,22 +1476,22 @@ struct NewRewriter : public OpRewritePattern<NewOp> {
     // with enveloping reinterpreted_map ops for non-permutations.
     RankedTensorType dstTp = stt.getRankedTensorType();
     RankedTensorType cooTp = stt.getCOOType(/*ordered=*/true);
-    Value cooTensor = rewriter.create<NewOp>(loc, cooTp, op.getSource());
+    Value cooTensor = NewOp::create(rewriter, loc, cooTp, op.getSource());
     Value convert = cooTensor;
     auto enc = stt.getEncoding();
     if (!stt.isPermutation()) { // demap coo, demap dstTp
       auto coo = getSparseTensorType(cooTensor).getEncoding().withoutDimToLvl();
-      convert = rewriter.create<ReinterpretMapOp>(loc, coo, convert);
+      convert = ReinterpretMapOp::create(rewriter, loc, coo, convert);
       dstTp = getSparseTensorType(convert).withEncoding(enc.withoutDimToLvl());
     }
-    convert = rewriter.create<ConvertOp>(loc, dstTp, convert);
+    convert = ConvertOp::create(rewriter, loc, dstTp, convert);
     if (!stt.isPermutation()) // remap to original enc
-      convert = rewriter.create<ReinterpretMapOp>(loc, enc, convert);
+      convert = ReinterpretMapOp::create(rewriter, loc, enc, convert);
     rewriter.replaceOp(op, convert);
 
     // Release the temporary ordered COO tensor.
     rewriter.setInsertionPointAfterValue(convert);
-    rewriter.create<DeallocTensorOp>(loc, cooTensor);
+    DeallocTensorOp::create(rewriter, loc, cooTensor);
 
     return success();
   }
@@ -1501,7 +1505,7 @@ struct OutRewriter : public OpRewritePattern<OutOp> {
     Location loc = op.getLoc();
     // Calculate NNZ.
     Value src = op.getTensor();
-    Value nnz = rewriter.create<NumberOfEntriesOp>(loc, src);
+    Value nnz = NumberOfEntriesOp::create(rewriter, loc, src);
 
     // Allocate a temporary buffer for storing dimension-sizes/coordinates.
     const auto srcTp = getSparseTensorType(src);
@@ -1514,8 +1518,8 @@ struct OutRewriter : public OpRewritePattern<OutOp> {
     SmallVector<Value> dims;
     sizesForTensor(rewriter, dims, loc, srcTp, src);
     for (Dimension d = 0; d < dimRank; d++) {
-      rewriter.create<memref::StoreOp>(loc, dims[d], dimSizes,
-                                       constantIndex(rewriter, loc, d));
+      memref::StoreOp::create(rewriter, loc, dims[d], dimSizes,
+                              constantIndex(rewriter, loc, d));
     }
 
     // Create a sparse tensor writer and output meta data.
@@ -1536,20 +1540,20 @@ struct OutRewriter : public OpRewritePattern<OutOp> {
     ModuleOp module = op->getParentOfType<ModuleOp>();
 
     // For each element in the source tensor, output the element.
-    rewriter.create<ForeachOp>(
-        loc, src, ValueRange(),
+    ForeachOp::create(
+        rewriter, loc, src, ValueRange(),
         [&](OpBuilder &builder, Location loc, ValueRange dcvs, Value v,
             ValueRange reduc) {
           for (Dimension d = 0; d < dimRank; d++) {
-            rewriter.create<memref::StoreOp>(loc, dcvs[d], dimCoords,
-                                             constantIndex(builder, loc, d));
+            memref::StoreOp::create(rewriter, loc, dcvs[d], dimCoords,
+                                    constantIndex(builder, loc, d));
           }
-          rewriter.create<memref::StoreOp>(loc, v, value);
+          memref::StoreOp::create(rewriter, loc, v, value);
           SmallVector<Value> operands{writer, rankValue, dimCoords, value};
           FlatSymbolRefAttr fn = getFunc(module, outNextFuncName, {}, operands,
                                          EmitCInterface::On);
-          builder.create<func::CallOp>(loc, TypeRange(), fn, operands);
-          builder.create<sparse_tensor::YieldOp>(loc);
+          func::CallOp::create(builder, loc, TypeRange(), fn, operands);
+          sparse_tensor::YieldOp::create(builder, loc);
         });
 
     // Release the writer.

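The change is mechanical throughout this file (and the rest of the patch):
every `builder.create<OpTy>(loc, args...)` becomes `OpTy::create(builder,
loc, args...)` with the argument list otherwise unchanged. A minimal sketch,
assuming hypothetical values `a` and `b` of index type in scope:

    // Old builder-template form.
    Value sum = builder.create<arith::AddIOp>(loc, a, b);
    // New static-create form; the builder moves to the front of the
    // argument list. Single-result ops convert implicitly to Value.
    Value sum2 = arith::AddIOp::create(builder, loc, a, b);
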
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
index 52b66badef44b..4464450fd328f 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
@@ -78,7 +78,7 @@ static Value genVectorMask(PatternRewriter &rewriter, Location loc, VL vl,
       matchPattern(step, m_Constant(&stepInt))) {
     if (((hiInt.getInt() - loInt.getInt()) % stepInt.getInt()) == 0) {
       Value trueVal = constantI1(rewriter, loc, true);
-      return rewriter.create<vector::BroadcastOp>(loc, mtp, trueVal);
+      return vector::BroadcastOp::create(rewriter, loc, mtp, trueVal);
     }
   }
   // Otherwise, generate a vector mask that avoids overrunning the upper bound
@@ -92,7 +92,7 @@ static Value genVectorMask(PatternRewriter &rewriter, Location loc, VL vl,
       rewriter.getContext());
   Value end = rewriter.createOrFold<affine::AffineMinOp>(
       loc, min, ValueRange{hi, iv, step});
-  return rewriter.create<vector::CreateMaskOp>(loc, mtp, end);
+  return vector::CreateMaskOp::create(rewriter, loc, mtp, end);
 }
 
 /// Generates a vectorized invariant. Here we rely on subsequent loop
@@ -100,7 +100,7 @@ static Value genVectorMask(PatternRewriter &rewriter, Location loc, VL vl,
 static Value genVectorInvariantValue(PatternRewriter &rewriter, VL vl,
                                      Value val) {
   VectorType vtp = vectorType(vl, val.getType());
-  return rewriter.create<vector::BroadcastOp>(val.getLoc(), vtp, val);
+  return vector::BroadcastOp::create(rewriter, val.getLoc(), vtp, val);
 }
 
 /// Generates a vectorized load lhs = a[ind[lo:hi]] or lhs = a[lo:hi],
@@ -115,11 +115,11 @@ static Value genVectorLoad(PatternRewriter &rewriter, Location loc, VL vl,
     SmallVector<Value> scalarArgs(idxs);
     Value indexVec = idxs.back();
     scalarArgs.back() = constantIndex(rewriter, loc, 0);
-    return rewriter.create<vector::GatherOp>(loc, vtp, mem, scalarArgs,
-                                             indexVec, vmask, pass);
+    return vector::GatherOp::create(rewriter, loc, vtp, mem, scalarArgs,
+                                    indexVec, vmask, pass);
   }
-  return rewriter.create<vector::MaskedLoadOp>(loc, vtp, mem, idxs, vmask,
-                                               pass);
+  return vector::MaskedLoadOp::create(rewriter, loc, vtp, mem, idxs, vmask,
+                                      pass);
 }
 
 /// Generates a vectorized store a[ind[lo:hi]] = rhs or a[lo:hi] = rhs
@@ -132,11 +132,11 @@ static void genVectorStore(PatternRewriter &rewriter, Location loc, Value mem,
     SmallVector<Value> scalarArgs(idxs);
     Value indexVec = idxs.back();
     scalarArgs.back() = constantIndex(rewriter, loc, 0);
-    rewriter.create<vector::ScatterOp>(loc, mem, scalarArgs, indexVec, vmask,
-                                       rhs);
+    vector::ScatterOp::create(rewriter, loc, mem, scalarArgs, indexVec, vmask,
+                              rhs);
     return;
   }
-  rewriter.create<vector::MaskedStoreOp>(loc, mem, idxs, vmask, rhs);
+  vector::MaskedStoreOp::create(rewriter, loc, mem, idxs, vmask, rhs);
 }
 
 /// Detects a vectorizable reduction operation and returns the
@@ -197,18 +197,18 @@ static Value genVectorReducInit(PatternRewriter &rewriter, Location loc,
   case vector::CombiningKind::ADD:
   case vector::CombiningKind::XOR:
     // Initialize reduction vector to: | 0 | .. | 0 | r |
-    return rewriter.create<vector::InsertOp>(loc, r,
-                                             constantZero(rewriter, loc, vtp),
-                                             constantIndex(rewriter, loc, 0));
+    return vector::InsertOp::create(rewriter, loc, r,
+                                    constantZero(rewriter, loc, vtp),
+                                    constantIndex(rewriter, loc, 0));
   case vector::CombiningKind::MUL:
     // Initialize reduction vector to: | 1 | .. | 1 | r |
-    return rewriter.create<vector::InsertOp>(loc, r,
-                                             constantOne(rewriter, loc, vtp),
-                                             constantIndex(rewriter, loc, 0));
+    return vector::InsertOp::create(rewriter, loc, r,
+                                    constantOne(rewriter, loc, vtp),
+                                    constantIndex(rewriter, loc, 0));
   case vector::CombiningKind::AND:
   case vector::CombiningKind::OR:
     // Initialize reduction vector to: | r | .. | r | r |
-    return rewriter.create<vector::BroadcastOp>(loc, vtp, r);
+    return vector::BroadcastOp::create(rewriter, loc, vtp, r);
   default:
     break;
   }
@@ -300,11 +300,11 @@ static bool vectorizeSubscripts(PatternRewriter &rewriter, scf::ForOp forOp,
         Type etp = llvm::cast<VectorType>(vload.getType()).getElementType();
         if (!llvm::isa<IndexType>(etp)) {
           if (etp.getIntOrFloatBitWidth() < 32)
-            vload = rewriter.create<arith::ExtUIOp>(
-                loc, vectorType(vl, rewriter.getI32Type()), vload);
+            vload = arith::ExtUIOp::create(
+                rewriter, loc, vectorType(vl, rewriter.getI32Type()), vload);
           else if (etp.getIntOrFloatBitWidth() < 64 && !vl.enableSIMDIndex32)
-            vload = rewriter.create<arith::ExtUIOp>(
-                loc, vectorType(vl, rewriter.getI64Type()), vload);
+            vload = arith::ExtUIOp::create(
+                rewriter, loc, vectorType(vl, rewriter.getI64Type()), vload);
         }
         idxs.push_back(vload);
       }
@@ -328,7 +328,7 @@ static bool vectorizeSubscripts(PatternRewriter &rewriter, scf::ForOp forOp,
             return false;
           if (codegen)
             idxs.push_back(
-                rewriter.create<arith::AddIOp>(forOp.getLoc(), inv, idx));
+                arith::AddIOp::create(rewriter, forOp.getLoc(), inv, idx));
           continue; // success so far
         }
       }
@@ -341,7 +341,7 @@ static bool vectorizeSubscripts(PatternRewriter &rewriter, scf::ForOp forOp,
 #define UNAOP(xxx)                                                             \
   if (isa<xxx>(def)) {                                                         \
     if (codegen)                                                               \
-      vexp = rewriter.create<xxx>(loc, vx);                                    \
+      vexp = xxx::create(rewriter, loc, vx);                                   \
     return true;                                                               \
   }
 
@@ -349,7 +349,7 @@ static bool vectorizeSubscripts(PatternRewriter &rewriter, scf::ForOp forOp,
   if (auto x = dyn_cast<xxx>(def)) {                                           \
     if (codegen) {                                                             \
       VectorType vtp = vectorType(vl, x.getType());                            \
-      vexp = rewriter.create<xxx>(loc, vtp, vx);                               \
+      vexp = xxx::create(rewriter, loc, vtp, vx);                              \
     }                                                                          \
     return true;                                                               \
   }
@@ -357,7 +357,7 @@ static bool vectorizeSubscripts(PatternRewriter &rewriter, scf::ForOp forOp,
 #define BINOP(xxx)                                                             \
   if (isa<xxx>(def)) {                                                         \
     if (codegen)                                                               \
-      vexp = rewriter.create<xxx>(loc, vx, vy);                                \
+      vexp = xxx::create(rewriter, loc, vx, vy);                               \
     return true;                                                               \
   }
 
@@ -380,9 +380,9 @@ static bool vectorizeExpr(PatternRewriter &rewriter, scf::ForOp forOp, VL vl,
       // such as a[i] = i, which must convert to [i, i+1, ...].
       if (codegen) {
         VectorType vtp = vectorType(vl, arg.getType());
-        Value veci = rewriter.create<vector::BroadcastOp>(loc, vtp, arg);
-        Value incr = rewriter.create<vector::StepOp>(loc, vtp);
-        vexp = rewriter.create<arith::AddIOp>(loc, veci, incr);
+        Value veci = vector::BroadcastOp::create(rewriter, loc, vtp, arg);
+        Value incr = vector::StepOp::create(rewriter, loc, vtp);
+        vexp = arith::AddIOp::create(rewriter, loc, veci, incr);
       }
       return true;
     }
@@ -525,16 +525,16 @@ static bool vectorizeStmt(PatternRewriter &rewriter, scf::ForOp forOp, VL vl,
     Value step = constantIndex(rewriter, loc, vl.vectorLength);
     if (vl.enableVLAVectorization) {
       Value vscale =
-          rewriter.create<vector::VectorScaleOp>(loc, rewriter.getIndexType());
-      step = rewriter.create<arith::MulIOp>(loc, vscale, step);
+          vector::VectorScaleOp::create(rewriter, loc, rewriter.getIndexType());
+      step = arith::MulIOp::create(rewriter, loc, vscale, step);
     }
     if (!yield.getResults().empty()) {
       Value init = forOp.getInitArgs()[0];
       VectorType vtp = vectorType(vl, init.getType());
       Value vinit = genVectorReducInit(rewriter, loc, yield->getOperand(0),
                                        forOp.getRegionIterArg(0), init, vtp);
-      forOpNew = rewriter.create<scf::ForOp>(
-          loc, forOp.getLowerBound(), forOp.getUpperBound(), step, vinit);
+      forOpNew = scf::ForOp::create(rewriter, loc, forOp.getLowerBound(),
+                                    forOp.getUpperBound(), step, vinit);
       forOpNew->setAttr(
           LoopEmitter::getLoopEmitterLoopAttrName(),
           forOp->getAttr(LoopEmitter::getLoopEmitterLoopAttrName()));
@@ -562,10 +562,10 @@ static bool vectorizeStmt(PatternRewriter &rewriter, scf::ForOp forOp, VL vl,
       if (codegen) {
         Value partial = forOpNew.getResult(0);
         Value vpass = genVectorInvariantValue(rewriter, vl, iter);
-        Value vred = rewriter.create<arith::SelectOp>(loc, vmask, vrhs, vpass);
-        rewriter.create<scf::YieldOp>(loc, vred);
+        Value vred = arith::SelectOp::create(rewriter, loc, vmask, vrhs, vpass);
+        scf::YieldOp::create(rewriter, loc, vred);
         rewriter.setInsertionPointAfter(forOpNew);
-        Value vres = rewriter.create<vector::ReductionOp>(loc, kind, partial);
+        Value vres = vector::ReductionOp::create(rewriter, loc, kind, partial);
        // Now do some relinking (the last one is not completely type safe,
         // but all bad ones are removed right away). This also folds away
         // nop broadcast operations.

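As an illustration of the vectorizeExpr case above where a loop index is used
as a value (a[i] = i), the rewrite broadcasts the scalar induction variable
and adds a step vector. A hedged sketch, assuming `vl`, `rewriter`, `loc`,
and a scalar index `iv` in scope:

    // Produce [iv, iv+1, ..., iv+vl-1] with the updated create API.
    VectorType vtp = vectorType(vl, rewriter.getIndexType());
    Value veci = vector::BroadcastOp::create(rewriter, loc, vtp, iv);
    Value incr = vector::StepOp::create(rewriter, loc, vtp);
    Value vexp = arith::AddIOp::create(rewriter, loc, veci, incr);
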
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index d0e3e88f131d3..0a5f5595bba56 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -316,8 +316,8 @@ static void genBuffers(CodegenEnv &env, OpBuilder &builder) {
         if (!isInit) {
           Value zero = constantZero(builder, loc,
                                     getElementTypeOrSelf(tensor.getType()));
-          builder.create<linalg::FillOp>(loc, ValueRange{zero},
-                                         ValueRange{init});
+          linalg::FillOp::create(builder, loc, ValueRange{zero},
+                                 ValueRange{init});
         }
         return init;
       },
@@ -379,7 +379,7 @@ static Value genInsertionLoad(CodegenEnv &env, OpBuilder &builder,
   }
   // Load from expanded access pattern.
   Value index = genIndex(env, t);
-  return builder.create<memref::LoadOp>(loc, env.getExpandValues(), index);
+  return memref::LoadOp::create(builder, loc, env.getExpandValues(), index);
 }
 
 /// Generates insertion code to implement dynamic tensor load for reduction.
@@ -395,22 +395,22 @@ static Value genInsertionLoadReduce(CodegenEnv &env, OpBuilder &builder,
   Value values = env.getExpandValues();
   Value filled = env.getExpandFilled();
   Value index = genIndex(env, t);
-  Value isFilled = builder.create<memref::LoadOp>(loc, filled, index);
-  Value valAtIndex = builder.create<memref::LoadOp>(loc, values, index);
-  return builder.create<arith::SelectOp>(loc, isFilled, valAtIndex, identity);
+  Value isFilled = memref::LoadOp::create(builder, loc, filled, index);
+  Value valAtIndex = memref::LoadOp::create(builder, loc, values, index);
+  return arith::SelectOp::create(builder, loc, isFilled, valAtIndex, identity);
 }
 
 static Value genConditionalInsert(Location loc, OpBuilder &builder, Value cond,
                                   Value sparseOut, ValueRange ivs, Value v) {
   scf::IfOp condInsert =
-      builder.create<scf::IfOp>(loc, sparseOut.getType(), cond, true);
+      scf::IfOp::create(builder, loc, sparseOut.getType(), cond, true);
   // True branch.
   builder.setInsertionPointToStart(condInsert.thenBlock());
-  Value res = builder.create<tensor::InsertOp>(loc, v, sparseOut, ivs);
-  builder.create<scf::YieldOp>(loc, res);
+  Value res = tensor::InsertOp::create(builder, loc, v, sparseOut, ivs);
+  scf::YieldOp::create(builder, loc, res);
   // False branch.
   builder.setInsertionPointToStart(condInsert.elseBlock());
-  builder.create<scf::YieldOp>(loc, sparseOut);
+  scf::YieldOp::create(builder, loc, sparseOut);
   // Value assignment.
   builder.setInsertionPointAfter(condInsert);
   return condInsert.getResult(0);
@@ -447,7 +447,7 @@ static void genInsertionStore(CodegenEnv &env, OpBuilder &builder, OpOperand *t,
         Value nz = genIsNonzero(builder, loc, rhs);
         sparseOut = genConditionalInsert(loc, builder, nz, chain, ivs, rhs);
       } else {
-        sparseOut = builder.create<tensor::InsertOp>(loc, rhs, chain, ivs);
+        sparseOut = tensor::InsertOp::create(builder, loc, rhs, chain, ivs);
       }
       // Generates regular insertion chain.
       env.updateInsertionChain(sparseOut);
@@ -468,25 +468,25 @@ static void genInsertionStore(CodegenEnv &env, OpBuilder &builder, OpOperand *t,
   Value fval = constantI1(builder, loc, false);
   Value tval = constantI1(builder, loc, true);
   // If statement.
-  Value isFilled = builder.create<memref::LoadOp>(loc, filled, index);
-  Value cond = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
-                                             isFilled, fval);
-  scf::IfOp ifOp = builder.create<scf::IfOp>(loc, builder.getIndexType(), cond,
-                                             /*else=*/true);
+  Value isFilled = memref::LoadOp::create(builder, loc, filled, index);
+  Value cond = arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::eq,
+                                     isFilled, fval);
+  scf::IfOp ifOp = scf::IfOp::create(builder, loc, builder.getIndexType(), cond,
+                                     /*else=*/true);
   // True branch.
   builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
-  builder.create<memref::StoreOp>(loc, tval, filled, index);
-  builder.create<memref::StoreOp>(loc, index, added, count);
+  memref::StoreOp::create(builder, loc, tval, filled, index);
+  memref::StoreOp::create(builder, loc, index, added, count);
   Value one = constantIndex(builder, loc, 1);
-  Value add = builder.create<arith::AddIOp>(loc, count, one);
-  builder.create<scf::YieldOp>(loc, add);
+  Value add = arith::AddIOp::create(builder, loc, count, one);
+  scf::YieldOp::create(builder, loc, add);
   // False branch.
   builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
-  builder.create<scf::YieldOp>(loc, count);
+  scf::YieldOp::create(builder, loc, count);
   builder.setInsertionPointAfter(ifOp);
   // Value assignment.
   env.updateExpandCount(ifOp.getResult(0));
-  builder.create<memref::StoreOp>(loc, rhs, values, index);
+  memref::StoreOp::create(builder, loc, rhs, values, index);
 }
 
 /// Generates a load on a dense or sparse tensor.
@@ -516,9 +516,10 @@ static Value genTensorLoad(CodegenEnv &env, OpBuilder &builder, ExprId exp) {
   if (llvm::isa<TensorType>(ptr.getType())) {
     assert(env.options().sparseEmitStrategy ==
            SparseEmitStrategy::kSparseIterator);
-    return builder.create<ExtractValOp>(loc, ptr, llvm::getSingleElement(args));
+    return ExtractValOp::create(builder, loc, ptr,
+                                llvm::getSingleElement(args));
   }
-  return builder.create<memref::LoadOp>(loc, ptr, args);
+  return memref::LoadOp::create(builder, loc, ptr, args);
 }
 
 /// Generates a store on a dense or sparse tensor.
@@ -545,7 +546,7 @@ static void genTensorStore(CodegenEnv &env, OpBuilder &builder, ExprId exp,
   if (!env.isSparseOutput(t)) {
     SmallVector<Value> args;
     Value ptr = genSubscript(env, builder, t, args);
-    builder.create<memref::StoreOp>(loc, rhs, ptr, args);
+    memref::StoreOp::create(builder, loc, rhs, ptr, args);
     return;
   }
   // Store during sparse insertion.
@@ -556,7 +557,7 @@ static void genTensorStore(CodegenEnv &env, OpBuilder &builder, ExprId exp,
   // Select operation insertion.
   Value chain = env.getInsertionChain();
   scf::IfOp ifOp =
-      builder.create<scf::IfOp>(loc, chain.getType(), rhs, /*else=*/true);
+      scf::IfOp::create(builder, loc, chain.getType(), rhs, /*else=*/true);
   builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
   // Existing value was preserved to be used here.
   assert(env.exp(exp).val);
@@ -565,10 +566,10 @@ static void genTensorStore(CodegenEnv &env, OpBuilder &builder, ExprId exp,
   env.merger().clearExprValue(exp);
   // Yield modified insertion chain along true branch.
   Value mchain = env.getInsertionChain();
-  builder.create<scf::YieldOp>(op.getLoc(), mchain);
+  scf::YieldOp::create(builder, op.getLoc(), mchain);
   // Yield original insertion chain along false branch.
   builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
-  builder.create<scf::YieldOp>(loc, chain);
+  scf::YieldOp::create(builder, loc, chain);
   // Done with if statement.
   env.updateInsertionChain(ifOp->getResult(0));
   builder.setInsertionPointAfter(ifOp);
@@ -597,7 +598,7 @@ static Value relinkBranch(CodegenEnv &env, RewriterBase &rewriter, Block *block,
       assert(!getSparseTensorType(t->get()).hasEncoding()); // dense!
       SmallVector<Value> args;
       Value ptr = genSubscript(env, rewriter, t, args);
-      return rewriter.create<memref::LoadOp>(op.getLoc(), ptr, args);
+      return memref::LoadOp::create(rewriter, op.getLoc(), ptr, args);
     }
   } else if (Operation *def = e.getDefiningOp()) {
     // Handle index computation.
@@ -768,7 +769,8 @@ static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopId curr,
     Type t2 = MemRefType::get(dynShape, builder.getI1Type());
     Type t3 = MemRefType::get(dynShape, builder.getIndexType());
     Type t4 = builder.getIndexType();
-    auto r = builder.create<ExpandOp>(loc, TypeRange({t1, t2, t3, t4}), tensor);
+    auto r =
+        ExpandOp::create(builder, loc, TypeRange({t1, t2, t3, t4}), tensor);
     assert(r.getNumResults() == 4);
     env.startExpand(r.getResult(0), r.getResult(1), r.getResult(2),
                     r.getResult(3));
@@ -781,8 +783,8 @@ static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopId curr,
     Value added = env.getExpandAdded();
     Value count = env.getExpandCount();
     Value chain = env.getInsertionChain();
-    Value compress = builder.create<CompressOp>(loc, values, filled, added,
-                                                count, chain, indices);
+    Value compress = CompressOp::create(builder, loc, values, filled, added,
+                                        count, chain, indices);
     env.updateInsertionChain(compress);
     env.endExpand();
   }
@@ -889,7 +891,7 @@ static void finalizeWhileOp(CodegenEnv &env, OpBuilder &builder,
         env.updateInsertionChain(ifOp->getResult(y++));
       }
       assert(y == yields.size());
-      builder.create<scf::YieldOp>(loc, yields);
+      scf::YieldOp::create(builder, loc, yields);
       builder.setInsertionPointAfter(ifOp);
     }
   }
@@ -942,13 +944,14 @@ static scf::IfOp genIf(CodegenEnv &env, OpBuilder &builder, LoopId curr,
           assert(lvl.has_value());
           const Value crd = env.emitter().getCoord(tid, *lvl);
           const Value lvar = env.getLoopVar(curr);
-          clause = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
-                                                 crd, lvar);
+          clause = arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::eq,
+                                         crd, lvar);
         } else {
           assert(lt.hasDenseSemantic() || isUndefLT(lt));
           clause = constantI1(builder, loc, true);
         }
-        cond = cond ? builder.create<arith::AndIOp>(loc, cond, clause) : clause;
+        cond =
+            cond ? arith::AndIOp::create(builder, loc, cond, clause) : clause;
       });
   if (env.isReduc()) {
     types.push_back(env.getReduc().getType());
@@ -959,7 +962,7 @@ static scf::IfOp genIf(CodegenEnv &env, OpBuilder &builder, LoopId curr,
     types.push_back(builder.getIndexType());
   if (env.getInsertionChain())
     types.push_back(env.getInsertionChain().getType());
-  scf::IfOp ifOp = builder.create<scf::IfOp>(loc, types, cond, /*else=*/true);
+  scf::IfOp ifOp = scf::IfOp::create(builder, loc, types, cond, /*else=*/true);
   builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
   return ifOp;
 }
@@ -987,7 +990,7 @@ static void endIf(CodegenEnv &env, OpBuilder &builder, scf::IfOp ifOp,
     env.updateInsertionChain(insInput);
   }
   if (!operands.empty())
-    builder.create<scf::YieldOp>(env.op().getLoc(), operands);
+    scf::YieldOp::create(builder, env.op().getLoc(), operands);
   builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
 }
 
@@ -1301,7 +1304,7 @@ static void genStmt(CodegenEnv &env, RewriterBase &rewriter, ExprId exp,
           genStmt(env, rewriter, ej, curr + 1);
           // TODO: handle yield values.
           assert(reduc.empty() && "Not Implemented");
-          rewriter.create<sparse_tensor::YieldOp>(env.op().getLoc());
+          sparse_tensor::YieldOp::create(rewriter, env.op().getLoc());
           return std::nullopt;
         });
         // endIf(env, rewriter, ifOp, redInput, cntInput, insInput, validIns);

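A hedged usage sketch for genConditionalInsert above, mirroring how
genInsertionStore guards a sparse insertion on a nonzero right-hand side so
the insertion chain only grows for values that are actually stored (all
names as in the surrounding code):

    Value nz = genIsNonzero(builder, loc, rhs);
    Value chain = env.getInsertionChain();
    // Emits scf.if: insert on the true branch, pass the chain through
    // unchanged on the false branch, and merge via the if result.
    Value out = genConditionalInsert(loc, builder, nz, chain, ivs, rhs);
    env.updateInsertionChain(out);
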
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/StageSparseOperations.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/StageSparseOperations.cpp
index 7835c6c3b7797..684a2d418f66c 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/StageSparseOperations.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/StageSparseOperations.cpp
@@ -41,7 +41,7 @@ struct GuardSparseAlloc
     // operation that leaves the underlying storage in a proper state
     // before the tensor escapes across the method boundary.
     rewriter.setInsertionPointAfter(op);
-    auto load = rewriter.create<LoadOp>(op.getLoc(), op.getResult(), true);
+    auto load = LoadOp::create(rewriter, op.getLoc(), op.getResult(), true);
     rewriter.replaceAllUsesExcept(op, load, load);
     return success();
   }
@@ -60,7 +60,7 @@ struct StageUnorderedSparseOps : public OpRewritePattern<StageWithSortOp> {
     // Deallocate tmpBuf.
     // TODO: Delegate to buffer deallocation pass in the future.
     if (succeeded(stageResult) && tmpBuf)
-      rewriter.create<bufferization::DeallocTensorOp>(loc, tmpBuf);
+      bufferization::DeallocTensorOp::create(rewriter, loc, tmpBuf);
 
     return stageResult;
   }

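Restating the GuardSparseAlloc rewrite above as a hedged sketch: the
allocation is materialized through sparse_tensor.load (the trailing `true`
presumably sets hasInserts) so the underlying storage is finalized before
the tensor escapes, and every other use is redirected to the loaded value:

    rewriter.setInsertionPointAfter(op);
    auto load = LoadOp::create(rewriter, op.getLoc(), op.getResult(),
                               /*hasInserts=*/true);
    // Keep the load itself using the original op; rewire everything else.
    rewriter.replaceAllUsesExcept(op, load, load);
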
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp
index 33be62d1d5e7e..f57f7f7fc0946 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp
@@ -153,7 +153,7 @@ Value sparse_tensor::genCast(OpBuilder &builder, Location loc, Value value,
 
   // int <=> index
   if (isa<IndexType>(srcTp) || isa<IndexType>(dstTp))
-    return builder.create<arith::IndexCastOp>(loc, dstTp, value);
+    return arith::IndexCastOp::create(builder, loc, dstTp, value);
 
   const auto srcIntTp = dyn_cast_or_null<IntegerType>(srcTp);
   const bool isUnsignedCast = srcIntTp ? srcIntTp.isUnsigned() : false;
@@ -166,19 +166,19 @@ Value sparse_tensor::genScalarToTensor(OpBuilder &builder, Location loc,
     // Scalars can only be converted to 0-ranked tensors.
     assert(rtp.getRank() == 0);
     elem = sparse_tensor::genCast(builder, loc, elem, rtp.getElementType());
-    return builder.create<tensor::FromElementsOp>(loc, rtp, elem);
+    return tensor::FromElementsOp::create(builder, loc, rtp, elem);
   }
   return sparse_tensor::genCast(builder, loc, elem, dstTp);
 }
 
 Value sparse_tensor::genIndexLoad(OpBuilder &builder, Location loc, Value mem,
                                   ValueRange s) {
-  Value load = builder.create<memref::LoadOp>(loc, mem, s);
+  Value load = memref::LoadOp::create(builder, loc, mem, s);
   if (!isa<IndexType>(load.getType())) {
     if (load.getType().getIntOrFloatBitWidth() < 64)
-      load = builder.create<arith::ExtUIOp>(loc, builder.getI64Type(), load);
+      load = arith::ExtUIOp::create(builder, loc, builder.getI64Type(), load);
     load =
-        builder.create<arith::IndexCastOp>(loc, builder.getIndexType(), load);
+        arith::IndexCastOp::create(builder, loc, builder.getIndexType(), load);
   }
   return load;
 }
@@ -203,13 +203,13 @@ Value mlir::sparse_tensor::genIsNonzero(OpBuilder &builder, mlir::Location loc,
   Type tp = v.getType();
   Value zero = constantZero(builder, loc, tp);
   if (isa<FloatType>(tp))
-    return builder.create<arith::CmpFOp>(loc, arith::CmpFPredicate::UNE, v,
-                                         zero);
+    return arith::CmpFOp::create(builder, loc, arith::CmpFPredicate::UNE, v,
+                                 zero);
   if (tp.isIntOrIndex())
-    return builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ne, v,
-                                         zero);
+    return arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::ne, v,
+                                 zero);
   if (isa<ComplexType>(tp))
-    return builder.create<complex::NotEqualOp>(loc, v, zero);
+    return complex::NotEqualOp::create(builder, loc, v, zero);
   llvm_unreachable("Non-numeric type");
 }
 
@@ -223,7 +223,7 @@ void mlir::sparse_tensor::genReshapeDstShape(
     for (const auto &map : llvm::enumerate(reassociation)) {
       auto dstDim = constantIndex(builder, loc, 1);
       for (unsigned i = start; i < start + map.value().size(); i++) {
-        dstDim = builder.create<arith::MulIOp>(loc, dstDim, srcShape[i]);
+        dstDim = arith::MulIOp::create(builder, loc, dstDim, srcShape[i]);
       }
       dstShape.push_back(dstDim);
       start = start + map.value().size();
@@ -257,7 +257,7 @@ void mlir::sparse_tensor::genReshapeDstShape(
         // Compute the dynamic dimension size.
         Value productVal = constantIndex(builder, loc, product);
         Value dynamicSize =
-            builder.create<arith::DivUIOp>(loc, srcDim, productVal);
+            arith::DivUIOp::create(builder, loc, srcDim, productVal);
         dstShape.push_back(dynamicSize);
       } else {
         // The expanded dimension is statically known.
@@ -286,7 +286,7 @@ void mlir::sparse_tensor::reshapeCvs(
     // Prepare strides information in dimension slice.
     Value linear = constantIndex(builder, loc, 1);
     for (unsigned j = start, end = start + map.value().size(); j < end; j++) {
-      linear = builder.create<arith::MulIOp>(loc, linear, sizes[j]);
+      linear = arith::MulIOp::create(builder, loc, linear, sizes[j]);
     }
     // Start expansion.
     Value val;
@@ -294,16 +294,17 @@ void mlir::sparse_tensor::reshapeCvs(
       val = srcCvs[i];
     // Iterate over dimension slice.
     for (unsigned j = start, end = start + map.value().size(); j < end; j++) {
-      linear = builder.create<arith::DivUIOp>(loc, linear, sizes[j]);
+      linear = arith::DivUIOp::create(builder, loc, linear, sizes[j]);
       if (isCollapse) {
-        const Value mul = builder.create<arith::MulIOp>(loc, srcCvs[j], linear);
-        val = val ? builder.create<arith::AddIOp>(loc, val, mul) : mul;
+        const Value mul =
+            arith::MulIOp::create(builder, loc, srcCvs[j], linear);
+        val = val ? arith::AddIOp::create(builder, loc, val, mul) : mul;
       } else {
         const Value old = val;
-        val = builder.create<arith::DivUIOp>(loc, val, linear);
+        val = arith::DivUIOp::create(builder, loc, val, linear);
         assert(dstCvs.size() == j);
         dstCvs.push_back(val);
-        val = builder.create<arith::RemUIOp>(loc, old, linear);
+        val = arith::RemUIOp::create(builder, loc, old, linear);
       }
     }
     // Finalize collapse.
@@ -326,8 +327,8 @@ FlatSymbolRefAttr mlir::sparse_tensor::getFunc(ModuleOp module, StringRef name,
   auto func = module.lookupSymbol<func::FuncOp>(result.getAttr());
   if (!func) {
     OpBuilder moduleBuilder(module.getBodyRegion());
-    func = moduleBuilder.create<func::FuncOp>(
-        module.getLoc(), name,
+    func = func::FuncOp::create(
+        moduleBuilder, module.getLoc(), name,
         FunctionType::get(context, operands.getTypes(), resultType));
     func.setPrivate();
     if (static_cast<bool>(emitCInterface))
@@ -343,7 +344,7 @@ func::CallOp mlir::sparse_tensor::createFuncCall(
   auto module = builder.getBlock()->getParentOp()->getParentOfType<ModuleOp>();
   FlatSymbolRefAttr fn =
       getFunc(module, name, resultType, operands, emitCInterface);
-  return builder.create<func::CallOp>(loc, resultType, fn, operands);
+  return func::CallOp::create(builder, loc, resultType, fn, operands);
 }
 
 Type mlir::sparse_tensor::getOpaquePointerType(MLIRContext *ctx) {
@@ -358,7 +359,7 @@ Value mlir::sparse_tensor::genAlloca(OpBuilder &builder, Location loc,
                                      unsigned sz, Type tp, bool staticShape) {
   if (staticShape) {
     auto memTp = MemRefType::get({sz}, tp);
-    return builder.create<memref::AllocaOp>(loc, memTp);
+    return memref::AllocaOp::create(builder, loc, memTp);
   }
   return genAlloca(builder, loc, constantIndex(builder, loc, sz), tp);
 }
@@ -366,12 +367,12 @@ Value mlir::sparse_tensor::genAlloca(OpBuilder &builder, Location loc,
 Value mlir::sparse_tensor::genAlloca(OpBuilder &builder, Location loc, Value sz,
                                      Type tp) {
   auto memTp = MemRefType::get({ShapedType::kDynamic}, tp);
-  return builder.create<memref::AllocaOp>(loc, memTp, ValueRange{sz});
+  return memref::AllocaOp::create(builder, loc, memTp, ValueRange{sz});
 }
 
 Value mlir::sparse_tensor::genAllocaScalar(OpBuilder &builder, Location loc,
                                            Type tp) {
-  return builder.create<memref::AllocaOp>(loc, MemRefType::get({}, tp));
+  return memref::AllocaOp::create(builder, loc, MemRefType::get({}, tp));
 }
 
 Value mlir::sparse_tensor::allocaBuffer(OpBuilder &builder, Location loc,
@@ -381,7 +382,7 @@ Value mlir::sparse_tensor::allocaBuffer(OpBuilder &builder, Location loc,
   Value buffer = genAlloca(builder, loc, sz, values[0].getType());
   for (unsigned i = 0; i < sz; i++) {
     Value idx = constantIndex(builder, loc, i);
-    builder.create<memref::StoreOp>(loc, values[i], buffer, idx);
+    memref::StoreOp::create(builder, loc, values[i], buffer, idx);
   }
   return buffer;
 }
@@ -397,15 +398,15 @@ Value mlir::sparse_tensor::allocDenseTensor(OpBuilder &builder, Location loc,
     if (shape[i] == ShapedType::kDynamic)
       dynamicSizes.push_back(sizes[i]);
   }
-  Value mem = builder.create<memref::AllocOp>(loc, memTp, dynamicSizes);
+  Value mem = memref::AllocOp::create(builder, loc, memTp, dynamicSizes);
   Value zero = constantZero(builder, loc, elemTp);
-  builder.create<linalg::FillOp>(loc, ValueRange{zero}, ValueRange{mem});
+  linalg::FillOp::create(builder, loc, ValueRange{zero}, ValueRange{mem});
   return mem;
 }
 
 void mlir::sparse_tensor::deallocDenseTensor(OpBuilder &builder, Location loc,
                                              Value buffer) {
-  builder.create<memref::DeallocOp>(loc, buffer);
+  memref::DeallocOp::create(builder, loc, buffer);
 }
 
 void mlir::sparse_tensor::sizesFromSrc(OpBuilder &builder,
@@ -483,17 +484,17 @@ void sparse_tensor::foreachInSparseConstant(
     cvs.clear();
     for (Dimension d = 0; d < dimRank; d++) {
       auto crd = elems[i].first[d].getInt();
-      cvs.push_back(builder.create<arith::ConstantIndexOp>(loc, crd));
+      cvs.push_back(arith::ConstantIndexOp::create(builder, loc, crd));
     }
     // Remap value.
     Value val;
     if (isa<ComplexType>(attr.getElementType())) {
       auto valAttr = cast<ArrayAttr>(elems[i].second);
-      val = builder.create<complex::ConstantOp>(loc, attr.getElementType(),
-                                                valAttr);
+      val = complex::ConstantOp::create(builder, loc, attr.getElementType(),
+                                        valAttr);
     } else {
       auto valAttr = cast<TypedAttr>(elems[i].second);
-      val = builder.create<arith::ConstantOp>(loc, valAttr);
+      val = arith::ConstantOp::create(builder, loc, valAttr);
     }
     assert(val);
     callback(cvs, val);
@@ -513,10 +514,10 @@ SmallVector<Value> sparse_tensor::loadAll(OpBuilder &builder, Location loc,
   SmallVector<Value> vs;
   vs.reserve(size);
   for (unsigned i = 0; i < size; i++) {
-    Value v = builder.create<memref::LoadOp>(loc, mem,
-                                             constantIndex(builder, loc, i));
+    Value v = memref::LoadOp::create(builder, loc, mem,
+                                     constantIndex(builder, loc, i));
     if (i == offsetIdx && offsetVal)
-      v = builder.create<arith::AddIOp>(loc, v, offsetVal);
+      v = arith::AddIOp::create(builder, loc, v, offsetVal);
     vs.push_back(v);
   }
   return vs;
@@ -535,10 +536,10 @@ void sparse_tensor::storeAll(OpBuilder &builder, Location loc, Value mem,
   for (const auto &v : llvm::enumerate(vs)) {
     const Value w =
         (offsetIdx == v.index() && offsetVal)
-            ? builder.create<arith::AddIOp>(loc, v.value(), offsetVal)
+            ? arith::AddIOp::create(builder, loc, v.value(), offsetVal)
             : v.value();
-    builder.create<memref::StoreOp>(loc, w, mem,
-                                    constantIndex(builder, loc, v.index()));
+    memref::StoreOp::create(builder, loc, w, mem,
+                            constantIndex(builder, loc, v.index()));
   }
 }
 
@@ -547,7 +548,7 @@ sparse_tensor::genToMemref(OpBuilder &builder, Location loc, Value tensor) {
   auto tTp = llvm::cast<TensorType>(tensor.getType());
   auto mTp = MemRefType::get(tTp.getShape(), tTp.getElementType());
   return cast<TypedValue<BaseMemRefType>>(
-      builder.create<bufferization::ToBufferOp>(loc, mTp, tensor).getResult());
+      bufferization::ToBufferOp::create(builder, loc, mTp, tensor).getResult());
 }
 
 Value sparse_tensor::createOrFoldSliceOffsetOp(OpBuilder &builder, Location loc,
@@ -557,7 +558,7 @@ Value sparse_tensor::createOrFoldSliceOffsetOp(OpBuilder &builder, Location loc,
   std::optional<unsigned> offset = enc.getStaticDimSliceOffset(dim);
   if (offset.has_value())
     return constantIndex(builder, loc, *offset);
-  return builder.create<ToSliceOffsetOp>(loc, tensor, APInt(64, dim));
+  return ToSliceOffsetOp::create(builder, loc, tensor, APInt(64, dim));
 }
 
 Value sparse_tensor::createOrFoldSliceStrideOp(OpBuilder &builder, Location loc,
@@ -567,7 +568,7 @@ Value sparse_tensor::createOrFoldSliceStrideOp(OpBuilder &builder, Location loc,
   std::optional<unsigned> stride = enc.getStaticDimSliceStride(dim);
   if (stride.has_value())
     return constantIndex(builder, loc, *stride);
-  return builder.create<ToSliceStrideOp>(loc, tensor, APInt(64, dim));
+  return ToSliceStrideOp::create(builder, loc, tensor, APInt(64, dim));
 }
 
 Value sparse_tensor::genReader(OpBuilder &builder, Location loc,
@@ -609,8 +610,8 @@ Value sparse_tensor::genReader(OpBuilder &builder, Location loc,
     // subsequent clients need the values (DCE will remove unused).
     for (Dimension d = 0; d < dimRank; d++) {
       if (stt.isDynamicDim(d))
-        dimSizesValues[d] = builder.create<memref::LoadOp>(
-            loc, dimSizesBuffer, constantIndex(builder, loc, d));
+        dimSizesValues[d] = memref::LoadOp::create(
+            builder, loc, dimSizesBuffer, constantIndex(builder, loc, d));
     }
   }
   return reader;
@@ -686,8 +687,8 @@ Value sparse_tensor::genMapBuffers(
     if (cm == 0) {
       lvlSz = dimSizesValues[d];
       if (cf != 0)
-        lvlSz = builder.create<arith::DivUIOp>(loc, lvlSz,
-                                               constantIndex(builder, loc, cf));
+        lvlSz = arith::DivUIOp::create(builder, loc, lvlSz,
+                                       constantIndex(builder, loc, cf));
     } else {
       lvlSz = constantIndex(builder, loc, cm);
     }

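A hedged usage sketch for genIndexLoad above, assuming a hypothetical
`positions` memref and subscript `i`: the helper hides the widen-then-cast
sequence needed when positions are stored narrower than 64 bits.

    Value pos = genIndexLoad(builder, loc, positions, ValueRange{i});
    // Roughly what it expands to when the buffer element type is i32:
    Value raw = memref::LoadOp::create(builder, loc, positions, ValueRange{i});
    Value ext = arith::ExtUIOp::create(builder, loc, builder.getI64Type(), raw);
    Value idx =
        arith::IndexCastOp::create(builder, loc, builder.getIndexType(), ext);
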
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
index dc017e6baa6dc..1c10dd5566184 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
@@ -97,8 +97,8 @@ class FuncCallOrInlineGenerator {
      // Create the function if it does not already exist.
       OpBuilder::InsertionGuard insertionGuard(builder);
       builder.setInsertionPoint(getParentOpOf<func::FuncOp>(builder));
-      func = builder.create<func::FuncOp>(
-          loc, funcName,
+      func = func::FuncOp::create(
+          builder, loc, funcName,
           FunctionType::get(context, params.getTypes(), retTypes));
       func.setPrivate();
       // Set the insertion point to the body of the function.
@@ -108,10 +108,10 @@ class FuncCallOrInlineGenerator {
      // Delegates to the user to generate the actual implementation.
       SmallVector<Value> result =
           genImplementation(retTypes, args, builder, loc);
-      builder.create<func::ReturnOp>(loc, result);
+      func::ReturnOp::create(builder, loc, result);
     }
     // Returns the CallOp result.
-    func::CallOp call = builder.create<func::CallOp>(loc, func, params);
+    func::CallOp call = func::CallOp::create(builder, loc, func, params);
     return call.getResults();
   }
 
@@ -310,9 +310,9 @@ inline Value constantZero(OpBuilder &builder, Location loc, Type tp) {
   if (auto ctp = dyn_cast<ComplexType>(tp)) {
     auto zeroe = builder.getZeroAttr(ctp.getElementType());
     auto zeroa = builder.getArrayAttr({zeroe, zeroe});
-    return builder.create<complex::ConstantOp>(loc, tp, zeroa);
+    return complex::ConstantOp::create(builder, loc, tp, zeroa);
   }
-  return builder.create<arith::ConstantOp>(loc, tp, builder.getZeroAttr(tp));
+  return arith::ConstantOp::create(builder, loc, tp, builder.getZeroAttr(tp));
 }
 
 /// Generates a 1-valued constant of the given type.  This supports all
@@ -322,39 +322,39 @@ inline Value constantOne(OpBuilder &builder, Location loc, Type tp) {
     auto zeroe = builder.getZeroAttr(ctp.getElementType());
     auto onee = getOneAttr(builder, ctp.getElementType());
     auto zeroa = builder.getArrayAttr({onee, zeroe});
-    return builder.create<complex::ConstantOp>(loc, tp, zeroa);
+    return complex::ConstantOp::create(builder, loc, tp, zeroa);
   }
-  return builder.create<arith::ConstantOp>(loc, tp, getOneAttr(builder, tp));
+  return arith::ConstantOp::create(builder, loc, tp, getOneAttr(builder, tp));
 }
 
 /// Generates a constant of `index` type.
 inline Value constantIndex(OpBuilder &builder, Location loc, int64_t i) {
-  return builder.create<arith::ConstantIndexOp>(loc, i);
+  return arith::ConstantIndexOp::create(builder, loc, i);
 }
 
 /// Generates a constant of `i64` type.
 inline Value constantI64(OpBuilder &builder, Location loc, int64_t i) {
-  return builder.create<arith::ConstantIntOp>(loc, i, 64);
+  return arith::ConstantIntOp::create(builder, loc, i, 64);
 }
 
 /// Generates a constant of `i32` type.
 inline Value constantI32(OpBuilder &builder, Location loc, int32_t i) {
-  return builder.create<arith::ConstantIntOp>(loc, i, 32);
+  return arith::ConstantIntOp::create(builder, loc, i, 32);
 }
 
 /// Generates a constant of `i16` type.
 inline Value constantI16(OpBuilder &builder, Location loc, int16_t i) {
-  return builder.create<arith::ConstantIntOp>(loc, i, 16);
+  return arith::ConstantIntOp::create(builder, loc, i, 16);
 }
 
 /// Generates a constant of `i8` type.
 inline Value constantI8(OpBuilder &builder, Location loc, int8_t i) {
-  return builder.create<arith::ConstantIntOp>(loc, i, 8);
+  return arith::ConstantIntOp::create(builder, loc, i, 8);
 }
 
 /// Generates a constant of `i1` type.
 inline Value constantI1(OpBuilder &builder, Location loc, bool b) {
-  return builder.create<arith::ConstantIntOp>(loc, b, 1);
+  return arith::ConstantIntOp::create(builder, loc, b, 1);
 }
 
 /// Generates a constant of the given `Action`.
@@ -400,12 +400,12 @@ inline Value constantLevelTypeEncoding(OpBuilder &builder, Location loc,
 inline Value genValFromAttr(OpBuilder &builder, Location loc, Attribute attr) {
   if (auto complexAttr = dyn_cast<complex::NumberAttr>(attr)) {
     Type tp = cast<ComplexType>(complexAttr.getType()).getElementType();
-    return builder.create<complex::ConstantOp>(
-        loc, complexAttr.getType(),
+    return complex::ConstantOp::create(
+        builder, loc, complexAttr.getType(),
         builder.getArrayAttr({FloatAttr::get(tp, complexAttr.getReal()),
                               FloatAttr::get(tp, complexAttr.getImag())}));
   }
-  return builder.create<arith::ConstantOp>(loc, cast<TypedAttr>(attr));
+  return arith::ConstantOp::create(builder, loc, cast<TypedAttr>(attr));
 }
 
 // TODO: is this at the right place?

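A hedged usage sketch for the constant helpers above; each is a thin wrapper
over a single static create call:

    Value i0 = constantIndex(builder, loc, 0);  // arith.constant 0 : index
    Value b1 = constantI1(builder, loc, true);  // arith.constant true
    Value f0 = constantZero(builder, loc, builder.getF64Type()); // 0.0 : f64
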
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
index a77e3036ac519..659282a995123 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
@@ -25,18 +25,18 @@ using namespace mlir::sparse_tensor;
 //===----------------------------------------------------------------------===//
 
 #define CMPI(p, l, r)                                                          \
-  (builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::p, (l), (r))       \
+  (arith::CmpIOp::create(builder, loc, arith::CmpIPredicate::p, (l), (r))      \
        .getResult())
 
 #define C_IDX(v) (constantIndex(builder, loc, (v)))
-#define YIELD(vs) (builder.create<scf::YieldOp>(loc, (vs)))
-#define ADDI(lhs, rhs) (builder.create<arith::AddIOp>(loc, (lhs), (rhs)))
-#define ANDI(lhs, rhs) (builder.create<arith::AndIOp>(loc, (lhs), (rhs)))
-#define SUBI(lhs, rhs) (builder.create<arith::SubIOp>(loc, (lhs), (rhs)))
-#define MULI(lhs, rhs) (builder.create<arith::MulIOp>(loc, (lhs), (rhs)))
-#define REMUI(lhs, rhs) (builder.create<arith::RemUIOp>(loc, (lhs), (rhs)))
-#define DIVUI(lhs, rhs) (builder.create<arith::DivUIOp>(loc, (lhs), (rhs)))
-#define SELECT(c, l, r) (builder.create<arith::SelectOp>(loc, (c), (l), (r)))
+#define YIELD(vs) (scf::YieldOp::create(builder, loc, (vs)))
+#define ADDI(lhs, rhs) (arith::AddIOp::create(builder, loc, (lhs), (rhs)))
+#define ANDI(lhs, rhs) (arith::AndIOp::create(builder, loc, (lhs), (rhs)))
+#define SUBI(lhs, rhs) (arith::SubIOp::create(builder, loc, (lhs), (rhs)))
+#define MULI(lhs, rhs) (arith::MulIOp::create(builder, loc, (lhs), (rhs)))
+#define REMUI(lhs, rhs) (arith::RemUIOp::create(builder, loc, (lhs), (rhs)))
+#define DIVUI(lhs, rhs) (arith::DivUIOp::create(builder, loc, (lhs), (rhs)))
+#define SELECT(c, l, r) (arith::SelectOp::create(builder, loc, (c), (l), (r)))
 
 //===----------------------------------------------------------------------===//
 // Debugging utils
@@ -45,8 +45,8 @@ using namespace mlir::sparse_tensor;
 #ifndef NDEBUG
 LLVM_ATTRIBUTE_UNUSED static void dumpIndexMemRef(OpBuilder &builder,
                                                   Location loc, Value memref) {
-  memref = builder.create<memref::CastOp>(
-      loc, UnrankedMemRefType::get(builder.getIndexType(), 0), memref);
+  memref = memref::CastOp::create(
+      builder, loc, UnrankedMemRefType::get(builder.getIndexType(), 0), memref);
   createFuncCall(builder, loc, "printMemrefInd", TypeRange{},
                  ValueRange{memref}, EmitCInterface::On);
 }
@@ -261,7 +261,7 @@ void LoopEmitter::initializeLoopEmit(
         denseTp = bufferization::getMemRefTypeWithFullyDynamicLayout(rtp);
 
       Value denseVal =
-          builder.create<bufferization::ToBufferOp>(loc, denseTp, tensor);
+          bufferization::ToBufferOp::create(builder, loc, denseTp, tensor);
       // Dense outputs need special handling.
       if (isOutput && updater)
         denseVal = updater(builder, loc, denseVal, tensor);
@@ -271,7 +271,7 @@ void LoopEmitter::initializeLoopEmit(
       // Annotated sparse tensors.
       // We also need the value buffer for all-dense annotated "sparse"
       // tensors.
-      valBuffer[t] = builder.create<ToValuesOp>(loc, tensor);
+      valBuffer[t] = ToValuesOp::create(builder, loc, tensor);
     }
   }
 
@@ -479,7 +479,7 @@ std::pair<Operation *, Value> LoopEmitter::emitForLoopOverTensorAtLvl(
   Value iv;
   if (isParallel) {
     scf::ParallelOp parOp =
-        builder.create<scf::ParallelOp>(loc, lo, hi, step, reduc);
+        scf::ParallelOp::create(builder, loc, lo, hi, step, reduc);
     builder.setInsertionPointToStart(parOp.getBody());
     assert(parOp.getNumReductions() == reduc.size());
     iv = parOp.getInductionVars()[0];
@@ -495,7 +495,7 @@ std::pair<Operation *, Value> LoopEmitter::emitForLoopOverTensorAtLvl(
       reduc[i] = parOp.getInitVals()[i];
     loop = parOp;
   } else {
-    scf::ForOp forOp = builder.create<scf::ForOp>(loc, lo, hi, step, reduc);
+    scf::ForOp forOp = scf::ForOp::create(builder, loc, lo, hi, step, reduc);
     builder.setInsertionPointToStart(forOp.getBody());
     iv = forOp.getInductionVar();
 
@@ -603,12 +603,12 @@ Operation *LoopEmitter::enterCoIterationOverTensorsAtLvls(
 
       // Extract and iterate over the iteration space.
       ExtractIterSpaceOp extractSpaceOp =
-          lvl == 0 ? builder.create<ExtractIterSpaceOp>(loc, t)
-                   : builder.create<ExtractIterSpaceOp>(
-                         loc, t, spIterVals[tid][lvl - 1], lvl);
+          lvl == 0 ? ExtractIterSpaceOp::create(builder, loc, t)
+                   : ExtractIterSpaceOp::create(builder, loc, t,
+                                                spIterVals[tid][lvl - 1], lvl);
 
-      IterateOp iterOp = builder.create<IterateOp>(
-          loc, extractSpaceOp.getExtractedSpace(), reduc);
+      IterateOp iterOp = IterateOp::create(
+          builder, loc, extractSpaceOp.getExtractedSpace(), reduc);
       spIterVals[tid][lvl] = iterOp.getIterator();
 
      // Update the reduction variables.
@@ -625,12 +625,12 @@ Operation *LoopEmitter::enterCoIterationOverTensorsAtLvls(
     for (auto [tid, lvl] : unpackTensorLevelRange(tidLvls)) {
       Value t = tensors[tid];
       ExtractIterSpaceOp extractSpaceOp =
-          lvl == 0 ? builder.create<ExtractIterSpaceOp>(loc, t)
-                   : builder.create<ExtractIterSpaceOp>(
-                         loc, t, spIterVals[tid][lvl - 1], lvl);
+          lvl == 0 ? ExtractIterSpaceOp::create(builder, loc, t)
+                   : ExtractIterSpaceOp::create(builder, loc, t,
+                                                spIterVals[tid][lvl - 1], lvl);
       spaces.push_back(extractSpaceOp.getExtractedSpace());
     }
-    auto coIterOp = builder.create<CoIterateOp>(loc, spaces, reduc, numCases);
+    auto coIterOp = CoIterateOp::create(builder, loc, spaces, reduc, numCases);
    // The CoIterateOp has neither an insertion block nor an induction variable.
    // TODO: the `struct LoopInfo` should be simplified after full migration.
     loopStack.emplace_back(tidLvls, coIterOp, /*insertion block*/ nullptr,
@@ -728,7 +728,7 @@ void LoopEmitter::exitForLoop(RewriterBase &rewriter, Location loc,
   if (emitStrategy == SparseEmitStrategy::kSparseIterator) {
     auto iterateOp = llvm::cast<IterateOp>(loopInfo.loop);
     assert(reduc.size() == iterateOp.getNumResults());
-    rewriter.create<sparse_tensor::YieldOp>(loc, reduc);
+    sparse_tensor::YieldOp::create(rewriter, loc, reduc);
     // Exit the loop.
     rewriter.setInsertionPointAfter(iterateOp);
     // In-place update reduction variables.
@@ -738,7 +738,7 @@ void LoopEmitter::exitForLoop(RewriterBase &rewriter, Location loc,
   if (auto forOp = llvm::dyn_cast<scf::ForOp>(loopInfo.loop)) {
     if (!reduc.empty()) {
       assert(reduc.size() == forOp.getNumResults());
-      rewriter.create<scf::YieldOp>(loc, reduc);
+      scf::YieldOp::create(rewriter, loc, reduc);
     }
     // Exit the loop.
     rewriter.setInsertionPointAfter(forOp);
@@ -777,7 +777,7 @@ void LoopEmitter::exitForLoop(RewriterBase &rewriter, Location loc,
 #endif // NDEBUG
 
       rewriter.setInsertionPointAfter(redExp);
-      auto redOp = rewriter.create<scf::ReduceOp>(loc, curVal);
+      auto redOp = scf::ReduceOp::create(rewriter, loc, curVal);
       // Attach to the reduction op.
       Block *redBlock = &redOp.getReductions().front().front();
       rewriter.setInsertionPointToEnd(redBlock);
@@ -789,7 +789,7 @@ void LoopEmitter::exitForLoop(RewriterBase &rewriter, Location loc,
       // Erases the out-dated reduction expression.
       rewriter.eraseOp(redExp);
       rewriter.setInsertionPointToEnd(redBlock);
-      rewriter.create<scf::ReduceReturnOp>(loc, newRed->getResult(0));
+      scf::ReduceReturnOp::create(rewriter, loc, newRed->getResult(0));
     }
     rewriter.setInsertionPointAfter(parOp);
     // In-place update reduction variables.
@@ -863,7 +863,7 @@ void LoopEmitter::exitCurrentLoop(RewriterBase &rewriter, Location loc,
   if (emitStrategy == SparseEmitStrategy::kSparseIterator) {
     Operation *p = loopInfo.loop;
     if (isa<IterateOp>(p))
-      rewriter.create<sparse_tensor::YieldOp>(loc, reduc);
+      sparse_tensor::YieldOp::create(rewriter, loc, reduc);
 
     // Exit the loop.
     rewriter.setInsertionPointAfter(p);
@@ -929,7 +929,7 @@ std::pair<Operation *, Value> sparse_tensor::genCoIteration(
   // Ensures all operands are valid.
   assert(!llvm::is_contained(ivs, nullptr));
   TypeRange types = ValueRange(ivs).getTypes();
-  auto whileOp = builder.create<scf::WhileOp>(loc, types, ivs);
+  auto whileOp = scf::WhileOp::create(builder, loc, types, ivs);
 
   SmallVector<Location> locs(types.size(), loc);
   Block *before = builder.createBlock(&whileOp.getBefore(), {}, types, locs);
@@ -948,7 +948,7 @@ std::pair<Operation *, Value> sparse_tensor::genCoIteration(
   // The remaining block arguments are user-provided reduction values and an
   // optional universal index. Make sure their sizes match.
   assert(bArgs.size() == reduc.size() + (uniIdx ? 1 : 0));
-  builder.create<scf::ConditionOp>(loc, whileCond, before->getArguments());
+  scf::ConditionOp::create(builder, loc, whileCond, before->getArguments());
 
   // Generates loop body.
   builder.setInsertionPointToStart(after);

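For readers skimming the hunks above: the change is purely mechanical. Every
`builder.create<OpTy>(loc, args...)` call becomes `OpTy::create(builder, loc,
args...)` with the argument list otherwise unchanged (and likewise for
`rewriter`). A minimal sketch of the new form, using the real
`arith::ConstantIndexOp` but a hypothetical helper name:

    #include "mlir/Dialect/Arith/IR/Arith.h"
    using namespace mlir;

    // Builds the constant index 0 at the builder's current insertion point.
    static Value buildZeroIndex(OpBuilder &b, Location loc) {
      // Old form: b.create<arith::ConstantIndexOp>(loc, 0);
      return arith::ConstantIndexOp::create(b, loc, 0);
    }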
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.cpp
index 1c8a4789e2065..3b3b0aadf638c 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.cpp
@@ -78,15 +78,16 @@ SparseTensorTypeToBufferConverter::SparseTensorTypeToBufferConverter() {
 
 Value SparseTensorSpecifier::getInitValue(OpBuilder &builder, Location loc,
                                           SparseTensorType stt) {
-  return builder.create<StorageSpecifierInitOp>(
-      loc, StorageSpecifierType::get(stt.getEncoding()));
+  return StorageSpecifierInitOp::create(
+      builder, loc, StorageSpecifierType::get(stt.getEncoding()));
 }
 
 Value SparseTensorSpecifier::getSpecifierField(OpBuilder &builder, Location loc,
                                                StorageSpecifierKind kind,
                                                std::optional<Level> lvl) {
-  return builder.create<GetStorageSpecifierOp>(
-      loc, specifier, kind, optionalLevelAttr(specifier.getContext(), lvl));
+  return GetStorageSpecifierOp::create(
+      builder, loc, specifier, kind,
+      optionalLevelAttr(specifier.getContext(), lvl));
 }
 
 void SparseTensorSpecifier::setSpecifierField(OpBuilder &builder, Location loc,
@@ -95,8 +96,9 @@ void SparseTensorSpecifier::setSpecifierField(OpBuilder &builder, Location loc,
                                               std::optional<Level> lvl) {
   // TODO: make `v` have type `TypedValue<IndexType>` instead.
   assert(v.getType().isIndex());
-  specifier = builder.create<SetStorageSpecifierOp>(
-      loc, specifier, kind, optionalLevelAttr(specifier.getContext(), lvl), v);
+  specifier = SetStorageSpecifierOp::create(
+      builder, loc, specifier, kind,
+      optionalLevelAttr(specifier.getContext(), lvl), v);
 }
 
 //===----------------------------------------------------------------------===//
@@ -111,9 +113,9 @@ Value sparse_tensor::SparseTensorDescriptor::getCrdMemRefOrView(
 
   Value stride = constantIndex(builder, loc, rType.getLvlRank() - cooStart);
   Value size = getCrdMemSize(builder, loc, cooStart);
-  size = builder.create<arith::DivUIOp>(loc, size, stride);
-  return builder.create<memref::SubViewOp>(
-      loc, getMemRefField(SparseTensorFieldKind::CrdMemRef, cooStart),
+  size = arith::DivUIOp::create(builder, loc, size, stride);
+  return memref::SubViewOp::create(
+      builder, loc, getMemRefField(SparseTensorFieldKind::CrdMemRef, cooStart),
       /*offset=*/ValueRange{constantIndex(builder, loc, lvl - cooStart)},
       /*size=*/ValueRange{size},
       /*step=*/ValueRange{stride});

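For intuition on the subview above: a COO region stores the coordinates of all
its trailing levels interleaved (AoS) in one buffer, so a single level's
coordinates form a strided view. With cooStart = 0, lvl = 1, and stride = 2, a
combined buffer of 8 entries holds level-0 coordinates at positions
{0, 2, 4, 6} and level-1 coordinates at positions {1, 3, 5, 7}; the generated
memref.subview with offset lvl - cooStart = 1, size 8 / 2 = 4, and step 2
selects exactly the latter.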
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h
index 869c7864d7535..45d142a807c36 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h
@@ -231,7 +231,7 @@ class MutSparseTensorDescriptor
 /// Packs the given values as a "tuple" value.
 inline Value genTuple(OpBuilder &builder, Location loc, Type tp,
                       ValueRange values) {
-  return builder.create<UnrealizedConversionCastOp>(loc, TypeRange(tp), values)
+  return UnrealizedConversionCastOp::create(builder, loc, TypeRange(tp), values)
       .getResult(0);
 }
 

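The `genTuple` helper above packs values into a "tuple" via an unrealized
conversion cast; the opposite direction goes through the same mechanism. A
minimal sketch of a hypothetical inverse helper (name and signature
illustrative, not part of this patch):

    // Unpacks a "tuple" value back into its constituent SSA values.
    inline ValueRange genUnpackTuple(OpBuilder &builder, Location loc,
                                     TypeRange tps, Value tuple) {
      return UnrealizedConversionCastOp::create(builder, loc, tps, tuple)
          .getResults();
    }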
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorIterator.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorIterator.cpp
index aad5e97ed14ab..46d0baac58f06 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorIterator.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorIterator.cpp
@@ -22,23 +22,23 @@ using ValueTuple = std::tuple<Value, Value, Value>;
 // File local helper functions/macros.
 //===----------------------------------------------------------------------===//
 #define CMPI(p, lhs, rhs)                                                      \
-  (b.create<arith::CmpIOp>(l, arith::CmpIPredicate::p, (lhs), (rhs))           \
+  (arith::CmpIOp::create(b, l, arith::CmpIPredicate::p, (lhs), (rhs))          \
        .getResult())
 
 #define C_FALSE (constantI1(b, l, false))
 #define C_TRUE (constantI1(b, l, true))
 #define C_IDX(v) (constantIndex(b, l, (v)))
-#define YIELD(vs) (b.create<scf::YieldOp>(l, (vs)))
-#define ADDI(lhs, rhs) (b.create<arith::AddIOp>(l, (lhs), (rhs)).getResult())
-#define ORI(lhs, rhs) (b.create<arith::OrIOp>(l, (lhs), (rhs)).getResult())
-#define ANDI(lhs, rhs) (b.create<arith::AndIOp>(l, (lhs), (rhs)).getResult())
-#define SUBI(lhs, rhs) (b.create<arith::SubIOp>(l, (lhs), (rhs)).getResult())
-#define MULI(lhs, rhs) (b.create<arith::MulIOp>(l, (lhs), (rhs)).getResult())
-#define MINUI(lhs, rhs) (b.create<arith::MinUIOp>(l, (lhs), (rhs)).getResult())
-#define REMUI(lhs, rhs) (b.create<arith::RemUIOp>(l, (lhs), (rhs)).getResult())
-#define DIVUI(lhs, rhs) (b.create<arith::DivUIOp>(l, (lhs), (rhs)).getResult())
+#define YIELD(vs) (scf::YieldOp::create(b, l, (vs)))
+#define ADDI(lhs, rhs) (arith::AddIOp::create(b, l, (lhs), (rhs)).getResult())
+#define ORI(lhs, rhs) (arith::OrIOp::create(b, l, (lhs), (rhs)).getResult())
+#define ANDI(lhs, rhs) (arith::AndIOp::create(b, l, (lhs), (rhs)).getResult())
+#define SUBI(lhs, rhs) (arith::SubIOp::create(b, l, (lhs), (rhs)).getResult())
+#define MULI(lhs, rhs) (arith::MulIOp::create(b, l, (lhs), (rhs)).getResult())
+#define MINUI(lhs, rhs) (arith::MinUIOp::create(b, l, (lhs), (rhs)).getResult())
+#define REMUI(lhs, rhs) (arith::RemUIOp::create(b, l, (lhs), (rhs)).getResult())
+#define DIVUI(lhs, rhs) (arith::DivUIOp::create(b, l, (lhs), (rhs)).getResult())
 #define SELECT(c, lhs, rhs)                                                    \
-  (b.create<arith::SelectOp>(l, (c), (lhs), (rhs)).getResult())
+  (arith::SelectOp::create(b, l, (c), (lhs), (rhs)).getResult())
 
 //===----------------------------------------------------------------------===//
 // SparseTensorLevel derived classes.
@@ -150,19 +150,19 @@ class CompressedLevel : public SparseLevel</*hasPosBuf=*/true> {
       return loadRange();
 
     SmallVector<Type, 2> types{b.getIndexType(), b.getIndexType()};
-    scf::IfOp posRangeIf = b.create<scf::IfOp>(l, types, inPadZone, true);
+    scf::IfOp posRangeIf = scf::IfOp::create(b, l, types, inPadZone, true);
     // True branch: returns a "fake" empty range [0, 0) if the parent
     // iterator is in the pad zone.
     b.setInsertionPointToStart(posRangeIf.thenBlock());
 
     SmallVector<Value, 2> emptyRange{C_IDX(0), C_IDX(0)};
-    b.create<scf::YieldOp>(l, emptyRange);
+    scf::YieldOp::create(b, l, emptyRange);
 
     // False branch: returns the actual range.
     b.setInsertionPointToStart(posRangeIf.elseBlock());
     auto [pLo, pHi] = loadRange();
     SmallVector<Value, 2> loadedRange{pLo, pHi};
-    b.create<scf::YieldOp>(l, loadedRange);
+    scf::YieldOp::create(b, l, loadedRange);
 
     b.setInsertionPointAfter(posRangeIf);
     ValueRange posRange = posRangeIf.getResults();
@@ -248,7 +248,7 @@ static scf::ValueVector genWhenInBound(
     llvm::function_ref<scf::ValueVector(OpBuilder &, Location, Value)>
         builder) {
   TypeRange ifRetTypes = elseRet.getTypes();
-  auto ifOp = b.create<scf::IfOp>(l, ifRetTypes, it.genNotEnd(b, l), true);
+  auto ifOp = scf::IfOp::create(b, l, ifRetTypes, it.genNotEnd(b, l), true);
 
   b.setInsertionPointToStart(ifOp.thenBlock());
   Value crd = it.deref(b, l);
@@ -732,29 +732,29 @@ class NonEmptySubSectIterator : public SparseIterator {
   //      [itVal0, itVal1, ..., pNx0],
   //      ...]
   Value allocSubSectPosBuf(OpBuilder &b, Location l) {
-    return b.create<memref::AllocaOp>(
-        l,
+    return memref::AllocaOp::create(
+        b, l,
         MemRefType::get({ShapedType::kDynamic, tupleSz + 1}, b.getIndexType()),
         maxTupleCnt);
   }
 
   void storeNxLvlStart(OpBuilder &b, Location l, Value tupleId,
                        Value start) const {
-    b.create<memref::StoreOp>(l, start, subSectPosBuf,
-                              ValueRange{tupleId, C_IDX(tupleSz)});
+    memref::StoreOp::create(b, l, start, subSectPosBuf,
+                            ValueRange{tupleId, C_IDX(tupleSz)});
   }
 
   Value loadNxLvlStart(OpBuilder &b, Location l, Value tupleId) const {
-    return b.create<memref::LoadOp>(l, subSectPosBuf,
-                                    ValueRange{tupleId, C_IDX(tupleSz)});
+    return memref::LoadOp::create(b, l, subSectPosBuf,
+                                  ValueRange{tupleId, C_IDX(tupleSz)});
   }
 
   void storeCursorVals(OpBuilder &b, Location l, Value tupleId,
                        ValueRange itVals) const {
     assert(itVals.size() == tupleSz);
     for (unsigned i = 0; i < tupleSz; i++) {
-      b.create<memref::StoreOp>(l, itVals[i], subSectPosBuf,
-                                ValueRange{tupleId, C_IDX(i)});
+      memref::StoreOp::create(b, l, itVals[i], subSectPosBuf,
+                              ValueRange{tupleId, C_IDX(i)});
     }
   }
 
@@ -762,8 +762,8 @@ class NonEmptySubSectIterator : public SparseIterator {
                                     Value tupleId) const {
     SmallVector<Value> ret;
     for (unsigned i = 0; i < tupleSz; i++) {
-      Value v = b.create<memref::LoadOp>(l, subSectPosBuf,
-                                         ValueRange{tupleId, C_IDX(i)});
+      Value v = memref::LoadOp::create(b, l, subSectPosBuf,
+                                       ValueRange{tupleId, C_IDX(i)});
       ret.push_back(v);
     }
     return ret;
@@ -1043,7 +1043,7 @@ ValueRange SparseIterator::forward(OpBuilder &b, Location l) {
 }
 
 ValueRange SparseIterator::forwardIf(OpBuilder &b, Location l, Value cond) {
-  auto ifOp = b.create<scf::IfOp>(l, getCursor().getTypes(), cond, true);
+  auto ifOp = scf::IfOp::create(b, l, getCursor().getTypes(), cond, true);
   // Generate the else branch first; otherwise the iterator values will be
   // updated by `forward()`.
   b.setInsertionPointToStart(ifOp.elseBlock());
@@ -1058,12 +1058,12 @@ ValueRange SparseIterator::forwardIf(OpBuilder &b, Location l, Value cond) {
 }
 
 Value DedupIterator::genSegmentHigh(OpBuilder &b, Location l, Value pos) {
-  auto whileOp = b.create<scf::WhileOp>(
-      l, pos.getType(), pos,
+  auto whileOp = scf::WhileOp::create(
+      b, l, pos.getType(), pos,
       /*beforeBuilder=*/
       [this, pos](OpBuilder &b, Location l, ValueRange ivs) {
         Value inBound = CMPI(ult, ivs.front(), posHi);
-        auto ifInBound = b.create<scf::IfOp>(l, b.getI1Type(), inBound, true);
+        auto ifInBound = scf::IfOp::create(b, l, b.getI1Type(), inBound, true);
         {
           OpBuilder::InsertionGuard guard(b);
           // If in bound, load the next coordinates and check duplication.
@@ -1076,7 +1076,7 @@ Value DedupIterator::genSegmentHigh(OpBuilder &b, Location l, Value pos) {
           b.setInsertionPointToStart(ifInBound.elseBlock());
           YIELD(constantI1(b, l, false));
         }
-        b.create<scf::ConditionOp>(l, ifInBound.getResults()[0], ivs);
+        scf::ConditionOp::create(b, l, ifInBound.getResults()[0], ivs);
       },
       /*afterBuilder=*/
       [](OpBuilder &b, Location l, ValueRange ivs) {
@@ -1137,8 +1137,8 @@ ValueRange FilterIterator::forwardImpl(OpBuilder &b, Location l) {
 
   SmallVector<Value> whileArgs(getCursor().begin(), getCursor().end());
   whileArgs.push_back(isFirst);
-  auto whileOp = b.create<scf::WhileOp>(
-      l, ValueRange(whileArgs).getTypes(), whileArgs,
+  auto whileOp = scf::WhileOp::create(
+      b, l, ValueRange(whileArgs).getTypes(), whileArgs,
       /*beforeBuilder=*/
       [this](OpBuilder &b, Location l, ValueRange ivs) {
         ValueRange isFirst = linkNewScope(ivs);
@@ -1154,7 +1154,7 @@ ValueRange FilterIterator::forwardImpl(OpBuilder &b, Location l) {
                              ret = ORI(ret, llvm::getSingleElement(isFirst));
                              return {ret};
                            });
-        b.create<scf::ConditionOp>(l, cont.front(), ivs);
+        scf::ConditionOp::create(b, l, cont.front(), ivs);
       },
       /*afterBuilder=*/
       [this](OpBuilder &b, Location l, ValueRange ivs) {
@@ -1219,8 +1219,8 @@ ValueRange NonEmptySubSectIterator::inflateSubSectTree(
     SmallVector<Value> iterArgs;
     iterArgs.push_back(C_IDX(0));
     iterArgs.append(reduc.begin(), reduc.end());
-    auto forEachLeaf = b.create<scf::ForOp>(
-        l, /*lb=*/C_IDX(0), /*ub=*/tupleCnt, /*step=*/C_IDX(1), iterArgs,
+    auto forEachLeaf = scf::ForOp::create(
+        b, l, /*lb=*/C_IDX(0), /*ub=*/tupleCnt, /*step=*/C_IDX(1), iterArgs,
         [&helper, &builder](OpBuilder &b, Location l, Value tupleId,
                             ValueRange iterArgs) {
           // Deserialize the iterator at the cached position (tupleId).
@@ -1235,12 +1235,12 @@ ValueRange NonEmptySubSectIterator::inflateSubSectTree(
           SmallVector<Value> whileArgs(helper.wrap.getCursor());
           whileArgs.append(iterArgs.begin(), iterArgs.end());
 
-          auto whileOp = b.create<scf::WhileOp>(
-              l, ValueRange(whileArgs).getTypes(), whileArgs,
+          auto whileOp = scf::WhileOp::create(
+              b, l, ValueRange(whileArgs).getTypes(), whileArgs,
               /*beforeBuilder=*/
               [&helper](OpBuilder &b, Location l, ValueRange ivs) {
                 helper.wrap.linkNewScope(ivs);
-                b.create<scf::ConditionOp>(l, helper.genNotEnd(b, l), ivs);
+                scf::ConditionOp::create(b, l, helper.genNotEnd(b, l), ivs);
               },
               /*afterBuilder=*/
               [&helper, &builder](OpBuilder &b, Location l, ValueRange ivs) {
@@ -1267,8 +1267,8 @@ ValueRange NonEmptySubSectIterator::inflateSubSectTree(
                                      ValueRange reduc) {
     assert(!parent || parent->lvl + 1 == lvl);
     delegate->genInit(b, l, parent);
-    auto forOp = b.create<scf::ForOp>(
-        l, /*lb=*/C_IDX(0), /*ub=*/subSectSz, /*step=*/C_IDX(1), reduc,
+    auto forOp = scf::ForOp::create(
+        b, l, /*lb=*/C_IDX(0), /*ub=*/subSectSz, /*step=*/C_IDX(1), reduc,
         [&](OpBuilder &b, Location l, Value crd, ValueRange iterArgs) {
           helper.locate(b, l, crd);
           scf::ValueVector nx = builder(b, l, &helper.wrap, iterArgs);
@@ -1411,7 +1411,7 @@ ValueRange NonEmptySubSectIterator::forwardImpl(OpBuilder &b, Location l) {
   // if (offset + size > parents.size)
   //   isNonEmpty = false;
   Value fastPathP = CMPI(ugt, getMinCrd(), getAbsOff());
-  auto ifOp = b.create<scf::IfOp>(l, getCursor().getTypes(), fastPathP, true);
+  auto ifOp = scf::IfOp::create(b, l, getCursor().getTypes(), fastPathP, true);
   {
     OpBuilder::InsertionGuard guard(b);
     // Take the fast path
@@ -1448,7 +1448,7 @@ ValueRange NonEmptySubSectIterator::forwardImpl(OpBuilder &b, Location l) {
                 Value isMin = CMPI(eq, crd, getMinCrd());
                 delegate->forwardIf(b, l, isMin);
                 // Update the forwarded iterator values if needed.
-                auto ifIsMin = b.create<scf::IfOp>(l, isMin, false);
+                auto ifIsMin = scf::IfOp::create(b, l, isMin, false);
                 b.setInsertionPointToStart(&ifIsMin.getThenRegion().front());
                 storeCursorVals(b, l, tupleId, delegate->serialize());
                 b.setInsertionPointAfter(ifIsMin);
@@ -1458,8 +1458,8 @@ ValueRange NonEmptySubSectIterator::forwardImpl(OpBuilder &b, Location l) {
                 return genWhenInBound(b, l, *delegate, /*elseRet=*/iterArgs,
                                       [nxMin](OpBuilder &b, Location l,
                                               Value crd) -> scf::ValueVector {
-                                        Value nx = b.create<arith::MinUIOp>(
-                                            l, crd, nxMin);
+                                        Value nx = arith::MinUIOp::create(
+                                            b, l, crd, nxMin);
                                         return {nx, C_TRUE};
                                       });
               });
@@ -1480,7 +1480,7 @@ ValueRange NonEmptySubSectIterator::forwardImpl(OpBuilder &b, Location l) {
 
   // We should at least forward the offset by one.
   Value minAbsOff = ADDI(getAbsOff(), c1);
-  nxAbsOff = b.create<arith::MaxUIOp>(l, minAbsOff, nxAbsOff);
+  nxAbsOff = arith::MaxUIOp::create(b, l, minAbsOff, nxAbsOff);
 
   seek(ValueRange{nxMinCrd, nxAbsOff, nxNotEnd});
   // The coordinate should not exceed the space upper bound.
@@ -1581,16 +1581,17 @@ sparse_tensor::makeSparseTensorLevel(OpBuilder &b, Location l, Value t,
   auto stt = getSparseTensorType(t);
 
   LevelType lt = stt.getLvlType(lvl);
-  Value sz = stt.hasEncoding() ? b.create<LvlOp>(l, t, lvl).getResult()
-                               : b.create<tensor::DimOp>(l, t, lvl).getResult();
+  Value sz = stt.hasEncoding()
+                 ? LvlOp::create(b, l, t, lvl).getResult()
+                 : tensor::DimOp::create(b, l, t, lvl).getResult();
 
   SmallVector<Value, 2> buffers;
   if (lt.isWithPosLT()) {
-    Value pos = b.create<ToPositionsOp>(l, t, lvl);
+    Value pos = ToPositionsOp::create(b, l, t, lvl);
     buffers.push_back(pos);
   }
   if (lt.isWithCrdLT()) {
-    Value pos = b.create<ToCoordinatesOp>(l, t, lvl);
+    Value pos = ToCoordinatesOp::create(b, l, t, lvl);
     buffers.push_back(pos);
   }
   return makeSparseTensorLevel(lt, sz, buffers, tid, lvl);

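The file-local macros near the top of this file (CMPI, ADDI, SELECT, ...) each
expand to an `OpTy::create(b, l, ...)` call, so they pick up `b` and `l` from
the enclosing scope by name. A minimal usage sketch (hypothetical helper; the
parameters must be spelled `b` and `l` for the macros to resolve):

    // Advances a position by one, clamped at posHi: min(pos + 1, posHi).
    static Value clampedInc(OpBuilder &b, Location l, Value pos, Value posHi) {
      Value nx = ADDI(pos, C_IDX(1));
      Value inBound = CMPI(ult, nx, posHi);
      return SELECT(inBound, nx, posHi);
    }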
diff  --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
index 0258f797143cb..5847fecc45404 100644
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -1563,7 +1563,7 @@ static Value insertYieldOp(RewriterBase &rewriter, Location loc, Region &region,
   Block &clonedBlock = tmpRegion.front();
   YieldOp clonedYield = cast<YieldOp>(clonedBlock.getTerminator());
   // Merge cloned block and return yield value.
-  Operation *placeholder = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+  Operation *placeholder = arith::ConstantIndexOp::create(rewriter, loc, 0);
   rewriter.inlineBlockBefore(&tmpRegion.front(), placeholder, vals);
   Value val = clonedYield.getSingleResult();
   rewriter.eraseOp(clonedYield);
@@ -1603,16 +1603,16 @@ static Value buildRelu(RewriterBase &rewriter, Location loc, Value v0,
                        Attribute attr) {
   Type tp = v0.getType();
   auto zero =
-      rewriter.create<arith::ConstantOp>(loc, tp, rewriter.getZeroAttr(tp));
+      arith::ConstantOp::create(rewriter, loc, tp, rewriter.getZeroAttr(tp));
   Value cmp;
   if (isa<FloatType>(tp)) {
     auto pred = llvm::cast<arith::CmpFPredicateAttr>(attr);
-    cmp = rewriter.create<arith::CmpFOp>(loc, pred, v0, zero);
+    cmp = arith::CmpFOp::create(rewriter, loc, pred, v0, zero);
   } else {
     auto pred = llvm::cast<arith::CmpIPredicateAttr>(attr);
-    cmp = rewriter.create<arith::CmpIOp>(loc, pred, v0, zero);
+    cmp = arith::CmpIOp::create(rewriter, loc, pred, v0, zero);
   }
-  return rewriter.create<arith::SelectOp>(loc, cmp, v0, zero);
+  return arith::SelectOp::create(rewriter, loc, cmp, v0, zero);
 }
 
 Value Merger::buildExp(RewriterBase &rewriter, Location loc, ExprId e, Value v0,
@@ -1627,128 +1627,128 @@ Value Merger::buildExp(RewriterBase &rewriter, Location loc, ExprId e, Value v0,
     llvm_unreachable("unexpected non-op");
   // Unary operations.
   case TensorExp::Kind::kAbsF:
-    return rewriter.create<math::AbsFOp>(loc, v0);
+    return math::AbsFOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kAbsC: {
     auto type = cast<ComplexType>(v0.getType());
     auto eltType = cast<FloatType>(type.getElementType());
-    return rewriter.create<complex::AbsOp>(loc, eltType, v0);
+    return complex::AbsOp::create(rewriter, loc, eltType, v0);
   }
   case TensorExp::Kind::kAbsI:
-    return rewriter.create<math::AbsIOp>(loc, v0);
+    return math::AbsIOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kCeilF:
-    return rewriter.create<math::CeilOp>(loc, v0);
+    return math::CeilOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kFloorF:
-    return rewriter.create<math::FloorOp>(loc, v0);
+    return math::FloorOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kSqrtF:
-    return rewriter.create<math::SqrtOp>(loc, v0);
+    return math::SqrtOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kSqrtC:
-    return rewriter.create<complex::SqrtOp>(loc, v0);
+    return complex::SqrtOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kExpm1F:
-    return rewriter.create<math::ExpM1Op>(loc, v0);
+    return math::ExpM1Op::create(rewriter, loc, v0);
   case TensorExp::Kind::kExpm1C:
-    return rewriter.create<complex::Expm1Op>(loc, v0);
+    return complex::Expm1Op::create(rewriter, loc, v0);
   case TensorExp::Kind::kLog1pF:
-    return rewriter.create<math::Log1pOp>(loc, v0);
+    return math::Log1pOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kLog1pC:
-    return rewriter.create<complex::Log1pOp>(loc, v0);
+    return complex::Log1pOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kRelu:
     return buildRelu(rewriter, loc, v0, expr.attr);
   case TensorExp::Kind::kSinF:
-    return rewriter.create<math::SinOp>(loc, v0);
+    return math::SinOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kSinC:
-    return rewriter.create<complex::SinOp>(loc, v0);
+    return complex::SinOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kTanhF:
-    return rewriter.create<math::TanhOp>(loc, v0);
+    return math::TanhOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kTanhC:
-    return rewriter.create<complex::TanhOp>(loc, v0);
+    return complex::TanhOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kNegF:
-    return rewriter.create<arith::NegFOp>(loc, v0);
+    return arith::NegFOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kNegC:
-    return rewriter.create<complex::NegOp>(loc, v0);
+    return complex::NegOp::create(rewriter, loc, v0);
   case TensorExp::Kind::kNegI: // no negi in std
-    return rewriter.create<arith::SubIOp>(
-        loc,
-        rewriter.create<arith::ConstantOp>(loc, v0.getType(),
-                                           rewriter.getZeroAttr(v0.getType())),
+    return arith::SubIOp::create(
+        rewriter, loc,
+        arith::ConstantOp::create(rewriter, loc, v0.getType(),
+                                  rewriter.getZeroAttr(v0.getType())),
         v0);
   case TensorExp::Kind::kTruncF:
-    return rewriter.create<arith::TruncFOp>(loc, inferType(e, v0), v0);
+    return arith::TruncFOp::create(rewriter, loc, inferType(e, v0), v0);
   case TensorExp::Kind::kExtF:
-    return rewriter.create<arith::ExtFOp>(loc, inferType(e, v0), v0);
+    return arith::ExtFOp::create(rewriter, loc, inferType(e, v0), v0);
   case TensorExp::Kind::kCastFS:
-    return rewriter.create<arith::FPToSIOp>(loc, inferType(e, v0), v0);
+    return arith::FPToSIOp::create(rewriter, loc, inferType(e, v0), v0);
   case TensorExp::Kind::kCastFU:
-    return rewriter.create<arith::FPToUIOp>(loc, inferType(e, v0), v0);
+    return arith::FPToUIOp::create(rewriter, loc, inferType(e, v0), v0);
   case TensorExp::Kind::kCastSF:
-    return rewriter.create<arith::SIToFPOp>(loc, inferType(e, v0), v0);
+    return arith::SIToFPOp::create(rewriter, loc, inferType(e, v0), v0);
   case TensorExp::Kind::kCastUF:
-    return rewriter.create<arith::UIToFPOp>(loc, inferType(e, v0), v0);
+    return arith::UIToFPOp::create(rewriter, loc, inferType(e, v0), v0);
   case TensorExp::Kind::kCastS:
-    return rewriter.create<arith::ExtSIOp>(loc, inferType(e, v0), v0);
+    return arith::ExtSIOp::create(rewriter, loc, inferType(e, v0), v0);
   case TensorExp::Kind::kCastU:
-    return rewriter.create<arith::ExtUIOp>(loc, inferType(e, v0), v0);
+    return arith::ExtUIOp::create(rewriter, loc, inferType(e, v0), v0);
   case TensorExp::Kind::kCastIdx:
-    return rewriter.create<arith::IndexCastOp>(loc, inferType(e, v0), v0);
+    return arith::IndexCastOp::create(rewriter, loc, inferType(e, v0), v0);
   case TensorExp::Kind::kTruncI:
-    return rewriter.create<arith::TruncIOp>(loc, inferType(e, v0), v0);
+    return arith::TruncIOp::create(rewriter, loc, inferType(e, v0), v0);
   case TensorExp::Kind::kCIm: {
     auto type = cast<ComplexType>(v0.getType());
     auto eltType = cast<FloatType>(type.getElementType());
-    return rewriter.create<complex::ImOp>(loc, eltType, v0);
+    return complex::ImOp::create(rewriter, loc, eltType, v0);
   }
   case TensorExp::Kind::kCRe: {
     auto type = cast<ComplexType>(v0.getType());
     auto eltType = cast<FloatType>(type.getElementType());
-    return rewriter.create<complex::ReOp>(loc, eltType, v0);
+    return complex::ReOp::create(rewriter, loc, eltType, v0);
   }
   case TensorExp::Kind::kBitCast:
-    return rewriter.create<arith::BitcastOp>(loc, inferType(e, v0), v0);
+    return arith::BitcastOp::create(rewriter, loc, inferType(e, v0), v0);
   // Binary operations.
   case TensorExp::Kind::kMulF:
-    return rewriter.create<arith::MulFOp>(loc, v0, v1);
+    return arith::MulFOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kMulC:
-    return rewriter.create<complex::MulOp>(loc, v0, v1);
+    return complex::MulOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kMulI:
-    return rewriter.create<arith::MulIOp>(loc, v0, v1);
+    return arith::MulIOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kDivF:
-    return rewriter.create<arith::DivFOp>(loc, v0, v1);
+    return arith::DivFOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kDivC:
-    return rewriter.create<complex::DivOp>(loc, v0, v1);
+    return complex::DivOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kDivS:
-    return rewriter.create<arith::DivSIOp>(loc, v0, v1);
+    return arith::DivSIOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kDivU:
-    return rewriter.create<arith::DivUIOp>(loc, v0, v1);
+    return arith::DivUIOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kAddF:
-    return rewriter.create<arith::AddFOp>(loc, v0, v1);
+    return arith::AddFOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kAddC:
-    return rewriter.create<complex::AddOp>(loc, v0, v1);
+    return complex::AddOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kAddI:
-    return rewriter.create<arith::AddIOp>(loc, v0, v1);
+    return arith::AddIOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kSubF:
-    return rewriter.create<arith::SubFOp>(loc, v0, v1);
+    return arith::SubFOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kSubC:
-    return rewriter.create<complex::SubOp>(loc, v0, v1);
+    return complex::SubOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kSubI:
-    return rewriter.create<arith::SubIOp>(loc, v0, v1);
+    return arith::SubIOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kAndI:
-    return rewriter.create<arith::AndIOp>(loc, v0, v1);
+    return arith::AndIOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kOrI:
-    return rewriter.create<arith::OrIOp>(loc, v0, v1);
+    return arith::OrIOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kXorI:
-    return rewriter.create<arith::XOrIOp>(loc, v0, v1);
+    return arith::XOrIOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kShrS:
-    return rewriter.create<arith::ShRSIOp>(loc, v0, v1);
+    return arith::ShRSIOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kShrU:
-    return rewriter.create<arith::ShRUIOp>(loc, v0, v1);
+    return arith::ShRUIOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kShlI:
-    return rewriter.create<arith::ShLIOp>(loc, v0, v1);
+    return arith::ShLIOp::create(rewriter, loc, v0, v1);
   case TensorExp::Kind::kCmpI: {
     auto predicate = llvm::cast<arith::CmpIPredicateAttr>(expr.attr);
-    return rewriter.create<arith::CmpIOp>(loc, predicate, v0, v1);
+    return arith::CmpIOp::create(rewriter, loc, predicate, v0, v1);
   }
   case TensorExp::Kind::kCmpF: {
     auto predicate = llvm::cast<arith::CmpFPredicateAttr>(expr.attr);
-    return rewriter.create<arith::CmpFOp>(loc, predicate, v0, v1);
+    return arith::CmpFOp::create(rewriter, loc, predicate, v0, v1);
   }
   case TensorExp::Kind::kBinaryBranch: // semi-ring ops with custom logic.
     return insertYieldOp(rewriter, loc, *expr.op->getBlock()->getParent(),

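The `buildRelu` hunk earlier in this file shows the compare-and-select shape
that also underlies several cases in `buildExp`. A minimal f32 specialization
for illustration (the free function and the fixed predicate are hypothetical;
the real `buildRelu` reads the predicate from the expression attribute):

    // relu(v) = select(v > 0, v, 0); predicate fixed to UGT for the sketch.
    static Value reluF32(RewriterBase &rewriter, Location loc, Value v) {
      Type tp = rewriter.getF32Type();
      Value zero =
          arith::ConstantOp::create(rewriter, loc, tp, rewriter.getZeroAttr(tp));
      Value cmp = arith::CmpFOp::create(rewriter, loc,
                                        arith::CmpFPredicate::UGT, v, zero);
      return arith::SelectOp::create(rewriter, loc, cmp, v, zero);
    }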
