[Mlir-commits] [mlir] [mlir][NFC] update `mlir/Dialect` create APIs (23/n) (PR #149930)

Maksim Levental llvmlistbot at llvm.org
Wed Jul 23 07:07:28 PDT 2025


https://github.com/makslevental updated https://github.com/llvm/llvm-project/pull/149930

From 49d2e730e06145ac3df0b00c39bc05b828ca9508 Mon Sep 17 00:00:00 2001
From: max <maksim.levental at gmail.com>
Date: Mon, 21 Jul 2025 18:21:44 -0400
Subject: [PATCH] [mlir][NFC] update `mlir/Dialect` create APIs (23/n)

See https://github.com/llvm/llvm-project/pull/147168 for more info.
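
Every hunk below applies the same mechanical rewrite: a
`builder.create<OpTy>(...)` call becomes a call to the static
`OpTy::create(builder, ...)` method, with the builder moved to the first
argument and all remaining arguments passed through unchanged. A minimal
sketch of the pattern (variable names are placeholders, not lines from
this patch):

    // Before: build the op through the OpBuilder member template.
    Value empty = builder.create<tensor::EmptyOp>(loc, sizes, elementType);

    // After: build the op through its static `create` method; `builder`
    // becomes the first argument, everything else is unchanged.
    Value empty = tensor::EmptyOp::create(builder, loc, sizes, elementType);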
---
 .../Extensions/MeshShardingExtensions.cpp     |  10 +-
 mlir/lib/Dialect/Tensor/IR/TensorOps.cpp      | 138 +++++++++---------
 .../Tensor/IR/TensorTilingInterfaceImpl.cpp   |  30 ++--
 .../TransformOps/TensorTransformOps.cpp       |   4 +-
 .../BufferizableOpInterfaceImpl.cpp           |  81 +++++-----
 .../Tensor/Transforms/EmptyOpPatterns.cpp     |   5 +-
 .../ExtractSliceFromReshapeUtils.cpp          |  15 +-
 .../Tensor/Transforms/FoldTensorSubsetOps.cpp |   6 +-
 .../Transforms/IndependenceTransforms.cpp     |  19 +--
 .../Tensor/Transforms/ReshapePatterns.cpp     |  22 +--
 .../Tensor/Transforms/RewriteAsConstant.cpp   |   2 +-
 .../Transforms/RuntimeOpVerification.cpp      |  63 ++++----
 .../SubsetInsertionOpInterfaceImpl.cpp        |   4 +-
 .../SwapExtractSliceWithProducerPatterns.cpp  |   6 +-
 mlir/lib/Dialect/Tensor/Utils/Utils.cpp       |   6 +-
 15 files changed, 209 insertions(+), 202 deletions(-)

diff --git a/mlir/lib/Dialect/Tensor/Extensions/MeshShardingExtensions.cpp b/mlir/lib/Dialect/Tensor/Extensions/MeshShardingExtensions.cpp
index 26406ceef082c..7e4a5acb9867d 100644
--- a/mlir/lib/Dialect/Tensor/Extensions/MeshShardingExtensions.cpp
+++ b/mlir/lib/Dialect/Tensor/Extensions/MeshShardingExtensions.cpp
@@ -74,12 +74,12 @@ struct CreatorOpShardingInterface
         if (!oldType.isDynamicDim(i) && shardType.isDynamicDim(i)) {
           if (!newSharding) {
             newSharding =
-                builder.create<ShardingOp>(op->getLoc(), resultShardings[0]);
+                ShardingOp::create(builder, op->getLoc(), resultShardings[0]);
             device =
-                builder.create<mesh::ProcessMultiIndexOp>(op->getLoc(), mesh)
+                mesh::ProcessMultiIndexOp::create(builder, op->getLoc(), mesh)
                     .getResults();
-            shapeForDevice = builder.create<mesh::ShardShapeOp>(
-                op->getLoc(), oldType.getShape(), spmdizedOperands,
+            shapeForDevice = mesh::ShardShapeOp::create(
+                builder, op->getLoc(), oldType.getShape(), spmdizedOperands,
                 newSharding->getResult(0), device);
           }
           newOperands.emplace_back(shapeForDevice.getResult()[i]);
@@ -88,7 +88,7 @@ struct CreatorOpShardingInterface
           newOperands.emplace_back(spmdizedOperands[++currOldOprndNum]);
         }
       }
-      newOp = builder.create<OpTy>(op->getLoc(), shardType, newOperands);
+      newOp = OpTy::create(builder, op->getLoc(), shardType, newOperands);
       spmdizationMap.map(op->getResult(0), newOp->getResult(0));
     } else {
       // `clone` will populate the mapping of old to new results.
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index b035a53692dcf..7d4b1127a08be 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -53,8 +53,8 @@ Operation *TensorDialect::materializeConstant(OpBuilder &builder,
   if (auto op = arith::ConstantOp::materialize(builder, value, type, loc))
     return op;
   if (complex::ConstantOp::isBuildableWith(value, type))
-    return builder.create<complex::ConstantOp>(loc, type,
-                                               llvm::cast<ArrayAttr>(value));
+    return complex::ConstantOp::create(builder, loc, type,
+                                       llvm::cast<ArrayAttr>(value));
   return nullptr;
 }
 
@@ -107,7 +107,7 @@ FailureOr<Value> tensor::getOrCreateDestination(OpBuilder &b, Location loc,
 
   // Create empty tensor.
   Value emptyTensor =
-      b.create<tensor::EmptyOp>(loc, mixedSizes, tensorType.getElementType());
+      tensor::EmptyOp::create(b, loc, mixedSizes, tensorType.getElementType());
   return emptyTensor;
 }
 
@@ -678,8 +678,8 @@ FailureOr<SmallVector<Value>> ConcatOp::decomposeOperation(OpBuilder &builder) {
     inputShapes.emplace_back(std::move(inputShape));
   }
 
-  Value replacement = builder.create<tensor::EmptyOp>(
-      loc, outputShape, getType().getElementType());
+  Value replacement = tensor::EmptyOp::create(builder, loc, outputShape,
+                                              getType().getElementType());
 
   int64_t rank = getType().getRank();
   OpFoldResult one = builder.getIndexAttr(1);
@@ -687,12 +687,12 @@ FailureOr<SmallVector<Value>> ConcatOp::decomposeOperation(OpBuilder &builder) {
   SmallVector<OpFoldResult> offsets(rank, zero);
   for (auto [index, input] : llvm::enumerate(getInputs())) {
     offsets[concatDim] = concatOffsets[index];
-    auto insertSlice = builder.create<tensor::InsertSliceOp>(
-        loc, input, replacement, offsets, inputShapes[index], strides);
+    auto insertSlice = tensor::InsertSliceOp::create(
+        builder, loc, input, replacement, offsets, inputShapes[index], strides);
     replacement = insertSlice.getResult();
   }
   if (replacement.getType() != getType()) {
-    replacement = builder.create<tensor::CastOp>(loc, getType(), replacement);
+    replacement = tensor::CastOp::create(builder, loc, getType(), replacement);
   }
   return SmallVector<Value>{replacement};
 }
@@ -723,7 +723,7 @@ ConcatOp::reifyResultShapes(OpBuilder &builder,
           builder.getIndexAttr(inferredResultType.getDimSize(i)));
     } else {
       reifiedReturnShapes[0][i] =
-          builder.create<tensor::DimOp>(init.getLoc(), init, i).getResult();
+          tensor::DimOp::create(builder, init.getLoc(), init, i).getResult();
     }
   }
 
@@ -823,8 +823,8 @@ struct InferConcatOperandTypes : public OpRewritePattern<ConcatOp> {
 
         // Use refined operand type and create cast from original operand.
         auto castOp =
-            rewriter.create<CastOp>(concatOp->getLoc(), inferredOperandType,
-                                    concatOp.getOperand(operandIdx));
+            CastOp::create(rewriter, concatOp->getLoc(), inferredOperandType,
+                           concatOp.getOperand(operandIdx));
         rewriter.modifyOpInPlace(concatOp, [=, operandIdx = operandIdx] {
           concatOp->setOperand(operandIdx, castOp->getResult(0));
         });
@@ -864,8 +864,9 @@ struct InferConcatResultType : public OpRewritePattern<ConcatOp> {
       return failure();
     }
 
-    auto newConcatOp = rewriter.create<ConcatOp>(
-        concatOp->getLoc(), inferredResultType, dim, concatOp->getOperands());
+    auto newConcatOp =
+        ConcatOp::create(rewriter, concatOp->getLoc(), inferredResultType, dim,
+                         concatOp->getOperands());
     rewriter.replaceOpWithNewOp<CastOp>(concatOp, concatOp.getResultType(),
                                         newConcatOp);
 
@@ -892,7 +893,7 @@ void DimOp::getAsmResultNames(function_ref<void(Value, StringRef)> setNameFn) {
 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
                   int64_t index) {
   auto loc = result.location;
-  Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
+  Value indexValue = arith::ConstantIndexOp::create(builder, loc, index);
   build(builder, result, source, indexValue);
 }
 
@@ -1036,10 +1037,10 @@ struct DimOfReshapeOp : public OpRewritePattern<DimOp> {
     rewriter.setInsertionPointAfter(dim);
     Location loc = dim.getLoc();
     Value extract =
-        rewriter.create<ExtractOp>(loc, reshape.getShape(), dim.getIndex());
+        ExtractOp::create(rewriter, loc, reshape.getShape(), dim.getIndex());
     if (extract.getType() != dim.getType())
       extract =
-          rewriter.create<arith::IndexCastOp>(loc, dim.getType(), extract);
+          arith::IndexCastOp::create(rewriter, loc, dim.getType(), extract);
     rewriter.replaceOp(dim, extract);
     return success();
   }
@@ -1150,8 +1151,8 @@ struct ReplaceEmptyTensorStaticShapeDims : OpRewritePattern<EmptyOp> {
     if (foldedTensorType == op.getType())
       return failure();
 
-    auto newOp = rewriter.create<EmptyOp>(op.getLoc(), foldedTensorType,
-                                          foldedDynamicSizes);
+    auto newOp = EmptyOp::create(rewriter, op.getLoc(), foldedTensorType,
+                                 foldedDynamicSizes);
     rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp);
     return success();
   }
@@ -1326,8 +1327,8 @@ struct ExtractFromCollapseShape : public OpRewritePattern<tensor::ExtractOp> {
 
       SmallVector<int64_t> basis =
           llvm::map_to_vector(group, [&](int64_t d) { return sourceSizes[d]; });
-      auto delinearize = rewriter.create<affine::AffineDelinearizeIndexOp>(
-          extractOp.getLoc(), index, basis, /*hasOuterBound=*/true);
+      auto delinearize = affine::AffineDelinearizeIndexOp::create(
+          rewriter, extractOp.getLoc(), index, basis, /*hasOuterBound=*/true);
       llvm::append_range(sourceIndices, delinearize.getResults());
     }
     if (collapseOp.getReassociationIndices().empty()) {
@@ -1498,8 +1499,8 @@ struct ExtractElementFromIndexCast
 
     Type elementTy = getElementTypeOrSelf(indexCast.getIn());
 
-    auto newExtract = rewriter.create<tensor::ExtractOp>(
-        loc, elementTy, indexCast.getIn(), extract.getIndices());
+    auto newExtract = tensor::ExtractOp::create(
+        rewriter, loc, elementTy, indexCast.getIn(), extract.getIndices());
 
     rewriter.replaceOpWithNewOp<arith::IndexCastOp>(extract, extract.getType(),
                                                     newExtract);
@@ -1736,7 +1737,7 @@ struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
 
     auto loc = generateOp.getLoc();
     auto newOp =
-        rewriter.create<GenerateOp>(loc, foldedTensorType, foldedDynamicSizes);
+        GenerateOp::create(rewriter, loc, foldedTensorType, foldedDynamicSizes);
     rewriter.inlineRegionBefore(generateOp.getBody(), newOp.getBody(),
                                 newOp.getBody().begin());
     rewriter.replaceOpWithNewOp<tensor::CastOp>(generateOp,
@@ -2161,9 +2162,9 @@ struct FoldCollapseOfCastOp : public OpRewritePattern<CollapseShapeOp> {
         collapseShapeOp.getSrcMutable().assign(castOp.getSource());
       });
     } else {
-      auto newOp = rewriter.create<CollapseShapeOp>(
-          collapseShapeOp.getLoc(), newResultType, castOp.getSource(),
-          collapseShapeOp.getReassociation());
+      auto newOp = CollapseShapeOp::create(rewriter, collapseShapeOp.getLoc(),
+                                           newResultType, castOp.getSource(),
+                                           collapseShapeOp.getReassociation());
       rewriter.replaceOpWithNewOp<tensor::CastOp>(
           collapseShapeOp, collapseShapeOp.getResultType(), newOp);
     }
@@ -2240,10 +2241,10 @@ struct ConvertToStaticExpandShape : public OpRewritePattern<ExpandShapeOp> {
         newInputShape, expandOp.getSrcType().getElementType());
     auto outputType = RankedTensorType::get(
         newOutputShape, expandOp.getSrcType().getElementType());
-    auto inputCast = rewriter.create<CastOp>(expandOp.getLoc(), inputType,
-                                             expandOp.getSrc());
-    auto newExpand = rewriter.create<ExpandShapeOp>(
-        expandOp.getLoc(), outputType, inputCast.getResult(),
+    auto inputCast = CastOp::create(rewriter, expandOp.getLoc(), inputType,
+                                    expandOp.getSrc());
+    auto newExpand = ExpandShapeOp::create(
+        rewriter, expandOp.getLoc(), outputType, inputCast.getResult(),
         expandOp.getReassociationIndices(), outputOfr);
     rewriter.replaceOpWithNewOp<CastOp>(expandOp, expandOp.getType(),
                                         newExpand.getResult());
@@ -2555,10 +2556,11 @@ class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
 
     // Create folded extract.
     Location loc = sliceOp.getLoc();
-    Value newResult = rewriter.create<ExtractSliceOp>(
-        loc, sliceOp.getType(), castOp.getSource(), sliceOp.getOffsets(),
-        sliceOp.getSizes(), sliceOp.getStrides(), sliceOp.getStaticOffsets(),
-        sliceOp.getStaticSizes(), sliceOp.getStaticStrides());
+    Value newResult = ExtractSliceOp::create(
+        rewriter, loc, sliceOp.getType(), castOp.getSource(),
+        sliceOp.getOffsets(), sliceOp.getSizes(), sliceOp.getStrides(),
+        sliceOp.getStaticOffsets(), sliceOp.getStaticSizes(),
+        sliceOp.getStaticStrides());
     rewriter.replaceOp(sliceOp, newResult);
     return success();
   }
@@ -2709,8 +2711,8 @@ struct SliceCanonicalizer {
                   ExtractSliceOp newOp) {
     Value replacement = newOp.getResult();
     if (replacement.getType() != op.getType())
-      replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
-                                                    replacement);
+      replacement = tensor::CastOp::create(rewriter, op.getLoc(), op.getType(),
+                                           replacement);
     rewriter.replaceOp(op, replacement);
   }
 };
@@ -2978,8 +2980,8 @@ class InsertSliceOpConstantArgumentFolder final
       // the parallel case.
       if (std::is_same<InsertOpTy, ParallelInsertSliceOp>::value)
         rewriter.setInsertionPoint(insertSliceOp->getParentOp());
-      toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
-                                                 sourceType, toInsert);
+      toInsert = tensor::CastOp::create(rewriter, insertSliceOp.getLoc(),
+                                        sourceType, toInsert);
     }
     rewriter.replaceOpWithNewOp<InsertOpTy>(
         insertSliceOp, toInsert, insertSliceOp.getDest(), mixedOffsets,
@@ -3075,17 +3077,18 @@ struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertOpTy> {
     if (!sliceResult.isValid)
       return failure();
 
-    Operation *replacement = rewriter.create<InsertOpTy>(
-        insertSliceOp.getLoc(), src, dst, insertSliceOp.getMixedOffsets(),
-        mixedSizes, insertSliceOp.getMixedStrides());
+    Operation *replacement =
+        InsertOpTy::create(rewriter, insertSliceOp.getLoc(), src, dst,
+                           insertSliceOp.getMixedOffsets(), mixedSizes,
+                           insertSliceOp.getMixedStrides());
 
     // In the parallel case there is no result and so nothing to cast.
     bool isParallelInsert =
         std::is_same<InsertOpTy, ParallelInsertSliceOp>::value;
     if (!isParallelInsert && dst.getType() != insertSliceOp.getDestType()) {
-      replacement = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
-                                                    insertSliceOp.getDestType(),
-                                                    replacement->getResult(0));
+      replacement = tensor::CastOp::create(rewriter, insertSliceOp.getLoc(),
+                                           insertSliceOp.getDestType(),
+                                           replacement->getResult(0));
     }
     rewriter.replaceOp(insertSliceOp, replacement->getResults());
     return success();
@@ -3154,8 +3157,8 @@ struct InsertSliceOpSourceCastInserter final
     // parallel case.
     if (std::is_same<InsertOpTy, ParallelInsertSliceOp>::value)
       rewriter.setInsertionPoint(insertSliceOp->getParentOp());
-    Value cast = rewriter.create<tensor::CastOp>(
-        insertSliceOp.getLoc(), newSrcType, insertSliceOp.getSource());
+    Value cast = tensor::CastOp::create(rewriter, insertSliceOp.getLoc(),
+                                        newSrcType, insertSliceOp.getSource());
     rewriter.replaceOpWithNewOp<InsertOpTy>(
         insertSliceOp, cast, insertSliceOp.getDest(),
         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
@@ -3353,7 +3356,7 @@ void PadOp::build(OpBuilder &b, OperationState &result, Type resultType,
   // a guard to reset the insertion point of the builder after it is destroyed.
   OpBuilder::InsertionGuard guard(b);
   b.createBlock(region, region->end(), blockArgTypes, blockArgLocs);
-  b.create<tensor::YieldOp>(result.location, constantPadValue);
+  tensor::YieldOp::create(b, result.location, constantPadValue);
 }
 
 llvm::SmallBitVector PadOp::getPaddedDims() {
@@ -3407,10 +3410,11 @@ struct FoldSourceTensorCast : public OpRewritePattern<PadOp> {
         padTensorOp.getSourceMutable().assign(castOp.getSource());
       });
     } else {
-      auto newOp = rewriter.create<PadOp>(
-          padTensorOp->getLoc(), newResultType, padTensorOp.getSource(),
-          padTensorOp.getStaticLow(), padTensorOp.getStaticHigh(),
-          padTensorOp.getLow(), padTensorOp.getHigh(), padTensorOp.getNofold(),
+      auto newOp = PadOp::create(
+          rewriter, padTensorOp->getLoc(), newResultType,
+          padTensorOp.getSource(), padTensorOp.getStaticLow(),
+          padTensorOp.getStaticHigh(), padTensorOp.getLow(),
+          padTensorOp.getHigh(), padTensorOp.getNofold(),
           getPrunedAttributeList(padTensorOp, PadOp::getAttributeNames()));
       IRMapping mapper;
       padTensorOp.getRegion().cloneInto(&newOp.getRegion(), mapper);
@@ -3439,8 +3443,8 @@ struct FoldTargetTensorCast : public OpRewritePattern<PadOp> {
                                             tensorCastOp.getDest().getType()))
       return failure();
 
-    auto replacementOp = rewriter.create<PadOp>(
-        padTensorOp.getLoc(), tensorCastOp.getDest().getType(),
+    auto replacementOp = PadOp::create(
+        rewriter, padTensorOp.getLoc(), tensorCastOp.getDest().getType(),
         padTensorOp.getSource(), padTensorOp.getStaticLow(),
         padTensorOp.getStaticHigh(), padTensorOp.getLow(),
         padTensorOp.getHigh(), padTensorOp.getNofold(),
@@ -3597,11 +3601,11 @@ struct FoldOrthogonalPaddings : public OpRewritePattern<PadOp> {
 
     // Create a new tensor::ExtractSliceOp, tensor::PadOp pair that performs
     // the two paddings in one step.
-    auto newSliceOp = rewriter.create<ExtractSliceOp>(
-        padOp.getLoc(), outerSliceOp.getSource(), newOffsets, newSizes,
-        innerSliceOp.getMixedStrides());
-    auto newPadOp = rewriter.create<PadOp>(
-        padOp.getLoc(), padOp.getResultType(), newSliceOp.getResult(),
+    auto newSliceOp = ExtractSliceOp::create(
+        rewriter, padOp.getLoc(), outerSliceOp.getSource(), newOffsets,
+        newSizes, innerSliceOp.getMixedStrides());
+    auto newPadOp = PadOp::create(
+        rewriter, padOp.getLoc(), padOp.getResultType(), newSliceOp.getResult(),
         padOp.getMixedLowPad(), newHighPad, padOp.getNofold(),
         getPrunedAttributeList(padOp, PadOp::getAttributeNames()));
     rewriter.inlineRegionBefore(padOp.getRegion(), newPadOp.getRegion(),
@@ -3697,9 +3701,9 @@ struct FoldStaticPadding : public OpRewritePattern<PadOp> {
     // Rewrite the op using the new static type.
     auto newResultType = RankedTensorType::get(
         newOutDims, padTensorOp.getType().getElementType());
-    auto newOp = rewriter.create<PadOp>(
-        padTensorOp->getLoc(), newResultType, input, staticLow, staticHigh,
-        newLows, newHighs, padTensorOp.getNofold(),
+    auto newOp = PadOp::create(
+        rewriter, padTensorOp->getLoc(), newResultType, input, staticLow,
+        staticHigh, newLows, newHighs, padTensorOp.getNofold(),
         getPrunedAttributeList(padTensorOp, PadOp::getAttributeNames()));
 
     IRMapping mapper;
@@ -3777,9 +3781,9 @@ struct FoldConsecutiveConstantPadding : public OpRewritePattern<tensor::PadOp> {
     SmallVector<OpFoldResult> newLowPad =
         addPaddings(padOp.getMixedLowPad(), producerPad.getMixedLowPad());
 
-    auto newPadOp = rewriter.create<tensor::PadOp>(
-        padOp.getLoc(), padOp.getResultType(), producerPad.getSource(),
-        newLowPad, newHighPad, padOp.getNofold(),
+    auto newPadOp = tensor::PadOp::create(
+        rewriter, padOp.getLoc(), padOp.getResultType(),
+        producerPad.getSource(), newLowPad, newHighPad, padOp.getNofold(),
         getPrunedAttributeList(padOp, tensor::PadOp::getAttributeNames()));
     rewriter.inlineRegionBefore(padOp.getRegion(), newPadOp.getRegion(),
                                 newPadOp.getRegion().begin());
@@ -3803,7 +3807,7 @@ PadOp::reifyResultShapes(OpBuilder &b,
     }
     Location loc = getLoc();
     Value dim = b.createOrFold<tensor::DimOp>(
-        loc, getSource(), b.create<arith::ConstantIndexOp>(loc, i));
+        loc, getSource(), arith::ConstantIndexOp::create(b, loc, i));
 
     AffineExpr d0, d1, d2;
     bindDims(b.getContext(), d0, d1, d2);
@@ -4108,8 +4112,8 @@ struct FoldTensorCastProducerOp
     for (auto [oldResult, newResult] :
          llvm::zip(op->getResults(), newOp->getResults())) {
       if (newResult.getType() != oldResult.getType()) {
-        replacements.push_back(rewriter.create<tensor::CastOp>(
-            op->getLoc(), oldResult.getType(), newResult));
+        replacements.push_back(tensor::CastOp::create(
+            rewriter, op->getLoc(), oldResult.getType(), newResult));
       } else {
         replacements.push_back(newResult);
       }
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
index 437bc5d00faa8..124a63281a37c 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
@@ -207,13 +207,13 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
     if (isZeroInteger(newLength)) {
       hasZeroLen = true;
     } else if (!hasZeroLen) {
-      Value check = b.create<arith::CmpIOp>(
-          loc, arith::CmpIPredicate::eq,
+      Value check = arith::CmpIOp::create(
+          b, loc, arith::CmpIPredicate::eq,
           getValueOrCreateConstantIndexOp(b, loc, newLength),
           getValueOrCreateConstantIndexOp(b, loc, zero));
       dynHasZeroLenCond =
           dynHasZeroLenCond
-              ? b.create<arith::OrIOp>(loc, check, dynHasZeroLenCond)
+              ? arith::OrIOp::create(b, loc, check, dynHasZeroLenCond)
               : check;
     }
 
@@ -237,7 +237,7 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
   auto castResult = [&](Value val) -> Value {
     if (resultType == val.getType())
       return val;
-    return b.create<tensor::CastOp>(loc, resultType, val);
+    return tensor::CastOp::create(b, loc, resultType, val);
   };
 
   // In cases where the original data source is unused: Emit a GenerateOp and
@@ -245,10 +245,10 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
   // have a dimension of size 0, the semantics of which is unclear.)
   auto createGenerateOp = [&]() {
     // Create GenerateOp.
-    auto generateOp = b.create<tensor::GenerateOp>(
-        loc, resultType, dynDims,
+    auto generateOp = tensor::GenerateOp::create(
+        b, loc, resultType, dynDims,
         [&](OpBuilder &builder, Location gLoc, ValueRange indices) {
-          builder.create<tensor::YieldOp>(gLoc, padValue);
+          tensor::YieldOp::create(builder, gLoc, padValue);
         });
     return generateOp;
   };
@@ -257,10 +257,10 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
   // the result shape of the new SliceOp has a zero dimension.
   auto createPadOfExtractSlice = [&]() {
     // Create pad(extract_slice(x)).
-    auto newSliceOp = b.create<tensor::ExtractSliceOp>(
-        loc, padOp.getSource(), newOffsets, newLengths, newStrides);
-    auto newPadOp = b.create<PadOp>(
-        loc, Type(), newSliceOp, newLows, newHighs,
+    auto newSliceOp = tensor::ExtractSliceOp::create(
+        b, loc, padOp.getSource(), newOffsets, newLengths, newStrides);
+    auto newPadOp = PadOp::create(
+        b, loc, Type(), newSliceOp, newLows, newHighs,
         /*nofold=*/padOp.getNofold(),
         getPrunedAttributeList(padOp, PadOp::getAttributeNames()));
 
@@ -287,17 +287,17 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
     Operation *thenOp;
     Operation *elseOp;
     Operation *sliceOp;
-    auto result = b.create<scf::IfOp>(
-        loc, dynHasZeroLenCond,
+    auto result = scf::IfOp::create(
+        b, loc, dynHasZeroLenCond,
         /*thenBuilder=*/
         [&](OpBuilder &b, Location loc) {
           thenOp = createGenerateOp();
-          b.create<scf::YieldOp>(loc, castResult(thenOp->getResult(0)));
+          scf::YieldOp::create(b, loc, castResult(thenOp->getResult(0)));
         },
         /*elseBuilder=*/
         [&](OpBuilder &b, Location loc) {
           std::tie(elseOp, sliceOp) = createPadOfExtractSlice();
-          b.create<scf::YieldOp>(loc, castResult(elseOp->getResult(0)));
+          scf::YieldOp::create(b, loc, castResult(elseOp->getResult(0)));
         });
     return TilingResult{
         {elseOp}, SmallVector<Value>(result->getResults()), {sliceOp}};
diff --git a/mlir/lib/Dialect/Tensor/TransformOps/TensorTransformOps.cpp b/mlir/lib/Dialect/Tensor/TransformOps/TensorTransformOps.cpp
index 723731b8bed61..ba258402b61b3 100644
--- a/mlir/lib/Dialect/Tensor/TransformOps/TensorTransformOps.cpp
+++ b/mlir/lib/Dialect/Tensor/TransformOps/TensorTransformOps.cpp
@@ -165,7 +165,7 @@ void transform::TypeConversionCastShapeDynamicDimsOp::
     if (!tensor::CastOp::areCastCompatible(input.getType(), resultType)) {
       return Value();
     }
-    return builder.create<tensor::CastOp>(loc, resultType, input).getResult();
+    return tensor::CastOp::create(builder, loc, resultType, input).getResult();
   });
   converter.addTargetMaterialization([](OpBuilder &builder, Type resultType,
                                         ValueRange inputs,
@@ -177,7 +177,7 @@ void transform::TypeConversionCastShapeDynamicDimsOp::
     if (!tensor::CastOp::areCastCompatible(input.getType(), resultType)) {
       return Value();
     }
-    return builder.create<tensor::CastOp>(loc, resultType, input).getResult();
+    return tensor::CastOp::create(builder, loc, resultType, input).getResult();
   });
 }
 
diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
index 47b41efbed83b..bc11e567fa2d8 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -222,8 +222,8 @@ struct CollapseShapeOpInterface
           MemRefType::get(collapseShapeOp.getSrcType().getShape(),
                           collapseShapeOp.getSrcType().getElementType(),
                           AffineMap(), bufferType.getMemorySpace());
-      buffer = rewriter.create<bufferization::ToBufferOp>(
-          op->getLoc(), memrefType, *tensorAlloc);
+      buffer = bufferization::ToBufferOp::create(rewriter, op->getLoc(),
+                                                 memrefType, *tensorAlloc);
     }
 
     // Result type is inferred by the builder.
@@ -349,8 +349,8 @@ struct ExpandShapeOpInterface
     if (failed(buffer))
       return failure();
 
-    auto memrefExpandShape = rewriter.create<memref::ExpandShapeOp>(
-        op->getLoc(), tensorResultType.getShape(), *buffer,
+    auto memrefExpandShape = memref::ExpandShapeOp::create(
+        rewriter, op->getLoc(), tensorResultType.getShape(), *buffer,
         expandShapeOp.getReassociationIndices(),
         expandShapeOp.getMixedOutputShape());
     replaceOpWithBufferizedValues(rewriter, op,
@@ -398,8 +398,8 @@ struct ExtractSliceOpInterface
         extractSliceOp.getResult(), options, state);
     if (failed(resultMemrefType))
       return failure();
-    Value subView = rewriter.create<memref::SubViewOp>(
-        loc, llvm::cast<MemRefType>(*resultMemrefType), *srcMemref,
+    Value subView = memref::SubViewOp::create(
+        rewriter, loc, llvm::cast<MemRefType>(*resultMemrefType), *srcMemref,
         mixedOffsets, mixedSizes, mixedStrides);
 
     replaceOpWithBufferizedValues(rewriter, op, subView);
@@ -469,7 +469,7 @@ static void createStores(RewriterBase &rewriter, Location loc, int dim,
   if (dim == static_cast<int>(shape.size()) - 1) {
     for (int i = 0; i < shape.back(); ++i) {
       indices.back() = constants[i];
-      rewriter.create<memref::StoreOp>(loc, *elementIt, buffer, indices);
+      memref::StoreOp::create(rewriter, loc, *elementIt, buffer, indices);
       ++elementIt;
     }
     return;
@@ -507,8 +507,8 @@ struct FromElementsOpInterface
         bufferization::getBufferType(*tensorAlloc, options, state);
     if (failed(memrefType))
       return failure();
-    Value buffer = rewriter.create<bufferization::ToBufferOp>(
-        op->getLoc(), *memrefType, *tensorAlloc);
+    Value buffer = bufferization::ToBufferOp::create(rewriter, op->getLoc(),
+                                                     *memrefType, *tensorAlloc);
 
     // Case: tensor<0xelem_type>.
     if (fromElementsOp.getElements().empty()) {
@@ -518,8 +518,8 @@ struct FromElementsOpInterface
 
     // Case: tensor<elem_type>.
     if (shape.empty()) {
-      rewriter.create<memref::StoreOp>(
-          loc, fromElementsOp.getElements().front(), buffer);
+      memref::StoreOp::create(rewriter, loc,
+                              fromElementsOp.getElements().front(), buffer);
       replaceOpWithBufferizedValues(rewriter, op, buffer);
       return success();
     }
@@ -529,7 +529,7 @@ struct FromElementsOpInterface
     SmallVector<Value, 2> constants;
     constants.reserve(maxDim);
     for (int i = 0; i < maxDim; ++i)
-      constants.push_back(rewriter.create<arith::ConstantIndexOp>(loc, i));
+      constants.push_back(arith::ConstantIndexOp::create(rewriter, loc, i));
 
     // Traverse all `elements` and create `memref.store` ops.
     auto elementIt = fromElementsOp.getElements().begin();
@@ -576,15 +576,15 @@ static Value lowerGenerateLikeOpBody(RewriterBase &rewriter, Location loc,
   // Create linalg::MapOp.
   OpBuilder::InsertionGuard g(rewriter);
   auto linalgOp =
-      rewriter.create<linalg::MapOp>(loc, tensorType, /*inputs=*/ValueRange(),
-                                     /*init=*/tensorDestination);
+      linalg::MapOp::create(rewriter, loc, tensorType, /*inputs=*/ValueRange(),
+                            /*init=*/tensorDestination);
   Block &linalgBody = linalgOp.getMapper().emplaceBlock();
 
   // Create linalg::IndexOps.
   rewriter.setInsertionPointToStart(&linalgBody);
   SmallVector<Value> indices;
   for (int64_t dim = 0; dim < tensorType.getRank(); ++dim)
-    indices.push_back(rewriter.create<linalg::IndexOp>(loc, dim));
+    indices.push_back(linalg::IndexOp::create(rewriter, loc, dim));
 
   // Move over body.
   rewriter.mergeBlocks(&generateBody.front(), &linalgBody, indices);
@@ -644,8 +644,8 @@ struct InsertOpInterface
         getBuffer(rewriter, insertOp.getDest(), options, state);
     if (failed(destMemref))
       return failure();
-    rewriter.create<memref::StoreOp>(insertOp.getLoc(), insertOp.getScalar(),
-                                     *destMemref, insertOp.getIndices());
+    memref::StoreOp::create(rewriter, insertOp.getLoc(), insertOp.getScalar(),
+                            *destMemref, insertOp.getIndices());
     replaceOpWithBufferizedValues(rewriter, op, *destMemref);
     return success();
   }
@@ -713,9 +713,9 @@ struct InsertSliceOpInterface
         memref::SubViewOp::inferRankReducedResultType(
             insertSliceOp.getSourceType().getShape(), dstMemrefType,
             mixedOffsets, mixedSizes, mixedStrides);
-    Value subView = rewriter.create<memref::SubViewOp>(
-        loc, subviewMemRefType, *dstMemref, mixedOffsets, mixedSizes,
-        mixedStrides);
+    Value subView =
+        memref::SubViewOp::create(rewriter, loc, subviewMemRefType, *dstMemref,
+                                  mixedOffsets, mixedSizes, mixedStrides);
 
     // Copy tensor. If this tensor.insert_slice has a matching
     // tensor.extract_slice, the copy operation will eventually fold away.
@@ -796,14 +796,14 @@ struct PadOpInterface
     for (int64_t i = 0; i < resultType.getRank(); ++i) {
       if (!resultType.isDynamicDim(i))
         continue;
-      Value srcDim = rewriter.create<tensor::DimOp>(loc, padOp.getSource(), i);
+      Value srcDim = tensor::DimOp::create(rewriter, loc, padOp.getSource(), i);
       Value lowPad = toValue(mixedLowPad[i]);
       Value highPad = toValue(mixedHighPad[i]);
       AffineExpr s0, s1, s2;
       bindSymbols(op->getContext(), s0, s1, s2);
       AffineExpr sumExpr = s0 + s1 + s2;
-      Value sum = rewriter.create<affine::AffineApplyOp>(
-          loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
+      Value sum = affine::AffineApplyOp::create(
+          rewriter, loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
       dynamicSizes.push_back(sum);
     }
 
@@ -995,9 +995,9 @@ struct ParallelInsertSliceOpInterface
             parallelInsertSliceOp.getMixedOffsets(),
             parallelInsertSliceOp.getMixedSizes(),
             parallelInsertSliceOp.getMixedStrides());
-    Value subview = rewriter.create<memref::SubViewOp>(
-        parallelInsertSliceOp.getLoc(), subviewMemRefType, *destBuffer,
-        parallelInsertSliceOp.getMixedOffsets(),
+    Value subview = memref::SubViewOp::create(
+        rewriter, parallelInsertSliceOp.getLoc(), subviewMemRefType,
+        *destBuffer, parallelInsertSliceOp.getMixedOffsets(),
         parallelInsertSliceOp.getMixedSizes(),
         parallelInsertSliceOp.getMixedStrides());
 
@@ -1065,14 +1065,14 @@ struct SplatOpInterface
     if (options.defaultMemorySpaceFn(tensorType) != Attribute())
       return op->emitError("memory space not implemented yet");
 
-    auto linalgOp =
-        rewriter.create<linalg::MapOp>(loc, tensorType, /*inputs=*/ValueRange(),
-                                       /*init=*/*tensorAlloc);
+    auto linalgOp = linalg::MapOp::create(rewriter, loc, tensorType,
+                                          /*inputs=*/ValueRange(),
+                                          /*init=*/*tensorAlloc);
     Block &linalgBody = linalgOp.getMapper().emplaceBlock();
 
     // Create linalg::IndexOps.
     rewriter.setInsertionPointToStart(&linalgBody);
-    rewriter.create<linalg::YieldOp>(loc, splatOp.getInput());
+    linalg::YieldOp::create(rewriter, loc, splatOp.getInput());
     rewriter.replaceOp(splatOp, linalgOp.getResult()[0]);
 
     return success();
@@ -1126,8 +1126,8 @@ struct ConcatOpInterface
     MemRefType memrefType =
         MemRefType::get(concatOp.getResultType().getShape(),
                         concatOp.getResultType().getElementType(), layout);
-    Value dstBuffer = rewriter.create<bufferization::ToBufferOp>(
-        op->getLoc(), memrefType, *tensorAlloc);
+    Value dstBuffer = bufferization::ToBufferOp::create(
+        rewriter, op->getLoc(), memrefType, *tensorAlloc);
 
     // Extract the dimension for the concat op
     uint64_t concatDim = concatOp.getDim();
@@ -1142,7 +1142,7 @@ struct ConcatOpInterface
     for (const auto &[dimIdx, dimSize] :
          llvm::enumerate(tensorType.getShape())) {
       if (dimSize == ShapedType::kDynamic) {
-        auto dimOp = rewriter.create<memref::DimOp>(loc, dstBuffer, dimIdx);
+        auto dimOp = memref::DimOp::create(rewriter, loc, dstBuffer, dimIdx);
         sizes.push_back(dimOp.getResult());
         if (dimIdx == concatDim)
           dynamicConcatDim = true;
@@ -1157,7 +1157,7 @@ struct ConcatOpInterface
     if (dynamicConcatDim) {
       // One or more operands have dynamic size, so we must accumulate the
       // offset with arith ops.
-      dynamicOffset = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+      dynamicOffset = arith::ConstantIndexOp::create(rewriter, loc, 0);
     }
 
     for (auto operand : concatOp.getInputs()) {
@@ -1174,8 +1174,9 @@ struct ConcatOpInterface
 
       if (dynamicConcatDim) {
         offsets[concatDim] = dynamicOffset.value();
-        dynamicSize = rewriter.create<memref::DimOp>(loc, *srcBuffer, concatDim)
-                          .getResult();
+        dynamicSize =
+            memref::DimOp::create(rewriter, loc, *srcBuffer, concatDim)
+                .getResult();
         sizes[concatDim] = dynamicSize.value();
       } else {
         sizes[concatDim] = rewriter.getIndexAttr(operandConcatDimSize);
@@ -1188,16 +1189,16 @@ struct ConcatOpInterface
           memref::SubViewOp::inferRankReducedResultType(
               operandTensorType.getShape(), dstMemrefType, offsets, sizes,
               strides);
-      Value subview = rewriter.create<memref::SubViewOp>(
-          loc, subviewMemRefType, dstBuffer, offsets, sizes, strides);
+      Value subview = memref::SubViewOp::create(
+          rewriter, loc, subviewMemRefType, dstBuffer, offsets, sizes, strides);
 
       // Copy the source buffer into the destination subview.
       if (failed(options.createMemCpy(rewriter, loc, *srcBuffer, subview)))
         return failure();
 
       if (dynamicConcatDim) {
-        dynamicOffset = rewriter.create<arith::AddIOp>(
-            loc, dynamicOffset.value(), dynamicSize.value());
+        dynamicOffset = arith::AddIOp::create(
+            rewriter, loc, dynamicOffset.value(), dynamicSize.value());
       } else {
         concatDimOffset += operandConcatDimSize;
       }
diff --git a/mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp
index 3c2b0ab42f7a6..670865de6031f 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp
@@ -42,8 +42,9 @@ struct FoldEmptyTensorWithReshapeOp : public OpRewritePattern<ReshapeOp> {
 
     // Create new tensor.empty op.
     // TODO: Do not drop tensor type encoding.
-    Value emptyTensor = rewriter.create<EmptyOp>(
-        loc, resultShapes[0], reshapeOp.getResultType().getElementType());
+    Value emptyTensor =
+        EmptyOp::create(rewriter, loc, resultShapes[0],
+                        reshapeOp.getResultType().getElementType());
     if (emptyTensor.getType() != reshapeOp.getResultType()) {
       rewriter.replaceOpWithNewOp<tensor::CastOp>(
           reshapeOp, reshapeOp.getResultType(), emptyTensor);
diff --git a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
index dd50ae54d17cc..840b4f3121428 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
@@ -53,7 +53,7 @@ static ValueRange invertCollapseShapeIndexing(
   for (int64_t i : reassociation[dim])
     basis.push_back(reshapeSourceShape[i]);
   auto delinearized =
-      b.create<AffineDelinearizeIndexOp>(loc, indexValue, basis);
+      AffineDelinearizeIndexOp::create(b, loc, indexValue, basis);
   return delinearized->getResults();
 }
 
@@ -142,15 +142,15 @@ tensor::ExtractSliceFromCollapseHelper::emitLoopNestBody(
   SmallVector<Range> extractParams =
       helper.getExtractSliceParams(builder.getContext(), multiIndices);
 
-  Value subTileResult = builder.create<tensor::ExtractSliceOp>(
-      loc, collapseShapeOp.getSrc(), extractParams);
+  Value subTileResult = tensor::ExtractSliceOp::create(
+      builder, loc, collapseShapeOp.getSrc(), extractParams);
 
   SmallVector<Range> insertParams =
       helper.getInsertSliceParams(builder.getContext(), tileInductionVars);
 
   // Collapse the dimensions of the source slice back down.
-  Value collapsedResult = builder.create<tensor::CollapseShapeOp>(
-      loc, subTileResult, reassociationIndices);
+  Value collapsedResult = tensor::CollapseShapeOp::create(
+      builder, loc, subTileResult, reassociationIndices);
   return std::make_pair(collapsedResult, insertParams);
 }
 
@@ -173,8 +173,9 @@ tensor::simplifyCollapseShapeWithRankReducingExtractSlice(
   SmallVector<OpFoldResult> sizes =
       tensor::getMixedSizes(rewriter, op.getLoc(), op.getSrc());
   SmallVector<OpFoldResult> strides(sourceType.getRank(), one);
-  auto sliceOp = rewriter.create<tensor::ExtractSliceOp>(
-      op.getLoc(), info->sliceResultType, op.getSrc(), offsets, sizes, strides);
+  auto sliceOp = tensor::ExtractSliceOp::create(
+      rewriter, op.getLoc(), info->sliceResultType, op.getSrc(), offsets, sizes,
+      strides);
 
   if (!info->newReassociationIndices.has_value()) {
     rewriter.replaceOp(op, sliceOp.getResult());
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
index 13de55b0672a5..d76c02af7ab16 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
@@ -112,9 +112,9 @@ TransferReadOfExtractSliceOpFolder::matchAndRewriteMaskableOp(
       extractSliceOp.getMixedStrides(), extractSliceOp.getDroppedDims(),
       indices, sourceIndices);
 
-  Operation *newOp = rewriter.create<vector::TransferReadOp>(
-      readOp.getLoc(), readOp.getVectorType(), extractSliceOp.getSource(),
-      sourceIndices,
+  Operation *newOp = vector::TransferReadOp::create(
+      rewriter, readOp.getLoc(), readOp.getVectorType(),
+      extractSliceOp.getSource(), sourceIndices,
       AffineMapAttr::get(expandDimsToRank(
           readOp.getPermutationMap(), extractSliceOp.getSourceType().getRank(),
           extractSliceOp.getDroppedDims())),
diff --git a/mlir/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp b/mlir/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp
index bad56d4111dca..43d9d704a29ff 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp
@@ -64,9 +64,10 @@ FailureOr<Value> tensor::buildIndependentOp(OpBuilder &b, tensor::PadOp padOp,
     return padOp.getResult();
 
   // Create a new tensor::PadOp.
-  auto newPadOp = b.create<PadOp>(
-      loc, padOp.getResultType(), padOp.getSource(), newMixedLow, newMixedHigh,
-      constantPadding, padOp.getNofold(), /*attrs=*/ArrayRef<NamedAttribute>{});
+  auto newPadOp =
+      PadOp::create(b, loc, padOp.getResultType(), padOp.getSource(),
+                    newMixedLow, newMixedHigh, constantPadding,
+                    padOp.getNofold(), /*attrs=*/ArrayRef<NamedAttribute>{});
 
   // Create a tensor::ExtractSliceOp.
   // Reify the result sizes of the old tensor::PadOp.
@@ -83,10 +84,10 @@ FailureOr<Value> tensor::buildIndependentOp(OpBuilder &b, tensor::PadOp padOp,
       offsets.push_back(b.getIndexAttr(0));
     } else {
       offsets.push_back(
-          b.create<affine::AffineApplyOp>(
-               loc, b.getAffineDimExpr(0) - b.getAffineDimExpr(1),
-               std::initializer_list<Value>{cast<Value>(newMixedLow[i]),
-                                            cast<Value>(prevLow)})
+          affine::AffineApplyOp::create(
+              b, loc, b.getAffineDimExpr(0) - b.getAffineDimExpr(1),
+              std::initializer_list<Value>{cast<Value>(newMixedLow[i]),
+                                           cast<Value>(prevLow)})
               .getResult());
     }
     // size = reified result size
@@ -99,7 +100,7 @@ FailureOr<Value> tensor::buildIndependentOp(OpBuilder &b, tensor::PadOp padOp,
     strides.push_back(b.getIndexAttr(1));
   }
 
-  return b.create<ExtractSliceOp>(loc, newPadOp, offsets, sizes, strides)
+  return ExtractSliceOp::create(b, loc, newPadOp, offsets, sizes, strides)
       .getResult();
 }
 
@@ -124,7 +125,7 @@ FailureOr<Value> tensor::buildIndependentOp(OpBuilder &b,
 
   // Create a new tensor::EmptyOp.
   Value newEmptyOp =
-      b.create<EmptyOp>(loc, newSizes, emptyOp.getType().getElementType());
+      EmptyOp::create(b, loc, newSizes, emptyOp.getType().getElementType());
 
   // Create a tensor::ExtractSliceOp.
   SmallVector<OpFoldResult> offsets(newSizes.size(), b.getIndexAttr(0));
diff --git a/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
index 20bb4d1caf019..e0af2f77d44b8 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
@@ -76,8 +76,8 @@ struct FoldUnPaddingCollapseIntoExtract
       return rewriter.notifyMatchFailure(collapseShapeOp,
                                          "expected unpadding collapse");
 
-    Value unPaddedExtractSlice = rewriter.create<tensor::ExtractSliceOp>(
-        extractSliceOp.getLoc(), collapseShapeOp.getResultType(),
+    Value unPaddedExtractSlice = tensor::ExtractSliceOp::create(
+        rewriter, extractSliceOp.getLoc(), collapseShapeOp.getResultType(),
         extractSliceOp.getSource(), extractSliceOp.getMixedOffsets(),
         extractSliceOp.getMixedSizes(), extractSliceOp.getMixedStrides());
     rewriter.replaceOp(collapseShapeOp, unPaddedExtractSlice);
@@ -270,8 +270,8 @@ struct BubbleUpExpandThroughParallelCollapse
     // matches the number of dimensions of the result, then the expand_shape
     // is a no-op.
     if (newExpandReInds.size() != newExpandSizes.size()) {
-      newCollapseSrc = rewriter.create<tensor::ExpandShapeOp>(
-          loc, expandResultType, newCollapseSrc, newExpandReInds,
+      newCollapseSrc = tensor::ExpandShapeOp::create(
+          rewriter, loc, expandResultType, newCollapseSrc, newExpandReInds,
           newExpandSizes);
     }
 
@@ -280,8 +280,8 @@ struct BubbleUpExpandThroughParallelCollapse
     // is a no-op.
     Value replacement = newCollapseSrc;
     if (newCollapseReInds.size() != newExpandSizes.size()) {
-      replacement = rewriter.create<tensor::CollapseShapeOp>(
-          loc, newCollapseSrc, newCollapseReInds);
+      replacement = tensor::CollapseShapeOp::create(
+          rewriter, loc, newCollapseSrc, newCollapseReInds);
     }
     rewriter.replaceOp(expandOp, replacement);
     return success();
@@ -405,8 +405,8 @@ struct BubbleUpExpandShapeThroughExtractSlice
         shape, expandShapeOp.getResultType().getElementType());
 
     // Create a new ExtractSliceOp and ExpandShapeOp.
-    Value newSliceOp = rewriter.create<tensor::ExtractSliceOp>(
-        loc, expandShapeOp.getSrc(), collapsedOffsets, collapsedSizes,
+    Value newSliceOp = tensor::ExtractSliceOp::create(
+        rewriter, loc, expandShapeOp.getSrc(), collapsedOffsets, collapsedSizes,
         collapsedStrides);
     rewriter.replaceOpWithNewOp<tensor::ExpandShapeOp>(
         sliceOp, resultType, newSliceOp,
@@ -735,9 +735,9 @@ struct BubbleUpCollapseShapeThroughExtractSlice
                              groupExpandedOffsets.rend());
     }
 
-    Value newSliceOp = rewriter.create<tensor::ExtractSliceOp>(
-        collapseShapeOp->getLoc(), collapseShapeOp.getSrc(), expandedOffsets,
-        expandedSizes, expandedStrides);
+    Value newSliceOp = tensor::ExtractSliceOp::create(
+        rewriter, collapseShapeOp->getLoc(), collapseShapeOp.getSrc(),
+        expandedOffsets, expandedSizes, expandedStrides);
     rewriter.replaceOpWithNewOp<tensor::CollapseShapeOp>(
         sliceOp, sliceOp.getResultType(), newSliceOp,
         collapseShapeOp.getReassociationIndices());
diff --git a/mlir/lib/Dialect/Tensor/Transforms/RewriteAsConstant.cpp b/mlir/lib/Dialect/Tensor/Transforms/RewriteAsConstant.cpp
index 7c9fced540adb..69e649d2eebe8 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/RewriteAsConstant.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/RewriteAsConstant.cpp
@@ -196,7 +196,7 @@ struct PadOpToConstant final : public OpRewritePattern<PadOp> {
                                          "tensor type not supported");
 
     if (newOp.getType() != resultType)
-      newOp = rewriter.create<tensor::CastOp>(loc, resultType, newOp);
+      newOp = tensor::CastOp::create(rewriter, loc, resultType, newOp);
 
     rewriter.replaceOp(padTensorOp, newOp);
     return success();
diff --git a/mlir/lib/Dialect/Tensor/Transforms/RuntimeOpVerification.cpp b/mlir/lib/Dialect/Tensor/Transforms/RuntimeOpVerification.cpp
index 6e3285abffbfc..838ff1f987c63 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/RuntimeOpVerification.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/RuntimeOpVerification.cpp
@@ -47,15 +47,14 @@ struct CastOpInterface
 
     if (isa<UnrankedTensorType>(srcType)) {
       // Check rank.
-      Value srcRank = builder.create<RankOp>(loc, castOp.getSource());
+      Value srcRank = RankOp::create(builder, loc, castOp.getSource());
       Value resultRank =
-          builder.create<arith::ConstantIndexOp>(loc, resultType.getRank());
-      Value isSameRank = builder.create<arith::CmpIOp>(
-          loc, arith::CmpIPredicate::eq, srcRank, resultRank);
-      builder.create<cf::AssertOp>(
-          loc, isSameRank,
-          RuntimeVerifiableOpInterface::generateErrorMessage(op,
-                                                             "rank mismatch"));
+          arith::ConstantIndexOp::create(builder, loc, resultType.getRank());
+      Value isSameRank = arith::CmpIOp::create(
+          builder, loc, arith::CmpIPredicate::eq, srcRank, resultRank);
+      cf::AssertOp::create(builder, loc, isSameRank,
+                           RuntimeVerifiableOpInterface::generateErrorMessage(
+                               op, "rank mismatch"));
     }
 
     // Check dimension sizes.
@@ -70,13 +69,13 @@ struct CastOpInterface
         continue;
 
       Value srcDimSz =
-          builder.create<DimOp>(loc, castOp.getSource(), it.index());
+          DimOp::create(builder, loc, castOp.getSource(), it.index());
       Value resultDimSz =
-          builder.create<arith::ConstantIndexOp>(loc, it.value());
-      Value isSameSz = builder.create<arith::CmpIOp>(
-          loc, arith::CmpIPredicate::eq, srcDimSz, resultDimSz);
-      builder.create<cf::AssertOp>(
-          loc, isSameSz,
+          arith::ConstantIndexOp::create(builder, loc, it.value());
+      Value isSameSz = arith::CmpIOp::create(
+          builder, loc, arith::CmpIPredicate::eq, srcDimSz, resultDimSz);
+      cf::AssertOp::create(
+          builder, loc, isSameSz,
           RuntimeVerifiableOpInterface::generateErrorMessage(
               op, "size mismatch of dim " + std::to_string(it.index())));
     }
@@ -89,10 +88,11 @@ struct DimOpInterface
   void generateRuntimeVerification(Operation *op, OpBuilder &builder,
                                    Location loc) const {
     auto dimOp = cast<DimOp>(op);
-    Value rank = builder.create<RankOp>(loc, dimOp.getSource());
-    Value zero = builder.create<arith::ConstantIndexOp>(loc, 0);
-    builder.create<cf::AssertOp>(
-        loc, generateInBoundsCheck(builder, loc, dimOp.getIndex(), zero, rank),
+    Value rank = RankOp::create(builder, loc, dimOp.getSource());
+    Value zero = arith::ConstantIndexOp::create(builder, loc, 0);
+    cf::AssertOp::create(
+        builder, loc,
+        generateInBoundsCheck(builder, loc, dimOp.getIndex(), zero, rank),
         RuntimeVerifiableOpInterface::generateErrorMessage(
             op, "index is out of bounds"));
   }
@@ -124,7 +124,7 @@ struct ExtractInsertOpInterface
     }
 
     auto indices = extractInsertOp.getIndices();
-    auto zero = builder.create<arith::ConstantIndexOp>(loc, 0);
+    auto zero = arith::ConstantIndexOp::create(builder, loc, 0);
     Value assertCond;
     for (auto i : llvm::seq<int64_t>(0, rank)) {
       Value dimOp = builder.createOrFold<tensor::DimOp>(loc, tensor, i);
@@ -134,10 +134,9 @@ struct ExtractInsertOpInterface
           i > 0 ? builder.createOrFold<arith::AndIOp>(loc, assertCond, inBounds)
                 : inBounds;
     }
-    builder.create<cf::AssertOp>(
-        loc, assertCond,
-        RuntimeVerifiableOpInterface::generateErrorMessage(
-            op, "out-of-bounds access"));
+    cf::AssertOp::create(builder, loc, assertCond,
+                         RuntimeVerifiableOpInterface::generateErrorMessage(
+                             op, "out-of-bounds access"));
   }
 };
 
@@ -152,8 +151,8 @@ struct ExtractSliceOpInterface
     // For each dimension, assert that:
     // 0 <= offset < dim_size
     // 0 <= offset + (size - 1) * stride < dim_size
-    Value zero = builder.create<arith::ConstantIndexOp>(loc, 0);
-    Value one = builder.create<arith::ConstantIndexOp>(loc, 1);
+    Value zero = arith::ConstantIndexOp::create(builder, loc, 0);
+    Value one = arith::ConstantIndexOp::create(builder, loc, 1);
     for (int64_t i = 0, e = sourceType.getRank(); i < e; ++i) {
       Value offset = getValueOrCreateConstantIndexOp(
           builder, loc, extractSliceOp.getMixedOffsets()[i]);
@@ -167,21 +166,21 @@ struct ExtractSliceOpInterface
           loc, extractSliceOp.getSource(), i);
       Value offsetInBounds =
           generateInBoundsCheck(builder, loc, offset, zero, dimSize);
-      builder.create<cf::AssertOp>(
-          loc, offsetInBounds,
+      cf::AssertOp::create(
+          builder, loc, offsetInBounds,
           RuntimeVerifiableOpInterface::generateErrorMessage(
               op, "offset " + std::to_string(i) + " is out-of-bounds"));
 
       // Verify that slice does not run out-of-bounds.
-      Value sizeMinusOne = builder.create<arith::SubIOp>(loc, size, one);
+      Value sizeMinusOne = arith::SubIOp::create(builder, loc, size, one);
       Value sizeMinusOneTimesStride =
-          builder.create<arith::MulIOp>(loc, sizeMinusOne, stride);
+          arith::MulIOp::create(builder, loc, sizeMinusOne, stride);
       Value lastPos =
-          builder.create<arith::AddIOp>(loc, offset, sizeMinusOneTimesStride);
+          arith::AddIOp::create(builder, loc, offset, sizeMinusOneTimesStride);
       Value lastPosInBounds =
           generateInBoundsCheck(builder, loc, lastPos, zero, dimSize);
-      builder.create<cf::AssertOp>(
-          loc, lastPosInBounds,
+      cf::AssertOp::create(
+          builder, loc, lastPosInBounds,
           RuntimeVerifiableOpInterface::generateErrorMessage(
               op, "extract_slice runs out-of-bounds along dimension " +
                       std::to_string(i)));
diff --git a/mlir/lib/Dialect/Tensor/Transforms/SubsetInsertionOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/SubsetInsertionOpInterfaceImpl.cpp
index d50d7c62b789c..b6fdba360deea 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/SubsetInsertionOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/SubsetInsertionOpInterfaceImpl.cpp
@@ -59,8 +59,8 @@ struct InsertSliceLikeOpSubsetInsertionOpInterface
   Value buildSubsetExtraction(Operation *op, OpBuilder &builder,
                               Location loc) const {
     auto insertSliceOp = cast<OpTy>(op);
-    auto extractOp = builder.create<tensor::ExtractSliceOp>(
-        loc, insertSliceOp.getSourceType(), insertSliceOp.getDest(),
+    auto extractOp = tensor::ExtractSliceOp::create(
+        builder, loc, insertSliceOp.getSourceType(), insertSliceOp.getDest(),
         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
         insertSliceOp.getMixedStrides());
     return extractOp.getResult();
diff --git a/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp
index 6df401d4c6962..bdbafa5b01d07 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp
@@ -50,9 +50,9 @@ FailureOr<TilingResult> tensor::replaceExtractSliceWithTiledProducer(
                                       builder.getIndexAttr(0));
     SmallVector<OpFoldResult> strides(sliceOp.getSourceType().getRank(),
                                       builder.getIndexAttr(1));
-    auto newSliceOp = builder.create<tensor::ExtractSliceOp>(
-        sliceOp.getLoc(), sliceOp.getType(), tiledResult->tiledValues[0],
-        offsets, sliceOp.getMixedSizes(), strides);
+    auto newSliceOp = tensor::ExtractSliceOp::create(
+        builder, sliceOp.getLoc(), sliceOp.getType(),
+        tiledResult->tiledValues[0], offsets, sliceOp.getMixedSizes(), strides);
     tiledResult->tiledValues[0] = newSliceOp;
   }
 
diff --git a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
index 3f6258b5e4d43..fd75e1090174b 100644
--- a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
@@ -56,7 +56,7 @@ PadOp mlir::tensor::createPadHighOp(RankedTensorType resType, Value source,
     high[idx] = affine::makeComposedFoldedAffineApply(b, loc, d0 - d1,
                                                       {outDim, sourceDim});
   }
-  return b.create<PadOp>(loc, resType, source, low, high, pad, nofold);
+  return PadOp::create(b, loc, resType, source, low, high, pad, nofold);
 }
 
 SmallVector<Value> mlir::tensor::createDynamicDimValues(OpBuilder &b,
@@ -67,7 +67,7 @@ SmallVector<Value> mlir::tensor::createDynamicDimValues(OpBuilder &b,
   for (const auto &en : llvm::enumerate(tensorTy.getShape())) {
     if (en.value() == ShapedType::kDynamic)
       dynamicDims.push_back(
-          b.create<tensor::DimOp>(loc, rankedTensor, en.index()));
+          tensor::DimOp::create(b, loc, rankedTensor, en.index()));
   }
   return dynamicDims;
 }
@@ -119,7 +119,7 @@ mlir::tensor::dropGivenUnitDims(OpBuilder &b, Location loc, Value src,
     reassocMaps.emplace_back(llvm::make_range(seq.begin(), seq.end()));
     nextDimToGroup = setBit + 1;
   }
-  return b.create<tensor::CollapseShapeOp>(loc, src, reassocMaps);
+  return tensor::CollapseShapeOp::create(b, loc, src, reassocMaps);
 }
 
 bool mlir::tensor::isCastLikeInsertSliceOp(InsertSliceOp op) {


