[Mlir-commits] [mlir] 7b78796 - [mlir][NFC] update `mlir/Dialect` create APIs (25/n) (#149932)

llvmlistbot at llvm.org
Mon Jul 21 16:58:02 PDT 2025


Author: Maksim Levental
Date: 2025-07-21T19:57:59-04:00
New Revision: 7b787965431e666858fdf66db25ee5a129833927

URL: https://github.com/llvm/llvm-project/commit/7b787965431e666858fdf66db25ee5a129833927
DIFF: https://github.com/llvm/llvm-project/commit/7b787965431e666858fdf66db25ee5a129833927.diff

LOG: [mlir][NFC] update `mlir/Dialect` create APIs (25/n) (#149932)

See https://github.com/llvm/llvm-project/pull/147168 for more info.
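
The mechanical change in this series replaces calls of the form
`builder.create<OpTy>(loc, ...)` with the equivalent static
`OpTy::create(builder, loc, ...)` form, passing the builder as the first
argument. A minimal before/after sketch, loosely modeled on the
clampFloatHelper change in ConversionUtils.cpp below (the helper name and
includes are illustrative assumptions, not part of the commit):

    #include "mlir/Dialect/Arith/IR/Arith.h"
    #include "mlir/IR/Builders.h"

    using namespace mlir;

    // Illustrative sketch only; mirrors the arith min/max calls touched below.
    static Value clampFloatSketch(Location loc, Value arg, Value min, Value max,
                                  OpBuilder &rewriter) {
      // Old spelling: the builder is the receiver and the op is a template arg.
      //   Value lo = rewriter.create<arith::MaximumFOp>(loc, arg, min);
      // New spelling: the op's static create() takes the builder first.
      Value lo = arith::MaximumFOp::create(rewriter, loc, arg, min);
      return arith::MinimumFOp::create(rewriter, loc, lo, max);
    }

The rewrite is purely mechanical (hence NFC); behavior is unchanged.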

Added: 
    

Modified: 
    mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
    mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaTypeConverters.cpp
    mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
    mlir/lib/Dialect/UB/IR/UBOps.cpp
    mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp
    mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
    mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
    mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp
    mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp
    mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
    mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
index 5758d8d5ef506..606626dfe4d2c 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
@@ -845,9 +845,9 @@ struct PadSliceOptimization : public OpRewritePattern<tosa::SliceOp> {
         getTosaConstShape(rewriter, sliceOp.getLoc(), newPadPaddings);
     auto newPadTy =
         RankedTensorType::get(newPadShape, inputTy.getElementType());
-    auto newPadOp = rewriter.create<tosa::PadOp>(
-        padOp.getLoc(), newPadTy, padOp.getInput1(), newPaddingsOp,
-        padOp.getPadConst());
+    auto newPadOp = tosa::PadOp::create(rewriter, padOp.getLoc(), newPadTy,
+                                        padOp.getInput1(), newPaddingsOp,
+                                        padOp.getPadConst());
 
     // Update SliceOp and point to new PadOp
     auto newStartOp =
@@ -897,9 +897,9 @@ struct SliceDynamicSizeCanonicalization
     }
 
     auto size_op = getTosaConstShape(rewriter, sliceOp.getLoc(), sliceSizes);
-    auto newSliceOp = rewriter.create<tosa::SliceOp>(
-        sliceOp.getLoc(), sliceOp.getType(), sliceOp.getInput1(),
-        sliceOp.getStart(), size_op);
+    auto newSliceOp =
+        tosa::SliceOp::create(rewriter, sliceOp.getLoc(), sliceOp.getType(),
+                              sliceOp.getInput1(), sliceOp.getStart(), size_op);
 
     rewriter.replaceOp(sliceOp, newSliceOp.getResult());
     return success();

diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index f0ff430bae882..5b4a2c9d85ea1 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -180,12 +180,12 @@ Operation *TosaDialect::materializeConstant(OpBuilder &builder, Attribute value,
   // Tosa dialect constants only support ElementsAttr unlike standard dialect
   // constant which supports all attributes.
   if (llvm::isa<shapeType>(type) && llvm::isa<DenseIntElementsAttr>(value)) {
-    return builder.create<tosa::ConstShapeOp>(
-        loc, type, llvm::cast<DenseIntElementsAttr>(value));
+    return tosa::ConstShapeOp::create(builder, loc, type,
+                                      llvm::cast<DenseIntElementsAttr>(value));
   }
   if (llvm::isa<ElementsAttr>(value))
-    return builder.create<tosa::ConstOp>(loc, type,
-                                         llvm::cast<ElementsAttr>(value));
+    return tosa::ConstOp::create(builder, loc, type,
+                                 llvm::cast<ElementsAttr>(value));
   return nullptr;
 }
 
@@ -323,7 +323,7 @@ Value mlir::tosa::createPadConstTensor(OpBuilder &builder, Location loc,
                                    builder.getFloatAttr(srcElemType, val))
           : DenseElementsAttr::get(padConstEType,
                                    builder.getIntegerAttr(srcElemType, val))};
-  return builder.create<tosa::ConstOp>(loc, padConstType, padConstAttr);
+  return tosa::ConstOp::create(builder, loc, padConstType, padConstAttr);
 }
 
 //===----------------------------------------------------------------------===//
@@ -2415,7 +2415,7 @@ LogicalResult TransposeOp::reifyResultShapes(
     int32_t dimInInput = transposePerms[dim];
     if (inputType.isDynamicDim(dimInInput))
       returnedDims[dim] =
-          builder.create<tensor::DimOp>(getLoc(), input, dimInInput)
+          tensor::DimOp::create(builder, getLoc(), input, dimInInput)
               .getResult();
     else
       returnedDims[dim] =
@@ -3947,12 +3947,12 @@ std::optional<Value> mlir::tosa::createZeroPointTensor(OpBuilder &builder,
   if (llvm::isa<FloatType>(srcElemType)) {
     auto zpAttr = DenseElementsAttr::get(
         zpType, builder.getFloatAttr(srcElemType, static_cast<double>(zp)));
-    return builder.create<tosa::ConstOp>(loc, zpType, zpAttr);
+    return tosa::ConstOp::create(builder, loc, zpType, zpAttr);
   }
   if (llvm::isa<IntegerType>(srcElemType)) {
     auto zpAttr =
         DenseElementsAttr::get(zpType, builder.getIntegerAttr(srcElemType, zp));
-    return builder.create<tosa::ConstOp>(loc, zpType, zpAttr);
+    return tosa::ConstOp::create(builder, loc, zpType, zpAttr);
   }
   llvm::errs() << "zero point is not allowed for unsupported data types\n";
   return std::nullopt;

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
index f6caa2a985a4d..9474299a39582 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
@@ -90,12 +90,12 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
 
     if (inputETy != resultETy) {
       inputType = inputType.clone(resultETy);
-      input = rewriter.create<tosa::CastOp>(op.getLoc(), inputType, input);
+      input = tosa::CastOp::create(rewriter, op.getLoc(), inputType, input);
     }
 
     if (weightETy != resultETy) {
       weightType = weightType.clone(resultETy);
-      weight = rewriter.create<tosa::CastOp>(op.getLoc(), weightType, weight);
+      weight = tosa::CastOp::create(rewriter, op.getLoc(), weightType, weight);
     }
 
     if (iZp != 0 || wZp != 0) {
@@ -109,9 +109,9 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
         auto zpTy = RankedTensorType::get(shape, ety);
         auto zpAttr =
             DenseElementsAttr::get(zpTy, rewriter.getIntegerAttr(ety, zp));
-        auto zpVal = rewriter.create<tosa::ConstOp>(op.getLoc(), zpTy, zpAttr);
-        return rewriter.create<tosa::SubOp>(op.getLoc(), val.getType(), val,
-                                            zpVal);
+        auto zpVal = tosa::ConstOp::create(rewriter, op.getLoc(), zpTy, zpAttr);
+        return tosa::SubOp::create(rewriter, op.getLoc(), val.getType(), val,
+                                   zpVal);
       };
 
       input = applyZp(input, iZp);
@@ -138,10 +138,10 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
       auto padTy = RankedTensorType::get({1}, inputETy);
       auto padAttr = DenseElementsAttr::get(padTy, zeroAttr);
       Value padVal =
-          rewriter.create<tosa::ConstOp>(op->getLoc(), padTy, padAttr);
+          tosa::ConstOp::create(rewriter, op->getLoc(), padTy, padAttr);
       inputType = RankedTensorType::get(newShape, inputETy);
-      input = rewriter.create<tosa::PadOp>(op->getLoc(), inputType, input,
-                                           padSizeVal, padVal);
+      input = tosa::PadOp::create(rewriter, op->getLoc(), inputType, input,
+                                  padSizeVal, padVal);
     }
 
     // Perform an elementwise mul over the reshaped input and weight.
@@ -161,7 +161,7 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
     auto shiftZeroAttr = DenseElementsAttr::get(
         shiftType, rewriter.getIntegerAttr(shiftElementType, 0));
     Value constZero =
-        rewriter.create<tosa::ConstOp>(op.getLoc(), shiftType, shiftZeroAttr);
+        tosa::ConstOp::create(rewriter, op.getLoc(), shiftType, shiftZeroAttr);
     Value mulValue = rewriter
                          .create<tosa::MulOp>(op.getLoc(), mulShapeType, input,
                                               weight, constZero)
@@ -174,8 +174,8 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
         dyn_cast<RankedTensorType>(input.getType()).getElementType());
     auto outputShapeValue =
         getTosaConstShape(rewriter, op->getLoc(), outputShape);
-    Value outputValue = rewriter.create<tosa::ReshapeOp>(
-        op.getLoc(), outputShapeType, mulValue, outputShapeValue);
+    Value outputValue = tosa::ReshapeOp::create(
+        rewriter, op.getLoc(), outputShapeType, mulValue, outputShapeValue);
 
     Value bias = op.getBias();
     if (EqualizeRanks(rewriter, op.getLoc(), outputValue, bias).failed()) {

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
index df6d52615478e..dc5c51b0abad5 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -62,14 +62,16 @@ class TransposeConvNonStridedConverter
     convPad[2] = kernelWidth - 1 + pad[2];
     convPad[3] = kernelWidth - 1 + pad[3];
 
-    auto reverse1 = rewriter.create<tosa::ReverseOp>(
-        loc, weightTy, weight, /* axis = */ rewriter.getI32IntegerAttr(1));
-    auto reverse2 = rewriter.create<tosa::ReverseOp>(
-        loc, weightTy, reverse1, /* axis = */ rewriter.getI32IntegerAttr(2));
-
-    Value conv2d = rewriter.create<tosa::Conv2DOp>(
-        loc, resultTy, input, reverse2, bias, op.getInputZp(), op.getWeightZp(),
-        rewriter.getDenseI64ArrayAttr(convPad),
+    auto reverse1 =
+        tosa::ReverseOp::create(rewriter, loc, weightTy, weight,
+                                /* axis = */ rewriter.getI32IntegerAttr(1));
+    auto reverse2 =
+        tosa::ReverseOp::create(rewriter, loc, weightTy, reverse1,
+                                /* axis = */ rewriter.getI32IntegerAttr(2));
+
+    Value conv2d = tosa::Conv2DOp::create(
+        rewriter, loc, resultTy, input, reverse2, bias, op.getInputZp(),
+        op.getWeightZp(), rewriter.getDenseI64ArrayAttr(convPad),
         rewriter.getDenseI64ArrayAttr(stride),
         rewriter.getDenseI64ArrayAttr({1, 1}),
         /* acc_type = */ op.getAccType());
@@ -216,8 +218,8 @@ class TransposeConvStridedConverter
         inputPaddingVal, inputPadConst);
 
     // We use a zero bias as we need to broadcast the bias.
-    auto zeroBias = rewriter.create<tosa::ConstOp>(
-        loc,
+    auto zeroBias = tosa::ConstOp::create(
+        rewriter, loc,
         RankedTensorType::get({outputChannels * stride[0] * stride[1]},
                               biasETy),
         DenseElementsAttr::get(

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
index a9e98c8908e15..4d347c02ee16d 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
@@ -112,7 +112,7 @@ class TypeModificationState {
           OpBuilder builder{value.getContext()};
           builder.setInsertionPointAfter(value.getDefiningOp());
           castValue =
-              builder.create<tensor::CastOp>(value.getLoc(), oldType, value);
+              tensor::CastOp::create(builder, value.getLoc(), oldType, value);
         }
 
         use->set(castValue);

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp
index db7a3c671dedc..5590927c3f774 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp
@@ -419,8 +419,8 @@ std::optional<Value> TosaReduceTransposes::buildMappedToValue(
     return std::nullopt;
   }
   ImplicitLocOpBuilder builder(reshapeOp.getLoc(), rewriter);
-  auto foldedReshape = rewriter.create<ReshapeOp>(
-      reshapeOp.getLoc(),
+  auto foldedReshape = ReshapeOp::create(
+      rewriter, reshapeOp.getLoc(),
       RankedTensorType::get(applyTOSAPermutation(shape, hoistedPerms),
                             reshapeOutputType.getElementType()),
       reshapeOp.getInput1(),
@@ -439,8 +439,8 @@ std::optional<Value> TosaReduceTransposes::buildMappedToValue(
   if (!maybeNewDenseAttr.has_value())
     return std::nullopt;
   auto newDenseAttr = maybeNewDenseAttr.value();
-  auto newConstOp = rewriter.create<ConstOp>(
-      constOp.getLoc(), newDenseAttr.getType(), newDenseAttr);
+  auto newConstOp = ConstOp::create(rewriter, constOp.getLoc(),
+                                    newDenseAttr.getType(), newDenseAttr);
   return newConstOp->getResult(0);
 }
 

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaTypeConverters.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaTypeConverters.cpp
index 3b697a2ee3e47..677d8e9904a67 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaTypeConverters.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaTypeConverters.cpp
@@ -37,7 +37,7 @@ void mlir::tosa::populateTosaTypeConversion(TypeConverter &converter) {
     if (inputs.size() != 1)
       return Value();
 
-    return builder.create<UnrealizedConversionCastOp>(loc, resultType, inputs)
+    return UnrealizedConversionCastOp::create(builder, loc, resultType, inputs)
         .getResult(0);
   });
   converter.addTargetMaterialization([&](OpBuilder &builder, Type resultType,
@@ -46,7 +46,7 @@ void mlir::tosa::populateTosaTypeConversion(TypeConverter &converter) {
     if (inputs.size() != 1)
       return Value();
 
-    return builder.create<UnrealizedConversionCastOp>(loc, resultType, inputs)
+    return UnrealizedConversionCastOp::create(builder, loc, resultType, inputs)
         .getResult(0);
   });
 }

diff --git a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
index 9844abcc34cb1..69eda03e03ab3 100644
--- a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
+++ b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
@@ -33,18 +33,18 @@ mlir::tosa::condenseValues(const SmallVector<Value> &values) {
 
 Value mlir::tosa::clampFloatHelper(Location loc, Value arg, Value min,
                                    Value max, OpBuilder &rewriter) {
-  Value minValue = rewriter.create<arith::MinimumFOp>(loc, arg, max);
-  return rewriter.create<arith::MaximumFOp>(loc, minValue, min);
+  Value minValue = arith::MinimumFOp::create(rewriter, loc, arg, max);
+  return arith::MaximumFOp::create(rewriter, loc, minValue, min);
 }
 
 Value mlir::tosa::clampIntHelper(Location loc, Value arg, Value min, Value max,
                                  OpBuilder &rewriter, bool isUnsigned) {
   if (isUnsigned) {
-    auto minOrArg = rewriter.create<arith::MaxUIOp>(loc, min, arg);
-    return rewriter.create<arith::MinUIOp>(loc, max, minOrArg);
+    auto minOrArg = arith::MaxUIOp::create(rewriter, loc, min, arg);
+    return arith::MinUIOp::create(rewriter, loc, max, minOrArg);
   }
-  auto minOrArg = rewriter.create<arith::MaxSIOp>(loc, min, arg);
-  return rewriter.create<arith::MinSIOp>(loc, max, minOrArg);
+  auto minOrArg = arith::MaxSIOp::create(rewriter, loc, min, arg);
+  return arith::MinSIOp::create(rewriter, loc, max, minOrArg);
 }
 
 bool mlir::tosa::validIntegerRange(IntegerType ty, int64_t value) {
@@ -144,8 +144,8 @@ LogicalResult mlir::tosa::EqualizeRanks(ImplicitLocOpBuilder &builder,
       ArrayRef<int64_t>(reshapeOutputShape), reshapeInputType.getElementType());
   auto reshapeOutputShapeValue = getTosaConstShape(builder, reshapeOutputShape);
 
-  auto reshapeLower = builder.create<tosa::ReshapeOp>(
-      reshapeOutputType, lowerTensorValue, reshapeOutputShapeValue);
+  auto reshapeLower = tosa::ReshapeOp::create(
+      builder, reshapeOutputType, lowerTensorValue, reshapeOutputShapeValue);
 
   if (input1Rank > input2Rank) {
     input1 = higherTensorValue;
@@ -162,7 +162,7 @@ Value mlir::tosa::getTosaConstShape(ImplicitLocOpBuilder &builder,
                                     llvm::ArrayRef<int64_t> shape) {
   auto attr = builder.getIndexTensorAttr(convertFromMlirShape(shape));
   auto type = mlir::tosa::shapeType::get(builder.getContext(), shape.size());
-  mlir::Operation *mlir_op = builder.create<tosa::ConstShapeOp>(type, attr);
+  mlir::Operation *mlir_op = tosa::ConstShapeOp::create(builder, type, attr);
   return mlir_op->getResult(0);
 }
 

diff --git a/mlir/lib/Dialect/UB/IR/UBOps.cpp b/mlir/lib/Dialect/UB/IR/UBOps.cpp
index 5b2cfe7bf4264..ee523f9522953 100644
--- a/mlir/lib/Dialect/UB/IR/UBOps.cpp
+++ b/mlir/lib/Dialect/UB/IR/UBOps.cpp
@@ -52,7 +52,7 @@ void UBDialect::initialize() {
 Operation *UBDialect::materializeConstant(OpBuilder &builder, Attribute value,
                                           Type type, Location loc) {
   if (auto attr = dyn_cast<PoisonAttr>(value))
-    return builder.create<PoisonOp>(loc, type, attr);
+    return PoisonOp::create(builder, loc, type, attr);
 
   return nullptr;
 }

diff --git a/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp b/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp
index 7de32f7cbfb8b..0fa353abc4972 100644
--- a/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp
+++ b/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp
@@ -60,11 +60,11 @@ SmallVector<Value> x86vector::MaskCompressOp::getIntrinsicOperands(
   if (adaptor.getSrc()) {
     src = adaptor.getSrc();
   } else if (adaptor.getConstantSrc()) {
-    src = rewriter.create<LLVM::ConstantOp>(loc, opType,
-                                            adaptor.getConstantSrcAttr());
+    src = LLVM::ConstantOp::create(rewriter, loc, opType,
+                                   adaptor.getConstantSrcAttr());
   } else {
     auto zeroAttr = rewriter.getZeroAttr(opType);
-    src = rewriter.create<LLVM::ConstantOp>(loc, opType, zeroAttr);
+    src = LLVM::ConstantOp::create(rewriter, loc, opType, zeroAttr);
   }
 
   return SmallVector<Value>{adaptor.getA(), src, adaptor.getK()};
@@ -77,7 +77,7 @@ x86vector::DotOp::getIntrinsicOperands(ArrayRef<Value> operands,
   SmallVector<Value> intrinsicOperands(operands);
   // Dot product of all elements, broadcasted to all elements.
   Value scale =
-      rewriter.create<LLVM::ConstantOp>(getLoc(), rewriter.getI8Type(), 0xff);
+      LLVM::ConstantOp::create(rewriter, getLoc(), rewriter.getI8Type(), 0xff);
   intrinsicOperands.push_back(scale);
 
   return intrinsicOperands;
@@ -90,14 +90,14 @@ SmallVector<Value> x86vector::DotInt8Op::getIntrinsicOperands(
   Adaptor adaptor(operands, *this);
   intrinsicOprnds.push_back(adaptor.getW());
   // Bitcast `a` and `b` to i32
-  Value bitcast_a = rewriter.create<LLVM::BitcastOp>(
-      getLoc(),
+  Value bitcast_a = LLVM::BitcastOp::create(
+      rewriter, getLoc(),
       VectorType::get((getA().getType().getShape()[0] / 4),
                       rewriter.getIntegerType(32)),
       adaptor.getA());
   intrinsicOprnds.push_back(bitcast_a);
-  Value bitcast_b = rewriter.create<LLVM::BitcastOp>(
-      getLoc(),
+  Value bitcast_b = LLVM::BitcastOp::create(
+      rewriter, getLoc(),
       VectorType::get((getB().getType().getShape()[0] / 4),
                       rewriter.getIntegerType(32)),
       adaptor.getB());

diff --git a/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp b/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
index 87f7867fe1b7c..385ec5e824051 100644
--- a/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
+++ b/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
@@ -37,8 +37,8 @@ Value mlir::x86vector::avx2::inline_asm::mm256BlendPsAsm(
       "=x,x,x"; // Careful: constraint parser is very brittle: no ws!
   SmallVector<Value> asmVals{v1, v2};
   auto asmStr = llvm::formatv(asmTp, llvm::format_hex(mask, /*width=*/2)).str();
-  auto asmOp = b.create<LLVM::InlineAsmOp>(
-      v1.getType(), /*operands=*/asmVals, /*asm_string=*/asmStr,
+  auto asmOp = LLVM::InlineAsmOp::create(
+      b, v1.getType(), /*operands=*/asmVals, /*asm_string=*/asmStr,
       /*constraints=*/asmCstr, /*has_side_effects=*/false,
       /*is_align_stack=*/false, LLVM::TailCallKind::None,
       /*asm_dialect=*/asmDialectAttr,
@@ -48,14 +48,14 @@ Value mlir::x86vector::avx2::inline_asm::mm256BlendPsAsm(
 
 Value mlir::x86vector::avx2::intrin::mm256UnpackLoPs(ImplicitLocOpBuilder &b,
                                                      Value v1, Value v2) {
-  return b.create<vector::ShuffleOp>(
-      v1, v2, ArrayRef<int64_t>{0, 8, 1, 9, 4, 12, 5, 13});
+  return vector::ShuffleOp::create(b, v1, v2,
+                                   ArrayRef<int64_t>{0, 8, 1, 9, 4, 12, 5, 13});
 }
 
 Value mlir::x86vector::avx2::intrin::mm256UnpackHiPs(ImplicitLocOpBuilder &b,
                                                      Value v1, Value v2) {
-  return b.create<vector::ShuffleOp>(
-      v1, v2, ArrayRef<int64_t>{2, 10, 3, 11, 6, 14, 7, 15});
+  return vector::ShuffleOp::create(
+      b, v1, v2, ArrayRef<int64_t>{2, 10, 3, 11, 6, 14, 7, 15});
 }
 ///                            a  a   b   b  a  a   b   b
 /// Takes an 8 bit mask, 2 bit for each position of a[0, 3)  **and** b[0, 4):
@@ -68,7 +68,7 @@ Value mlir::x86vector::avx2::intrin::mm256ShufflePs(ImplicitLocOpBuilder &b,
   MaskHelper::extractShuffle(mask, b01, b23, b45, b67);
   SmallVector<int64_t> shuffleMask = {
       b01, b23, b45 + 8, b67 + 8, b01 + 4, b23 + 4, b45 + 8 + 4, b67 + 8 + 4};
-  return b.create<vector::ShuffleOp>(v1, v2, shuffleMask);
+  return vector::ShuffleOp::create(b, v1, v2, shuffleMask);
 }
 
 // imm[0:1] out of imm[0:3] is:
@@ -96,7 +96,7 @@ Value mlir::x86vector::avx2::intrin::mm256Permute2f128Ps(
   MaskHelper::extractPermute(mask, b03, b47);
   appendToMask(b03);
   appendToMask(b47);
-  return b.create<vector::ShuffleOp>(v1, v2, shuffleMask);
+  return vector::ShuffleOp::create(b, v1, v2, shuffleMask);
 }
 
 /// If bit i of `mask` is zero, take f32 at i from v1 else take it from v2.
@@ -108,7 +108,7 @@ Value mlir::x86vector::avx2::intrin::mm256BlendPs(ImplicitLocOpBuilder &b,
     bool isSet = mask & (1 << i);
     shuffleMask.push_back(!isSet ? i : i + 8);
   }
-  return b.create<vector::ShuffleOp>(v1, v2, shuffleMask);
+  return vector::ShuffleOp::create(b, v1, v2, shuffleMask);
 }
 
 /// AVX2 4x8xf32-specific transpose lowering using a "C intrinsics" model.
@@ -244,13 +244,13 @@ class TransposeOpLowering : public OpRewritePattern<vector::TransposeOp> {
           VectorType::get({n * m}, op.getSourceVectorType().getElementType());
       auto reshInputType = VectorType::get({m, n}, srcType.getElementType());
       auto reshInput =
-          ib.create<vector::ShapeCastOp>(flattenedType, op.getVector());
-      reshInput = ib.create<vector::ShapeCastOp>(reshInputType, reshInput);
+          vector::ShapeCastOp::create(ib, flattenedType, op.getVector());
+      reshInput = vector::ShapeCastOp::create(ib, reshInputType, reshInput);
 
       // Extract 1-D vectors from the higher-order dimension of the input
       // vector.
       for (int64_t i = 0; i < m; ++i)
-        vs.push_back(ib.create<vector::ExtractOp>(reshInput, i));
+        vs.push_back(vector::ExtractOp::create(ib, reshInput, i));
 
       // Transpose set of 1-D vectors.
       if (m == 4)
@@ -260,16 +260,16 @@ class TransposeOpLowering : public OpRewritePattern<vector::TransposeOp> {
 
       // Insert transposed 1-D vectors into the higher-order dimension of the
       // output vector.
-      Value res = ib.create<arith::ConstantOp>(reshInputType,
-                                               ib.getZeroAttr(reshInputType));
+      Value res = arith::ConstantOp::create(ib, reshInputType,
+                                            ib.getZeroAttr(reshInputType));
       for (int64_t i = 0; i < m; ++i)
-        res = ib.create<vector::InsertOp>(vs[i], res, i);
+        res = vector::InsertOp::create(ib, vs[i], res, i);
 
       // The output vector still has the shape of the input vector (e.g., 4x8).
       // We have to transpose their dimensions and retrieve its original rank
       // (e.g., 1x8x1x4x1).
-      res = ib.create<vector::ShapeCastOp>(flattenedType, res);
-      res = ib.create<vector::ShapeCastOp>(op.getResultVectorType(), res);
+      res = vector::ShapeCastOp::create(ib, flattenedType, res);
+      res = vector::ShapeCastOp::create(ib, op.getResultVectorType(), res);
       rewriter.replaceOp(op, res);
       return success();
     };

diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
index 78cbf884a1911..c8da5558438ea 100644
--- a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
+++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
@@ -531,7 +531,7 @@ void CreateDescOp::build(OpBuilder &builder, OperationState &state,
   int64_t size = static_cast<int64_t>(offsets.size());
   auto type = VectorType::get(size, builder.getIndexType());
   auto values = getValueOrCreateConstantIndexOp(builder, loc, offsets);
-  auto offset = builder.create<vector::FromElementsOp>(loc, type, values);
+  auto offset = vector::FromElementsOp::create(builder, loc, type, values);
   build(builder, state, TensorDesc, source, offset);
 }
 
@@ -651,7 +651,7 @@ void UpdateOffsetOp::build(OpBuilder &builder, OperationState &state,
   int64_t size = static_cast<int64_t>(offsets.size());
   auto type = VectorType::get({size}, builder.getIndexType());
   auto values = getValueOrCreateConstantIndexOp(builder, loc, offsets);
-  auto offset = builder.create<vector::FromElementsOp>(loc, type, values);
+  auto offset = vector::FromElementsOp::create(builder, loc, type, values);
   build(builder, state, tdescTy, tensorDesc, offset);
 }
 

diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp
index bc61979c2732b..e95d2f75d8b5a 100644
--- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp
+++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp
@@ -120,14 +120,14 @@ static Value resolveDistributedTy(Value orig, T expected,
   // If orig is a vector type, create a shape cast op to reconcile the types.
   if (isa<VectorType>(orig.getType())) {
     auto castOp =
-        rewriter.create<vector::ShapeCastOp>(orig.getLoc(), expected, orig);
+        vector::ShapeCastOp::create(rewriter, orig.getLoc(), expected, orig);
     return castOp.getResult();
   }
   // If orig is a tensor descriptor type, create an unrealized conversion cast
   // op to reconcile the types.
   if (isa<xegpu::TensorDescType>(orig.getType())) {
-    auto castOp = rewriter.create<UnrealizedConversionCastOp>(orig.getLoc(),
-                                                              expected, orig);
+    auto castOp = UnrealizedConversionCastOp::create(rewriter, orig.getLoc(),
+                                                     expected, orig);
     castOp->setAttr(resolveSIMTTypeMismatch, rewriter.getUnitAttr());
     return castOp.getResult(0);
   }
@@ -191,19 +191,20 @@ struct MoveFuncBodyToWarpExecuteOnLane0
     SmallVector<Type> privateAttributionsTypes =
         llvm::map_to_vector(gpuFuncOp.getPrivateAttributions(),
                             [](BlockArgument arg) { return arg.getType(); });
-    auto newGpuFunc = rewriter.create<gpu::GPUFuncOp>(
-        gpuFuncOp.getLoc(), gpuFuncOp.getName(), gpuFuncOp.getFunctionType(),
-        workgroupAttributionsTypes, privateAttributionsTypes);
+    auto newGpuFunc = gpu::GPUFuncOp::create(
+        rewriter, gpuFuncOp.getLoc(), gpuFuncOp.getName(),
+        gpuFuncOp.getFunctionType(), workgroupAttributionsTypes,
+        privateAttributionsTypes);
     newGpuFunc->setAttrs(gpuFuncOp->getAttrs());
     // Create a WarpExecuteOnLane0Op with same arguments and results as the
     // original gpuFuncOp.
     rewriter.setInsertionPointToEnd(&newGpuFunc.getFunctionBody().front());
-    auto laneId = rewriter.create<gpu::LaneIdOp>(
-        newGpuFunc.getLoc(), rewriter.getIndexType(),
+    auto laneId = gpu::LaneIdOp::create(
+        rewriter, newGpuFunc.getLoc(), rewriter.getIndexType(),
         /** upperBound = **/ mlir::IntegerAttr());
     ArrayRef<Type> gpuFuncResultType = gpuFuncOp.getFunctionType().getResults();
-    auto warpOp = rewriter.create<gpu::WarpExecuteOnLane0Op>(
-        laneId.getLoc(), gpuFuncResultType, laneId,
+    auto warpOp = gpu::WarpExecuteOnLane0Op::create(
+        rewriter, laneId.getLoc(), gpuFuncResultType, laneId,
         xegpu::targetinfo::subgroupSize, newGpuFunc.getArguments(),
         newGpuFunc.getArgumentTypes());
     Block &warpBodyBlock = warpOp.getBodyRegion().front();
@@ -211,8 +212,8 @@ struct MoveFuncBodyToWarpExecuteOnLane0
     auto origRetunOp =
         cast<gpu::ReturnOp>(gpuFuncOp.getBlocks().back().getTerminator());
     rewriter.setInsertionPointAfter(origRetunOp);
-    rewriter.create<gpu::YieldOp>(origRetunOp.getLoc(),
-                                  origRetunOp.getOperands());
+    gpu::YieldOp::create(rewriter, origRetunOp.getLoc(),
+                         origRetunOp.getOperands());
     rewriter.eraseOp(origRetunOp);
     // Move the original function body to the WarpExecuteOnLane0Op body.
     rewriter.inlineRegionBefore(gpuFuncOp.getBody(), warpOp.getBodyRegion(),
@@ -220,7 +221,7 @@ struct MoveFuncBodyToWarpExecuteOnLane0
     rewriter.eraseBlock(&warpBodyBlock);
     // Insert a new ReturnOp after the WarpExecuteOnLane0Op.
     rewriter.setInsertionPointAfter(warpOp);
-    rewriter.create<gpu::ReturnOp>(newGpuFunc.getLoc(), warpOp.getResults());
+    gpu::ReturnOp::create(rewriter, newGpuFunc.getLoc(), warpOp.getResults());
     rewriter.replaceOp(gpuFuncOp, newGpuFunc);
     return success();
   }
@@ -296,8 +297,8 @@ struct CreateNdDescDistribution final : public gpu::WarpDistributionPattern {
     xegpu::TensorDescType distributedTensorDescTy =
         descOp.getType().dropLayouts(); // Distributed tensor descriptor type
                                         // does not contain layout info.
-    Value newDescOp = rewriter.create<xegpu::CreateNdDescOp>(
-        newWarpOp.getLoc(), distributedTensorDescTy, newDescOperands,
+    Value newDescOp = xegpu::CreateNdDescOp::create(
+        rewriter, newWarpOp.getLoc(), distributedTensorDescTy, newDescOperands,
         descOp->getAttrs());
 
     Value distributedVal = newWarpOp.getResult(operandIdx);
@@ -398,8 +399,9 @@ struct StoreNdDistribution final : public gpu::WarpDistributionPattern {
         resolveDistributedTy(newWarpOp.getResult(newRetIndices[1]),
                              distributedTensorDescTy, rewriter));
 
-    auto newStoreOp = rewriter.create<xegpu::StoreNdOp>(
-        newWarpOp.getLoc(), TypeRange{}, newStoreOperands, storeOp->getAttrs());
+    auto newStoreOp =
+        xegpu::StoreNdOp::create(rewriter, newWarpOp.getLoc(), TypeRange{},
+                                 newStoreOperands, storeOp->getAttrs());
     xegpu::removeLayoutAttrs(newStoreOp);
     rewriter.eraseOp(storeOp);
     return success();
@@ -490,8 +492,8 @@ struct LoadNdDistribution final : public gpu::WarpDistributionPattern {
         loadOp.getTensorDescType().dropLayouts(); // Distributed tensor
                                                   // descriptor type does not
                                                   // contain layout info.
-    auto newLoadOp = rewriter.create<xegpu::LoadNdOp>(
-        newWarpOp.getLoc(), loadNdDistValueTyOrFailure.value(),
+    auto newLoadOp = xegpu::LoadNdOp::create(
+        rewriter, newWarpOp.getLoc(), loadNdDistValueTyOrFailure.value(),
         resolveDistributedTy(newWarpOp->getResult(newRetIndices[0]),
                              distributedTensorDescTy, rewriter),
         loadOp->getAttrs());
@@ -626,9 +628,9 @@ struct DpasDistribution final : public gpu::WarpDistributionPattern {
           resolveDistributedTy(newWarpOp.getResult(newRetIndices[i]),
                                newDpasOperandExpectedTypes[i], rewriter));
     }
-    auto newDpasOp =
-        rewriter.create<xegpu::DpasOp>(newWarpOp->getLoc(), distributedResultTy,
-                                       newDpasOperands, dpasOp->getAttrs());
+    auto newDpasOp = xegpu::DpasOp::create(rewriter, newWarpOp->getLoc(),
+                                           distributedResultTy, newDpasOperands,
+                                           dpasOp->getAttrs());
     xegpu::removeLayoutAttrs(newDpasOp);
     Value distributedVal = newWarpOp.getResult(operandIdx);
     // Resolve the output type.
@@ -715,8 +717,8 @@ struct UpdateNdOffsetDistribution final : public gpu::WarpDistributionPattern {
       }
     }
     // Create a new update op outside the warp op.
-    auto newUpdateOp = rewriter.create<xegpu::UpdateNdOffsetOp>(
-        newWarpOp.getLoc(), newTensorDescTy, newUpdateOperands,
+    auto newUpdateOp = xegpu::UpdateNdOffsetOp::create(
+        rewriter, newWarpOp.getLoc(), newTensorDescTy, newUpdateOperands,
         updateOp->getAttrs());
     xegpu::removeLayoutAttrs(newUpdateOp);
     Value distributedVal = newWarpOp.getResult(operandIdx);
@@ -782,9 +784,8 @@ struct PrefetchNdDistribution final : public gpu::WarpDistributionPattern {
     rewriter.setInsertionPointAfter(newWarpOp);
     SmallVector<Value> newPrefetchOperands = {resolveDistributedTy(
         newWarpOp.getResult(newRetIndices[0]), newTensorDescTy, rewriter)};
-    rewriter.create<xegpu::PrefetchNdOp>(newWarpOp.getLoc(), TypeRange{},
-                                         newPrefetchOperands,
-                                         prefetchOp->getAttrs());
+    xegpu::PrefetchNdOp::create(rewriter, newWarpOp.getLoc(), TypeRange{},
+                                newPrefetchOperands, prefetchOp->getAttrs());
     xegpu::removeLayoutAttrs(prefetchOp);
     rewriter.eraseOp(prefetchOp);
     return success();
@@ -806,9 +807,9 @@ struct GpuBarrierDistribution final : public gpu::WarpDistributionPattern {
       return failure();
     // Move the barrier op outside of the warp op.
     rewriter.setInsertionPointAfter(warpOp);
-    rewriter.create<gpu::BarrierOp>(
-        barrierOp.getLoc(), barrierOp->getResultTypes(),
-        barrierOp->getOperands(), barrierOp->getAttrs());
+    gpu::BarrierOp::create(rewriter, barrierOp.getLoc(),
+                           barrierOp->getResultTypes(),
+                           barrierOp->getOperands(), barrierOp->getAttrs());
     rewriter.eraseOp(barrierOp);
     return success();
   }

diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp
index dc76441b27c02..de67098d397f4 100644
--- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp
+++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp
@@ -79,8 +79,9 @@ struct UnrollPattern : public OpRewritePattern<SourceOp> {
                                  rewriter.getUnitAttr());
       auto blkAttr = NamedAttribute(rewriter.getStringAttr(blockAttrName),
                                     rewriter.getDenseI64ArrayAttr(blockSize));
-      auto castOp = rewriter.create<UnrealizedConversionCastOp>(
-          loc, destTy, srcs, ArrayRef<NamedAttribute>({attr, blkAttr}));
+      auto castOp = UnrealizedConversionCastOp::create(
+          rewriter, loc, destTy, srcs,
+          ArrayRef<NamedAttribute>({attr, blkAttr}));
       return castOp.getResult(0);
     }
 
@@ -105,8 +106,9 @@ struct UnrollPattern : public OpRewritePattern<SourceOp> {
                                  rewriter.getUnitAttr());
       auto blkAttr = NamedAttribute(rewriter.getStringAttr(blockAttrName),
                                     rewriter.getDenseI64ArrayAttr(blockSize));
-      auto castOp = rewriter.create<UnrealizedConversionCastOp>(
-          loc, destTypes, src, ArrayRef<NamedAttribute>({attr, blkAttr}));
+      auto castOp = UnrealizedConversionCastOp::create(
+          rewriter, loc, destTypes, src,
+          ArrayRef<NamedAttribute>({attr, blkAttr}));
       return castOp.getResults();
     }
 
@@ -140,10 +142,10 @@ struct UnrollCreateNdOp : public UnrollPattern<xegpu::CreateNdDescOp> {
     auto addi = [&](OpFoldResult a, int64_t b) -> Value {
       std::optional<int64_t> maybeInt = getConstantIntValue(a);
       if (maybeInt) {
-        return rewriter.create<arith::ConstantIndexOp>(loc, *maybeInt + b);
+        return arith::ConstantIndexOp::create(rewriter, loc, *maybeInt + b);
       } else {
         auto aV = llvm::cast<Value>(a);
-        auto bV = rewriter.create<arith::ConstantIndexOp>(loc, b);
+        auto bV = arith::ConstantIndexOp::create(rewriter, loc, b);
         return rewriter.createOrFold<arith::AddIOp>(loc, aV, bV);
       }
     };
@@ -165,9 +167,9 @@ struct UnrollCreateNdOp : public UnrollPattern<xegpu::CreateNdDescOp> {
            llvm::zip(validIdxes, oldOffsets, offsets))
         mixedOffsets[idx] = addi(oldOff, offset);
 
-      auto newOp = rewriter.create<xegpu::CreateNdDescOp>(
-          loc, newTdescTy, op.getSource(), mixedOffsets, op.getMixedSizes(),
-          op.getMixedStrides());
+      auto newOp = xegpu::CreateNdDescOp::create(
+          rewriter, loc, newTdescTy, op.getSource(), mixedOffsets,
+          op.getMixedSizes(), op.getMixedStrides());
       newOps.push_back(newOp);
     }
     Value castOp = unpack(newOps, tdescTy, *targetShape, loc, rewriter);
@@ -195,8 +197,8 @@ struct UnrollUpdateNdOffsetOp : public UnrollPattern<xegpu::UpdateNdOffsetOp> {
 
     SmallVector<Value> newOps;
     for (auto t : convertedTdesc) {
-      auto newOp = rewriter.create<xegpu::UpdateNdOffsetOp>(
-          loc, t.getType(), t, op.getOffsets(), op.getConstOffsets());
+      auto newOp = xegpu::UpdateNdOffsetOp::create(
+          rewriter, loc, t.getType(), t, op.getOffsets(), op.getConstOffsets());
       newOps.push_back(newOp);
     }
     Value castOp = unpack(newOps, op.getType(), *targetShape, loc, rewriter);
@@ -222,7 +224,8 @@ struct UnrollPrefetchNdOp : public UnrollPattern<xegpu::PrefetchNdOp> {
         op.getTensorDesc(), convertedTdescTypes, *targetShape, loc, rewriter);
 
     for (auto t : convertedTdesc)
-      rewriter.create<xegpu::PrefetchNdOp>(loc, TypeRange(), t, op->getAttrs());
+      xegpu::PrefetchNdOp::create(rewriter, loc, TypeRange(), t,
+                                  op->getAttrs());
 
     rewriter.eraseOp(op);
     return success();
@@ -253,7 +256,7 @@ struct UnrollLoadNdOp : public UnrollPattern<xegpu::LoadNdOp> {
     SmallVector<Value> newOps;
     for (auto t : convertedTdescs) {
       auto newOp =
-          rewriter.create<xegpu::LoadNdOp>(loc, newValueTy, t, op->getAttrs());
+          xegpu::LoadNdOp::create(rewriter, loc, newValueTy, t, op->getAttrs());
       newOps.push_back(newOp);
     }
 
@@ -287,8 +290,8 @@ struct UnrollStoreNdOp : public UnrollPattern<xegpu::StoreNdOp> {
         op.getTensorDesc(), convertedTdescTypes, *targetShape, loc, rewriter);
 
     for (auto [v, t] : llvm::zip(convertedValues, convertedTdescs))
-      rewriter.create<xegpu::StoreNdOp>(loc, v, t, op.getL1HintAttr(),
-                                        op.getL2HintAttr(), op.getL3HintAttr());
+      xegpu::StoreNdOp::create(rewriter, loc, v, t, op.getL1HintAttr(),
+                               op.getL2HintAttr(), op.getL3HintAttr());
 
     rewriter.eraseOp(op);
     return success();
@@ -380,8 +383,8 @@ struct UnrollDpasOp : public UnrollPattern<xegpu::DpasOp> {
           if (tmpC)
             operands.push_back(tmpC);
 
-          tmpC = rewriter.create<xegpu::DpasOp>(loc, vecTy, operands,
-                                                op->getAttrs());
+          tmpC = xegpu::DpasOp::create(rewriter, loc, vecTy, operands,
+                                       op->getAttrs());
         }
         newOps.push_back(tmpC);
       }
@@ -432,22 +435,23 @@ struct UnrollCreateDescOp : public UnrollPattern<xegpu::CreateDescOp> {
            llvm::zip(convertedIndiceVec, convertedIndiceTypes)) {
         for (int64_t i = 0; i < numNewChunks; ++i) {
           // Compute the offset
-          Value inc = rewriter.create<arith::ConstantIndexOp>(
-              loc, i * blockedChunkSize);
-          Value incVec = rewriter.create<vector::SplatOp>(loc, indiceType, inc);
+          Value inc = arith::ConstantIndexOp::create(rewriter, loc,
+                                                     i * blockedChunkSize);
+          Value incVec =
+              vector::SplatOp::create(rewriter, loc, indiceType, inc);
           Value offsetIndice =
-              rewriter.create<arith::AddIOp>(loc, indice, incVec);
+              arith::AddIOp::create(rewriter, loc, indice, incVec);
 
-          auto newOp = rewriter.create<xegpu::CreateDescOp>(
-              loc, newTdescTy, op.getSource(), offsetIndice);
+          auto newOp = xegpu::CreateDescOp::create(
+              rewriter, loc, newTdescTy, op.getSource(), offsetIndice);
 
           newOps.push_back(newOp);
         }
       }
     } else {
       for (auto indice : convertedIndiceVec) {
-        auto newOp = rewriter.create<xegpu::CreateDescOp>(
-            loc, newTdescTy, op.getSource(), indice);
+        auto newOp = xegpu::CreateDescOp::create(rewriter, loc, newTdescTy,
+                                                 op.getSource(), indice);
         newOps.push_back(newOp);
       }
     }
@@ -511,9 +515,9 @@ struct UnrollLoadGatherOp : public UnrollPattern<xegpu::LoadGatherOp> {
 
     SmallVector<Value> newOps;
     for (auto [t, m] : llvm::zip(convertedTdescs, convertedMasks)) {
-      auto newOp = rewriter.create<xegpu::LoadGatherOp>(
-          loc, newValueTy, t, m, op.getL1HintAttr(), op.getL2HintAttr(),
-          op.getL3HintAttr());
+      auto newOp = xegpu::LoadGatherOp::create(
+          rewriter, loc, newValueTy, t, m, op.getL1HintAttr(),
+          op.getL2HintAttr(), op.getL3HintAttr());
       newOps.push_back(newOp);
     }
 
@@ -543,7 +547,7 @@ struct UnrollPrefetchOp : public UnrollPattern<xegpu::PrefetchOp> {
         op.getTensorDesc(), convertedTdescTypes, *targetShape, loc, rewriter);
 
     for (auto t : convertedTdesc)
-      rewriter.create<xegpu::PrefetchOp>(loc, TypeRange(), t, op->getAttrs());
+      xegpu::PrefetchOp::create(rewriter, loc, TypeRange(), t, op->getAttrs());
 
     rewriter.eraseOp(op);
     return success();
@@ -604,9 +608,8 @@ struct UnrollStoreScatterOp : public UnrollPattern<xegpu::StoreScatterOp> {
       Value v = convertedValues[i];
       Value t = convertedTdescs[i];
       Value m = op.getMask() ? convertedMasks[i] : nullptr;
-      rewriter.create<xegpu::StoreScatterOp>(loc, v, t, m, op.getL1HintAttr(),
-                                             op.getL2HintAttr(),
-                                             op.getL3HintAttr());
+      xegpu::StoreScatterOp::create(rewriter, loc, v, t, m, op.getL1HintAttr(),
+                                    op.getL2HintAttr(), op.getL3HintAttr());
     }
 
     rewriter.eraseOp(op);
@@ -658,7 +661,7 @@ struct UnrollUpdateOffsetOp : public UnrollPattern<xegpu::UpdateOffsetOp> {
 
     for (auto [t, o] : llvm::zip(convertedTdesc, convertedOffsetVec)) {
       auto newOp =
-          rewriter.create<xegpu::UpdateOffsetOp>(loc, t.getType(), t, o);
+          xegpu::UpdateOffsetOp::create(rewriter, loc, t.getType(), t, o);
       newOps.push_back(newOp);
     }
     Value castOp = unpack(newOps, op.getType(), *targetShape, loc, rewriter);

diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
index be7b860dd1729..80bb5e888bdc7 100644
--- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
+++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
@@ -121,11 +121,11 @@ struct WgToSgCreateNdOp : public OpConversionPattern<xegpu::CreateNdDescOp> {
     for (size_t i = 0; i < rank; ++i) {
       size_t dimIdx = originalOffsets.size() - rank + i;
       Value constOffset =
-          rewriter.create<arith::ConstantIndexOp>(loc, distUnitBaseAddr[i]);
+          arith::ConstantIndexOp::create(rewriter, loc, distUnitBaseAddr[i]);
       Value offset =
           rewriter.createOrFold<index::AddOp>(loc, localOffset[i], constOffset);
       Value modValue =
-          rewriter.create<arith::ConstantIndexOp>(loc, distUnitShape[i]);
+          arith::ConstantIndexOp::create(rewriter, loc, distUnitShape[i]);
       Value offsetMod =
           rewriter.createOrFold<index::RemUOp>(loc, offset, modValue);
       Value origOffset = getValueOrCreateConstantIndexOp(
@@ -162,7 +162,7 @@ struct WgToSgCreateNdOp : public OpConversionPattern<xegpu::CreateNdDescOp> {
     // TODO : Handle order attribute
     // Get the subgroup ID
     auto linearSgId =
-        rewriter.create<gpu::SubgroupIdOp>(loc, /*upper_bound=*/nullptr);
+        gpu::SubgroupIdOp::create(rewriter, loc, /*upper_bound=*/nullptr);
 
     // Create constants for layout dimensions
     SmallVector<Value> sgLayoutDim(sgLayout.size());
@@ -170,8 +170,8 @@ struct WgToSgCreateNdOp : public OpConversionPattern<xegpu::CreateNdDescOp> {
 
     for (size_t i = 0; i < sgLayout.size(); i++) {
       sgLayoutDim[i] =
-          rewriter.create<arith::ConstantIndexOp>(loc, sgLayout[i]);
-      sgDataDim[i] = rewriter.create<arith::ConstantIndexOp>(loc, sgShape[i]);
+          arith::ConstantIndexOp::create(rewriter, loc, sgLayout[i]);
+      sgDataDim[i] = arith::ConstantIndexOp::create(rewriter, loc, sgShape[i]);
     }
 
     auto deLinearizeSgId =
@@ -201,9 +201,9 @@ struct WgToSgCreateNdOp : public OpConversionPattern<xegpu::CreateNdDescOp> {
           calculateGlobalOffsets(rewriter, loc, originalOffsets, localOffset,
                                  distUnitBaseAddr, distUnitShape);
 
-      auto newCreateNdOp = rewriter.create<xegpu::CreateNdDescOp>(
-          loc, newTdescTy, op.getSource(), globalOffsets, op.getMixedSizes(),
-          op.getMixedStrides());
+      auto newCreateNdOp = xegpu::CreateNdDescOp::create(
+          rewriter, loc, newTdescTy, op.getSource(), globalOffsets,
+          op.getMixedSizes(), op.getMixedStrides());
       newCreateNdOps.push_back(newCreateNdOp);
     }
 
@@ -224,8 +224,8 @@ struct WgToSgLoadNdOp : public OpConversionPattern<xegpu::LoadNdOp> {
           dyn_cast<xegpu::TensorDescType>(src.getType());
       ArrayRef<int64_t> srcShape = tdescTy.getShape();
       VectorType newResTy = VectorType::get(srcShape, tdescTy.getElementType());
-      auto newLoadOp = rewriter.create<xegpu::LoadNdOp>(op.getLoc(), newResTy,
-                                                        src, op->getAttrs());
+      auto newLoadOp = xegpu::LoadNdOp::create(rewriter, op.getLoc(), newResTy,
+                                               src, op->getAttrs());
       newLoadOps.push_back(newLoadOp);
     }
     rewriter.replaceOpWithMultiple(op, {newLoadOps});
@@ -242,8 +242,8 @@ struct WgToSgStoreNdOp : public OpConversionPattern<xegpu::StoreNdOp> {
   matchAndRewrite(xegpu::StoreNdOp op, OneToNOpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     for (auto [v, t] : llvm::zip(adaptor.getValue(), adaptor.getTensorDesc()))
-      rewriter.create<xegpu::StoreNdOp>(op.getLoc(), v, t, op.getL1HintAttr(),
-                                        op.getL2HintAttr(), op.getL3HintAttr());
+      xegpu::StoreNdOp::create(rewriter, op.getLoc(), v, t, op.getL1HintAttr(),
+                               op.getL2HintAttr(), op.getL3HintAttr());
 
     rewriter.eraseOp(op);
     return success();
@@ -261,8 +261,8 @@ struct WgToSgUpdateNdOffsetOp
                   ConversionPatternRewriter &rewriter) const override {
     llvm::SmallVector<Value> newUpdateTileOffsetOps;
     for (auto tDesc : adaptor.getTensorDesc()) {
-      auto newUpdateTileOffsetOp = rewriter.create<xegpu::UpdateNdOffsetOp>(
-          op.getLoc(), tDesc.getType(), tDesc, op.getOffsets(),
+      auto newUpdateTileOffsetOp = xegpu::UpdateNdOffsetOp::create(
+          rewriter, op.getLoc(), tDesc.getType(), tDesc, op.getOffsets(),
           op.getConstOffsets());
       newUpdateTileOffsetOps.push_back(newUpdateTileOffsetOp);
     }
@@ -305,7 +305,7 @@ struct WgToSgDpasOp : public OpConversionPattern<xegpu::DpasOp> {
             llvm::cast<VectorType>(bVec.getType()).getShape();
         VectorType resTy = VectorType::get({aVecShape[0], bVecShape[1]},
                                            resultTy.getElementType());
-        tmpC = rewriter.create<xegpu::DpasOp>(loc, resTy, operands);
+        tmpC = xegpu::DpasOp::create(rewriter, loc, resTy, operands);
         xegpu::setLayoutAttr(cast<OpResult>(tmpC),
                              originalLayout.dropSgLayoutAndData());
 
@@ -324,8 +324,8 @@ struct WgToSgPrefetchNdOp : public OpConversionPattern<xegpu::PrefetchNdOp> {
   matchAndRewrite(xegpu::PrefetchNdOp op, OneToNOpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     for (auto src : adaptor.getTensorDesc())
-      rewriter.create<xegpu::PrefetchNdOp>(op.getLoc(), TypeRange(), src,
-                                           op->getAttrs());
+      xegpu::PrefetchNdOp::create(rewriter, op.getLoc(), TypeRange(), src,
+                                  op->getAttrs());
     rewriter.eraseOp(op);
     return success();
   }

diff --git a/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp b/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
index 370d149ee55af..6fa249d57080a 100644
--- a/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
+++ b/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
@@ -223,8 +223,8 @@ xegpu::extractVectorsWithShapeFromValue(OpBuilder &builder, Location loc,
   SmallVector<Value> result;
   for (SmallVector<int64_t> offsets : StaticTileOffsetRange(srcShape, shape)) {
     SmallVector<int64_t> staticStrides(offsets.size(), 1);
-    result.push_back(builder.create<vector::ExtractStridedSliceOp>(
-        loc, value, offsets, shape, staticStrides));
+    result.push_back(vector::ExtractStridedSliceOp::create(
+        builder, loc, value, offsets, shape, staticStrides));
   }
 
   return result;
@@ -243,14 +243,14 @@ Value xegpu::createVectorWithShapeFromValues(OpBuilder &builder, Location loc,
 
   VectorType resultTy = VectorType::get(shape, elemTy);
   auto zeroAttr = builder.getZeroAttr(elemTy);
-  Value result = builder.create<arith::ConstantOp>(
-      loc, resultTy, DenseElementsAttr::get(resultTy, zeroAttr));
+  Value result = arith::ConstantOp::create(
+      builder, loc, resultTy, DenseElementsAttr::get(resultTy, zeroAttr));
 
   for (auto [src, offsets] :
        llvm::zip_equal(values, StaticTileOffsetRange(shape, tileShape))) {
     SmallVector<int64_t> staticStrides(offsets.size(), 1);
-    result = builder.create<vector::InsertStridedSliceOp>(
-        loc, src, result, offsets, staticStrides);
+    result = vector::InsertStridedSliceOp::create(builder, loc, src, result,
+                                                  offsets, staticStrides);
   }
   return result;
 }
@@ -261,7 +261,7 @@ void xegpu::doSCFStructuralTypeConversionWithTensorType(
 
   auto materializeCast = [](OpBuilder &builder, Type type, ValueRange inputs,
                             Location loc) -> Value {
-    return builder.create<UnrealizedConversionCastOp>(loc, type, inputs)
+    return UnrealizedConversionCastOp::create(builder, loc, type, inputs)
         .getResult(0);
   };
 
@@ -368,8 +368,8 @@ void xegpu::doSCFStructuralTypeConversionWithTensorType(
 
         if (isa<RankedTensorType>(inputTy) && isa<VectorType>(outputTy)) {
           SmallVector<Value> values = xegpu::flattenValues(adaptor.getInputs());
-          auto newOp = rewriter.create<UnrealizedConversionCastOp>(
-              op.getLoc(), outputTy, values);
+          auto newOp = UnrealizedConversionCastOp::create(rewriter, op.getLoc(),
+                                                          outputTy, values);
           rewriter.replaceOp(op, newOp);
           return success();
         }
@@ -380,7 +380,7 @@ void xegpu::doSCFStructuralTypeConversionWithTensorType(
     converter.addSourceMaterialization(materializeCast);
     converter.addTargetMaterialization([&](OpBuilder &builder, TypeRange type,
                                            ValueRange inputs, Location loc) {
-      return builder.create<UnrealizedConversionCastOp>(loc, type, inputs)
+      return UnrealizedConversionCastOp::create(builder, loc, type, inputs)
           .getResults();
     });
 
