[Mlir-commits] [mlir] [mlir] Update builders to use new form. (PR #154132)

llvmlistbot at llvm.org
Mon Aug 18 08:12:15 PDT 2025


llvmbot wrote:



@llvm/pr-subscribers-mlir-emitc

Author: Jacques Pienaar (jpienaar)

Changes:

Mechanically applied using clang-tidy: each `rewriter.create<OpTy>(loc, ...)` (or `builder.create<OpTy>(loc, ...)`) call site is rewritten to the static `OpTy::create(rewriter, loc, ...)` builder form, with no change in behavior.
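
For context, a minimal before/after sketch of the rewrite applied throughout the diff below (the helper function is illustrative, not taken from the patch):

```cpp
// Sketch of the migration this PR performs: the member-template builder call
// is replaced by the op's static `create`, which takes the builder as its
// first argument.
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"

static mlir::Value makeIndexConstant(mlir::OpBuilder &builder,
                                     mlir::Location loc, int64_t d) {
  // Old form, being phased out:
  //   return builder.create<mlir::arith::ConstantIndexOp>(loc, d);
  // New form, as applied throughout this PR:
  return mlir::arith::ConstantIndexOp::create(builder, loc, d);
}
```

The static form threads the builder explicitly as the first argument and works uniformly with `OpBuilder`, rewriters, and other builder-like types, presumably paving the way for deprecating the `create` member template.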

---
Full diff: https://github.com/llvm/llvm-project/pull/154132.diff


9 Files Affected:

- (modified) mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitC.cpp (+14-12) 
- (modified) mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitCPass.cpp (+2-2) 
- (modified) mlir/lib/Dialect/EmitC/Transforms/WrapFuncInClass.cpp (+2-2) 
- (modified) mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp (+3-3) 
- (modified) mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp (+7-7) 
- (modified) mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp (+2-2) 
- (modified) mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp (+2-2) 
- (modified) mlir/lib/Target/Wasm/TranslateFromWasm.cpp (+16-15) 
- (modified) mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp (+1-1) 


``````````diff
diff --git a/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitC.cpp b/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitC.cpp
index a1f38c95935ad..2b7bdc9a7b7f8 100644
--- a/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitC.cpp
+++ b/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitC.cpp
@@ -156,19 +156,21 @@ struct ConvertAlloc final : public OpConversionPattern<memref::AllocOp> {
     Type sizeTType = emitc::SizeTType::get(rewriter.getContext());
     Type elementType = memrefType.getElementType();
     IndexType indexType = rewriter.getIndexType();
-    emitc::CallOpaqueOp sizeofElementOp = rewriter.create<emitc::CallOpaqueOp>(
-        loc, sizeTType, rewriter.getStringAttr("sizeof"), ValueRange{},
+    emitc::CallOpaqueOp sizeofElementOp = emitc::CallOpaqueOp::create(
+        rewriter, loc, sizeTType, rewriter.getStringAttr("sizeof"),
+        ValueRange{},
         ArrayAttr::get(rewriter.getContext(), {TypeAttr::get(elementType)}));
 
     int64_t numElements = 1;
     for (int64_t dimSize : memrefType.getShape()) {
       numElements *= dimSize;
     }
-    Value numElementsValue = rewriter.create<emitc::ConstantOp>(
-        loc, indexType, rewriter.getIndexAttr(numElements));
+    Value numElementsValue = emitc::ConstantOp::create(
+        rewriter, loc, indexType, rewriter.getIndexAttr(numElements));
 
-    Value totalSizeBytes = rewriter.create<emitc::MulOp>(
-        loc, sizeTType, sizeofElementOp.getResult(0), numElementsValue);
+    Value totalSizeBytes =
+        emitc::MulOp::create(rewriter, loc, sizeTType,
+                             sizeofElementOp.getResult(0), numElementsValue);
 
     emitc::CallOpaqueOp allocCall;
     StringAttr allocFunctionName;
@@ -176,8 +178,8 @@ struct ConvertAlloc final : public OpConversionPattern<memref::AllocOp> {
     SmallVector<Value, 2> argsVec;
     if (allocOp.getAlignment()) {
       allocFunctionName = rewriter.getStringAttr(alignedAllocFunctionName);
-      alignmentValue = rewriter.create<emitc::ConstantOp>(
-          loc, sizeTType,
+      alignmentValue = emitc::ConstantOp::create(
+          rewriter, loc, sizeTType,
           rewriter.getIntegerAttr(indexType,
                                   allocOp.getAlignment().value_or(0)));
       argsVec.push_back(alignmentValue);
@@ -188,15 +190,15 @@ struct ConvertAlloc final : public OpConversionPattern<memref::AllocOp> {
     argsVec.push_back(totalSizeBytes);
     ValueRange args(argsVec);
 
-    allocCall = rewriter.create<emitc::CallOpaqueOp>(
-        loc,
+    allocCall = emitc::CallOpaqueOp::create(
+        rewriter, loc,
         emitc::PointerType::get(
             emitc::OpaqueType::get(rewriter.getContext(), "void")),
         allocFunctionName, args);
 
     emitc::PointerType targetPointerType = emitc::PointerType::get(elementType);
-    emitc::CastOp castOp = rewriter.create<emitc::CastOp>(
-        loc, targetPointerType, allocCall.getResult(0));
+    emitc::CastOp castOp = emitc::CastOp::create(
+        rewriter, loc, targetPointerType, allocCall.getResult(0));
 
     rewriter.replaceOp(allocOp, castOp);
     return success();
diff --git a/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitCPass.cpp b/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitCPass.cpp
index a51890248271f..a073a9acf752f 100644
--- a/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitCPass.cpp
+++ b/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitCPass.cpp
@@ -33,8 +33,8 @@ namespace {
 emitc::IncludeOp addStandardHeader(OpBuilder &builder, ModuleOp module,
                                    StringRef headerName) {
   StringAttr includeAttr = builder.getStringAttr(headerName);
-  return builder.create<emitc::IncludeOp>(
-      module.getLoc(), includeAttr,
+  return emitc::IncludeOp::create(
+      builder, module.getLoc(), includeAttr,
       /*is_standard_include=*/builder.getUnitAttr());
 }
 
diff --git a/mlir/lib/Dialect/EmitC/Transforms/WrapFuncInClass.cpp b/mlir/lib/Dialect/EmitC/Transforms/WrapFuncInClass.cpp
index c55e26e722f33..06d7e07005f8a 100644
--- a/mlir/lib/Dialect/EmitC/Transforms/WrapFuncInClass.cpp
+++ b/mlir/lib/Dialect/EmitC/Transforms/WrapFuncInClass.cpp
@@ -64,8 +64,8 @@ class WrapFuncInClass : public OpRewritePattern<emitc::FuncOp> {
       TypeAttr typeAttr = TypeAttr::get(val.getType());
       fields.push_back({fieldName, typeAttr});
 
-      FieldOp fieldop = rewriter.create<emitc::FieldOp>(
-          funcOp->getLoc(), fieldName, typeAttr, nullptr);
+      FieldOp fieldop = emitc::FieldOp::create(rewriter, funcOp->getLoc(),
+                                               fieldName, typeAttr, nullptr);
 
       if (argAttrs && idx < argAttrs->size()) {
         fieldop->setDiscardableAttrs(funcOp.getArgAttrDict(idx));
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
index d56506969662b..22690daa4f9e1 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -691,9 +691,9 @@ struct DropPadUnitDims : public OpRewritePattern<tensor::PadOp> {
 
     auto newResultType = RankedTensorType::get(
         newResultShape, padOp.getResultType().getElementType());
-    auto newPadOp = rewriter.create<tensor::PadOp>(
-        padOp.getLoc(), /*result=*/newResultType, collapsedSource, newLowPad,
-        newHighPad, paddingVal, padOp.getNofold());
+    auto newPadOp = tensor::PadOp::create(
+        rewriter, padOp.getLoc(), /*result=*/newResultType, collapsedSource,
+        newLowPad, newHighPad, paddingVal, padOp.getNofold());
 
     Value dest = padOp.getResult();
     if (options.rankReductionStrategy ==
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp b/mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp
index 9ec4af6d4581c..2650488c17993 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp
@@ -52,11 +52,11 @@ FailureOr<Operation *> mlir::linalg::transposeMatmul(RewriterBase &rewriter,
     dynamicDims.push_back(tensor::DimOp::create(rewriter, loc, input, 0));
 
   ArrayRef<int64_t> shape = type.getShape();
-  Value empty = rewriter.create<tensor::EmptyOp>(
-      loc, ArrayRef<int64_t>{shape[1], shape[0]}, type.getElementType(),
-      dynamicDims);
-  auto transposeOp = rewriter.create<linalg::TransposeOp>(
-      loc, input, empty, ArrayRef<int64_t>{1, 0});
+  Value empty = tensor::EmptyOp::create(rewriter, loc,
+                                        ArrayRef<int64_t>{shape[1], shape[0]},
+                                        type.getElementType(), dynamicDims);
+  auto transposeOp = linalg::TransposeOp::create(rewriter, loc, input, empty,
+                                                 ArrayRef<int64_t>{1, 0});
   Operation *newMatmulOp;
   if (transposeLHS) {
     newMatmulOp = MatmulTransposeAOp::create(
@@ -112,8 +112,8 @@ mlir::linalg::transposeBatchMatmul(RewriterBase &rewriter,
   Value empty = tensor::EmptyOp::create(
       rewriter, loc, ArrayRef<int64_t>{shape[0], shape[2], shape[1]},
       type.getElementType(), dynamicDims);
-  auto transposeOp = rewriter.create<linalg::TransposeOp>(
-      loc, input, empty, ArrayRef<int64_t>{0, 2, 1});
+  auto transposeOp = linalg::TransposeOp::create(rewriter, loc, input, empty,
+                                                 ArrayRef<int64_t>{0, 2, 1});
   Operation *newMatmulOp;
   if (transposeLHS) {
     newMatmulOp = BatchMatmulTransposeAOp::create(
diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp
index 1b26542ff65a3..8ea8cb1f45972 100644
--- a/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp
+++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp
@@ -67,7 +67,7 @@ genOffsetsComputingInsts(OpBuilder &builder, Location loc,
        StaticTileOffsetRange(sizePerWg, distUnit)) {
     SmallVector<Value> base =
         llvm::map_to_vector(unitOffs, [&](int64_t d) -> Value {
-          return builder.create<arith::ConstantIndexOp>(loc, d);
+          return arith::ConstantIndexOp::create(builder, loc, d);
         });
 
     SmallVector<Value> adds = llvm::map_to_vector(
@@ -80,7 +80,7 @@ genOffsetsComputingInsts(OpBuilder &builder, Location loc,
         llvm::zip_equal(adds, sizePerWg), [&](const auto &t) -> Value {
           return builder.createOrFold<index::RemUOp>(
               loc, std::get<0>(t),
-              builder.create<arith::ConstantIndexOp>(loc, std::get<1>(t)));
+              arith::ConstantIndexOp::create(builder, loc, std::get<1>(t)));
         });
 
     offsets.push_back(mods);
diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
index 46ff03745a220..ecec186fe3fc9 100644
--- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
+++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
@@ -166,7 +166,7 @@ struct WgToSgCreateNdOp : public OpConversionPattern<xegpu::CreateNdDescOp> {
       // Subtract startOfRange from the original subgroup id to get
       // the adjusted sg id
       Value startOfRangeVal =
-          rewriter.create<arith::ConstantIndexOp>(loc, startOfRange);
+          arith::ConstantIndexOp::create(rewriter, loc, startOfRange);
       linearSgId =
           rewriter.createOrFold<index::SubOp>(loc, linearSgId, startOfRangeVal);
     }
@@ -675,7 +675,7 @@ struct WgToSgArithConstantOp : public OpConversionPattern<arith::ConstantOp> {
     auto newType = VectorType::get(sgShape, vecType.getElementType());
     auto sgAttr = DenseElementsAttr::get(newType, singleVal);
     auto cstOp =
-        rewriter.create<arith::ConstantOp>(op.getLoc(), newType, sgAttr);
+        arith::ConstantOp::create(rewriter, op.getLoc(), newType, sgAttr);
     if (auto newLayout = layout.dropSgLayoutAndData())
       xegpu::setLayoutAttr(cstOp->getResult(0), newLayout);
     SmallVector<Value> newConsts(count, cstOp);
diff --git a/mlir/lib/Target/Wasm/TranslateFromWasm.cpp b/mlir/lib/Target/Wasm/TranslateFromWasm.cpp
index da811ba0954c2..8d450520629eb 100644
--- a/mlir/lib/Target/Wasm/TranslateFromWasm.cpp
+++ b/mlir/lib/Target/Wasm/TranslateFromWasm.cpp
@@ -780,8 +780,9 @@ parsed_inst_t ExpressionParser::parseConstInst(
   auto parsedConstant = parser.parseLiteral<valueT>();
   if (failed(parsedConstant))
     return failure();
-  auto constOp = builder.create<ConstOp>(
-      *currentOpLoc, buildLiteralAttr<valueT>(builder, *parsedConstant));
+  auto constOp =
+      ConstOp::create(builder, *currentOpLoc,
+                      buildLiteralAttr<valueT>(builder, *parsedConstant));
   return {{constOp.getResult()}};
 }
 
@@ -929,8 +930,8 @@ class WasmBinaryParser {
              << " type registration.";
     FunctionType type = symbols.moduleFuncTypes[tid.id];
     std::string symbol = symbols.getNewFuncSymbolName();
-    auto funcOp =
-        builder.create<FuncImportOp>(loc, symbol, moduleName, importName, type);
+    auto funcOp = FuncImportOp::create(builder, loc, symbol, moduleName,
+                                       importName, type);
     symbols.funcSymbols.push_back({{FlatSymbolRefAttr::get(funcOp)}, type});
     return funcOp.verify();
   }
@@ -939,8 +940,8 @@ class WasmBinaryParser {
   LogicalResult visitImport(Location loc, StringRef moduleName,
                             StringRef importName, LimitType limitType) {
     std::string symbol = symbols.getNewMemorySymbolName();
-    auto memOp = builder.create<MemImportOp>(loc, symbol, moduleName,
-                                             importName, limitType);
+    auto memOp = MemImportOp::create(builder, loc, symbol, moduleName,
+                                     importName, limitType);
     symbols.memSymbols.push_back({FlatSymbolRefAttr::get(memOp)});
     return memOp.verify();
   }
@@ -949,8 +950,8 @@ class WasmBinaryParser {
   LogicalResult visitImport(Location loc, StringRef moduleName,
                             StringRef importName, TableType tableType) {
     std::string symbol = symbols.getNewTableSymbolName();
-    auto tableOp = builder.create<TableImportOp>(loc, symbol, moduleName,
-                                                 importName, tableType);
+    auto tableOp = TableImportOp::create(builder, loc, symbol, moduleName,
+                                         importName, tableType);
     symbols.tableSymbols.push_back({FlatSymbolRefAttr::get(tableOp)});
     return tableOp.verify();
   }
@@ -960,8 +961,8 @@ class WasmBinaryParser {
                             StringRef importName, GlobalTypeRecord globalType) {
     std::string symbol = symbols.getNewGlobalSymbolName();
     auto giOp =
-        builder.create<GlobalImportOp>(loc, symbol, moduleName, importName,
-                                       globalType.type, globalType.isMutable);
+        GlobalImportOp::create(builder, loc, symbol, moduleName, importName,
+                               globalType.type, globalType.isMutable);
     symbols.globalSymbols.push_back(
         {{FlatSymbolRefAttr::get(giOp)}, giOp.getType()});
     return giOp.verify();
@@ -1012,7 +1013,7 @@ class WasmBinaryParser {
     if (failed(fillRegistry))
       return;
 
-    mOp = builder.create<ModuleOp>(getLocation());
+    mOp = ModuleOp::create(builder, getLocation());
     builder.setInsertionPointToStart(&mOp.getBodyRegion().front());
     LogicalResult parsingTypes = parseSection<WasmSectionType::TYPE>();
     if (failed(parsingTypes))
@@ -1172,7 +1173,7 @@ WasmBinaryParser::parseSectionItem<WasmSectionType::TABLE>(ParserHead &ph,
   LDBG() << "  Parsed table description: " << *tableType;
   StringAttr symbol = builder.getStringAttr(symbols.getNewTableSymbolName());
   auto tableOp =
-      builder.create<TableOp>(opLocation, symbol.strref(), *tableType);
+      TableOp::create(builder, opLocation, symbol.strref(), *tableType);
   symbols.tableSymbols.push_back({SymbolRefAttr::get(tableOp)});
   return success();
 }
@@ -1190,11 +1191,11 @@ WasmBinaryParser::parseSectionItem<WasmSectionType::FUNCTION>(ParserHead &ph,
     return emitError(getLocation(), "invalid type index: ") << typeIdx;
   std::string symbol = symbols.getNewFuncSymbolName();
   auto funcOp =
-      builder.create<FuncOp>(opLoc, symbol, symbols.moduleFuncTypes[typeIdx]);
+      FuncOp::create(builder, opLoc, symbol, symbols.moduleFuncTypes[typeIdx]);
   Block *block = funcOp.addEntryBlock();
   auto ip = builder.saveInsertionPoint();
   builder.setInsertionPointToEnd(block);
-  builder.create<ReturnOp>(opLoc);
+  ReturnOp::create(builder, opLoc);
   builder.restoreInsertionPoint(ip);
   symbols.funcSymbols.push_back(
       {{FlatSymbolRefAttr::get(funcOp.getSymNameAttr())},
@@ -1225,7 +1226,7 @@ WasmBinaryParser::parseSectionItem<WasmSectionType::MEMORY>(ParserHead &ph,
 
   LDBG() << "  Registering memory " << *memory;
   std::string symbol = symbols.getNewMemorySymbolName();
-  auto memOp = builder.create<MemOp>(opLocation, symbol, *memory);
+  auto memOp = MemOp::create(builder, opLocation, symbol, *memory);
   symbols.memSymbols.push_back({SymbolRefAttr::get(memOp)});
   return success();
 }
diff --git a/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp b/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp
index 3bea8efcdb0ae..58962714b7864 100644
--- a/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp
+++ b/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp
@@ -228,7 +228,7 @@ struct TestXeGPULayoutInterface
     auto materializeCast = [&](mlir::OpBuilder &builder, mlir::Type type,
                                mlir::ValueRange inputs,
                                mlir::Location loc) -> mlir::Value {
-      return builder.create<UnrealizedConversionCastOp>(loc, type, inputs)
+      return UnrealizedConversionCastOp::create(builder, loc, type, inputs)
           .getResult(0);
     };
     typeConverter.addSourceMaterialization(materializeCast);

``````````
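For anyone wanting to apply the same rewrite to out-of-tree code: the migration was done with clang-tidy, and (to the best of my knowledge — the PR itself does not name it) the relevant check in the LLVM module is `llvm-use-new-mlir-op-builder`, so something along the lines of `run-clang-tidy -checks='-*,llvm-use-new-mlir-op-builder' -fix <path>` should reproduce it.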



https://github.com/llvm/llvm-project/pull/154132


More information about the Mlir-commits mailing list