[Mlir-commits] [mlir] [mlir][NFC] update `mlir` create APIs (34/n) (PR #150660)
llvmlistbot at llvm.org
Fri Jul 25 10:18:14 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-mlir-llvm
Author: Maksim Levental (makslevental)
Changes:
See https://github.com/llvm/llvm-project/pull/147168 for more info.
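The migration is mechanical: builder-centric `rewriter.create<OpTy>(...)` calls (including the `rewriter.template create<OpTy>(...)` spelling that dependent contexts force) become the op-centric `OpTy::create(rewriter, ...)` form, with the argument list otherwise unchanged. A minimal before/after sketch, taken from the first ArithToEmitC hunk below:

```cpp
// Before: builder-centric create; inside a class template the dependent
// member call needs the `template` disambiguator.
auto cast = rewriter.template create<emitc::CastOp>(op.getLoc(),
                                                    castDestType, actualOp);

// After: op-centric create; the builder is passed as the first argument
// and no disambiguator is needed.
auto cast = emitc::CastOp::create(rewriter, op.getLoc(),
                                  castDestType, actualOp);
```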
---
Patch is 33.08 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/150660.diff
16 Files Affected:
- (modified) mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp (+4-4)
- (modified) mlir/lib/Conversion/BufferizationToMemRef/BufferizationToMemRef.cpp (+1-2)
- (modified) mlir/lib/Conversion/ControlFlowToSCF/ControlFlowToSCF.cpp (+1-2)
- (modified) mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp (+1-2)
- (modified) mlir/lib/Conversion/LLVMCommon/Pattern.cpp (+1-2)
- (modified) mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp (+1-1)
- (modified) mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp (+1-2)
- (modified) mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp (+6-6)
- (modified) mlir/lib/Conversion/ShardToMPI/ShardToMPI.cpp (+4-8)
- (modified) mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp (+19-38)
- (modified) mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp (+12-24)
- (modified) mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp (+1-2)
- (modified) mlir/lib/Target/LLVMIR/ModuleImport.cpp (+19-20)
- (modified) mlir/test/lib/Dialect/Shard/TestReshardingPartition.cpp (+4-5)
- (modified) mlir/test/lib/Dialect/Test/TestPatterns.cpp (+2-3)
- (modified) mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp (+5-7)
``````````diff
diff --git a/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp b/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp
index 59b3fe2e4eaed..c3debf7afc865 100644
--- a/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp
+++ b/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp
@@ -402,7 +402,7 @@ class CastConversion : public OpConversionPattern<ArithOp> {
Value actualOp = adaptValueType(adaptor.getIn(), rewriter, castSrcType);
// Actual cast (may change bitwidth)
- auto cast = rewriter.template create<emitc::CastOp>(op.getLoc(),
+ auto cast = emitc::CastOp::create(rewriter, op.getLoc(),
castDestType, actualOp);
// Cast to the expected output type
@@ -507,7 +507,7 @@ class IntegerOpConversion final : public OpConversionPattern<ArithOp> {
Value lhs = adaptValueType(adaptor.getLhs(), rewriter, arithmeticType);
Value rhs = adaptValueType(adaptor.getRhs(), rewriter, arithmeticType);
- Value arithmeticResult = rewriter.template create<EmitCOp>(
+ Value arithmeticResult = EmitCOp::create(rewriter,
op.getLoc(), arithmeticType, lhs, rhs);
Value result = adaptValueType(arithmeticResult, rewriter, type);
@@ -547,7 +547,7 @@ class BitwiseOpConversion : public OpConversionPattern<ArithOp> {
Value lhs = adaptValueType(adaptor.getLhs(), rewriter, arithmeticType);
Value rhs = adaptValueType(adaptor.getRhs(), rewriter, arithmeticType);
- Value arithmeticResult = rewriter.template create<EmitCOp>(
+ Value arithmeticResult = EmitCOp::create(rewriter,
op.getLoc(), arithmeticType, lhs, rhs);
Value result = adaptValueType(arithmeticResult, rewriter, type);
@@ -748,7 +748,7 @@ class ItoFCastOpConversion : public OpConversionPattern<CastOp> {
}
Value fpCastOperand = adaptor.getIn();
if (actualOperandType != operandType) {
- fpCastOperand = rewriter.template create<emitc::CastOp>(
+ fpCastOperand = emitc::CastOp::create(rewriter,
castOp.getLoc(), actualOperandType, fpCastOperand);
}
rewriter.replaceOpWithNewOp<emitc::CastOp>(castOp, dstType, fpCastOperand);
diff --git a/mlir/lib/Conversion/BufferizationToMemRef/BufferizationToMemRef.cpp b/mlir/lib/Conversion/BufferizationToMemRef/BufferizationToMemRef.cpp
index 30a7170cf5c6a..d77cbfb1173c3 100644
--- a/mlir/lib/Conversion/BufferizationToMemRef/BufferizationToMemRef.cpp
+++ b/mlir/lib/Conversion/BufferizationToMemRef/BufferizationToMemRef.cpp
@@ -68,8 +68,7 @@ struct CloneOpConversion : public OpConversionPattern<bufferization::CloneOp> {
scf::YieldOp::create(rewriter, loc, acc);
};
- auto size = rewriter
- .create<scf::ForOp>(loc, zero, rank, one, ValueRange(one),
+ auto size = scf::ForOp::create(rewriter, loc, zero, rank, one, ValueRange(one),
loopBody)
.getResult(0);
diff --git a/mlir/lib/Conversion/ControlFlowToSCF/ControlFlowToSCF.cpp b/mlir/lib/Conversion/ControlFlowToSCF/ControlFlowToSCF.cpp
index c8311eb5a6433..f559fffbaf91f 100644
--- a/mlir/lib/Conversion/ControlFlowToSCF/ControlFlowToSCF.cpp
+++ b/mlir/lib/Conversion/ControlFlowToSCF/ControlFlowToSCF.cpp
@@ -144,8 +144,7 @@ ControlFlowToSCFTransformation::createUnreachableTerminator(Location loc,
return emitError(loc, "Cannot create unreachable terminator for '")
<< parentOp->getName() << "'";
- return builder
- .create<func::ReturnOp>(
+ return func::ReturnOp::create(builder,
loc, llvm::map_to_vector(funcOp.getResultTypes(),
[&](Type type) {
return getUndefValue(loc, builder, type);
diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
index a19194eb181fb..963f365cd688d 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
+++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
@@ -559,8 +559,7 @@ static Value createGroupReduceOpImpl(OpBuilder &builder, Location loc,
builder, loc, builder.getI32Type(),
builder.getIntegerAttr(builder.getI32Type(), *clusterSize));
- return builder
- .create<NonUniformOp>(loc, type, scope, groupOp, arg, clusterSizeValue)
+ return NonUniformOp::create(builder, loc, type, scope, groupOp, arg, clusterSizeValue)
.getResult();
}
diff --git a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
index ecd5b6367fba4..be58aa2e44e04 100644
--- a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
+++ b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
@@ -273,8 +273,7 @@ LogicalResult ConvertToLLVMPattern::copyUnrankedDescriptors(
// Allocate memory, copy, and free the source if necessary.
Value memory =
toDynamic
- ? builder
- .create<LLVM::CallOp>(loc, mallocFunc.value(), allocationSize)
+ ? LLVM::CallOp::create(builder, loc, mallocFunc.value(), allocationSize)
.getResult()
: LLVM::AllocaOp::create(builder, loc, getPtrType(),
IntegerType::get(getContext(), 8),
diff --git a/mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp b/mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp
index 5b68eb8188996..e5496e53ae529 100644
--- a/mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp
+++ b/mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp
@@ -35,7 +35,7 @@ static Op getOrDefineGlobal(ModuleOp &moduleOp, const Location loc,
if (!(ret = moduleOp.lookupSymbol<Op>(name))) {
ConversionPatternRewriter::InsertionGuard guard(rewriter);
rewriter.setInsertionPointToStart(moduleOp.getBody());
- ret = rewriter.template create<Op>(loc, std::forward<Args>(args)...);
+ ret = Op::create(rewriter, loc, std::forward<Args>(args)...);
}
return ret;
}
diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
index 53a19129103a3..88c08eb792737 100644
--- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
+++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
@@ -575,8 +575,7 @@ struct DimOpLowering : public ConvertOpToLLVMPattern<memref::DimOp> {
Value sizePtr = LLVM::GEPOp::create(rewriter, loc, indexPtrTy,
getTypeConverter()->getIndexType(),
offsetPtr, idxPlusOne);
- return rewriter
- .create<LLVM::LoadOp>(loc, getTypeConverter()->getIndexType(), sizePtr)
+ return LLVM::LoadOp::create(rewriter, loc, getTypeConverter()->getIndexType(), sizePtr)
.getResult();
}
diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
index aae3271371c1f..9b6154057b806 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
@@ -1493,11 +1493,11 @@ class ShiftPattern : public SPIRVToLLVMConversion<SPIRVOp> {
Value extended;
if (op2TypeWidth < dstTypeWidth) {
if (isUnsignedIntegerOrVector(op2Type)) {
- extended = rewriter.template create<LLVM::ZExtOp>(
- loc, dstType, adaptor.getOperand2());
+ extended =
+ LLVM::ZExtOp::create(rewriter, loc, dstType, adaptor.getOperand2());
} else {
- extended = rewriter.template create<LLVM::SExtOp>(
- loc, dstType, adaptor.getOperand2());
+ extended =
+ LLVM::SExtOp::create(rewriter, loc, dstType, adaptor.getOperand2());
}
} else if (op2TypeWidth == dstTypeWidth) {
extended = adaptor.getOperand2();
@@ -1505,8 +1505,8 @@ class ShiftPattern : public SPIRVToLLVMConversion<SPIRVOp> {
return failure();
}
- Value result = rewriter.template create<LLVMOp>(
- loc, dstType, adaptor.getOperand1(), extended);
+ Value result =
+ LLVMOp::create(rewriter, loc, dstType, adaptor.getOperand1(), extended);
rewriter.replaceOp(op, result);
return success();
}
diff --git a/mlir/lib/Conversion/ShardToMPI/ShardToMPI.cpp b/mlir/lib/Conversion/ShardToMPI/ShardToMPI.cpp
index 8525543760d99..34773f16f02a2 100644
--- a/mlir/lib/Conversion/ShardToMPI/ShardToMPI.cpp
+++ b/mlir/lib/Conversion/ShardToMPI/ShardToMPI.cpp
@@ -177,8 +177,7 @@ struct ConvertShardingOp : public OpConversionPattern<ShardingOp> {
auto type = RankedTensorType::get({nSplits, 2}, i64);
Value resHaloSizes =
haloSizes.empty()
- ? rewriter
- .create<tensor::EmptyOp>(loc, std::array<int64_t, 2>{0, 0},
+ ? tensor::EmptyOp::create(rewriter, loc, std::array<int64_t, 2>{0, 0},
i64)
.getResult()
: tensor::FromElementsOp::create(rewriter, loc, type, haloSizes)
@@ -307,8 +306,7 @@ class ConvertProcessLinearIndexOp
Value commWorld =
mpi::CommWorldOp::create(rewriter, loc, mpi::CommType::get(ctx));
auto rank =
- rewriter
- .create<mpi::CommRankOp>(
+ mpi::CommRankOp::create(rewriter,
loc,
TypeRange{mpi::RetvalType::get(ctx), rewriter.getI32Type()},
commWorld)
@@ -704,8 +702,7 @@ struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> {
for (auto &sz : haloSizes) {
if (auto value = dyn_cast<Value>(sz))
sz =
- rewriter
- .create<arith::IndexCastOp>(loc, rewriter.getIndexType(), value)
+ arith::IndexCastOp::create(rewriter, loc, rewriter.getIndexType(), value)
.getResult();
}
@@ -758,8 +755,7 @@ struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> {
assert(currHaloDim >= 0 && (size_t)currHaloDim < haloSizes.size() / 2);
// Get the linearized ids of the neighbors (down and up) for the
// given split
- auto tmp = rewriter
- .create<NeighborsLinearIndicesOp>(loc, grid, myMultiIndex,
+ auto tmp = NeighborsLinearIndicesOp::create(rewriter, loc, grid, myMultiIndex,
splitAxes)
.getResults();
// MPI operates on i32...
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index 5c7c027382977..b5792406b296f 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -569,8 +569,7 @@ static Value createLinalgBodyCalculationForElementwiseOp(
// to UIToFP.
if (srcTy.isUnsignedInteger() && isa<FloatType>(dstTy)) {
auto unrealizedCast =
- rewriter
- .create<UnrealizedConversionCastOp>(
+ UnrealizedConversionCastOp::create(rewriter,
loc, rewriter.getIntegerType(srcTy.getIntOrFloatBitWidth()),
args[0])
.getResult(0);
@@ -868,8 +867,7 @@ static Value broadcastDynamicDimension(PatternRewriter &rewriter, Location loc,
// Emit 'linalg.generic' op
auto resultTensor =
- opBuilder
- .create<linalg::GenericOp>(
+ linalg::GenericOp::create(opBuilder,
loc, outputTensor.getType(), operand, outputTensor, affineMaps,
getNParallelLoopsAttrs(rank),
[&](OpBuilder &opBuilder, Location loc, ValueRange blockArgs) {
@@ -1156,8 +1154,7 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
// First fill the output buffer with the init value.
auto emptyTensor =
- rewriter
- .create<tensor::EmptyOp>(loc, reduceShape, resultTy.getElementType(),
+ tensor::EmptyOp::create(rewriter, loc, reduceShape, resultTy.getElementType(),
dynDims)
.getResult();
@@ -1167,8 +1164,7 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
op, "No initial value found for reduction operation");
auto fillValue = arith::ConstantOp::create(rewriter, loc, fillValueAttr);
- auto filledTensor = rewriter
- .create<linalg::FillOp>(loc, ValueRange{fillValue},
+ auto filledTensor = linalg::FillOp::create(rewriter, loc, ValueRange{fillValue},
ValueRange{emptyTensor})
.result();
outputs.push_back(filledTensor);
@@ -1186,13 +1182,11 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
auto trueAttr = rewriter.getBoolAttr(true);
auto trueValue = arith::ConstantOp::create(rewriter, loc, trueAttr);
auto emptyBoolTensor =
- rewriter
- .create<tensor::EmptyOp>(loc, reduceShape, trueValue.getType(),
+ tensor::EmptyOp::create(rewriter, loc, reduceShape, trueValue.getType(),
dynDims)
.getResult();
auto allResultsNaNTensor =
- rewriter
- .create<linalg::FillOp>(loc, ValueRange{trueValue},
+ linalg::FillOp::create(rewriter, loc, ValueRange{trueValue},
ValueRange{emptyBoolTensor})
.result();
// Note that because the linalg::ReduceOp has two variadic arguments
@@ -1261,21 +1255,18 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
APFloat::getNaN(cast<FloatType>(elementTy).getFloatSemantics(), false));
auto nanValue = arith::ConstantOp::create(rewriter, loc, nanValueAttr);
auto emptyNanTensor =
- rewriter
- .create<tensor::EmptyOp>(loc, reduceShape,
+ tensor::EmptyOp::create(rewriter, loc, reduceShape,
resultTy.getElementType(), dynDims)
.getResult();
auto nanFilledTensor =
- rewriter
- .create<linalg::FillOp>(loc, ValueRange{nanValue},
+ linalg::FillOp::create(rewriter, loc, ValueRange{nanValue},
ValueRange{emptyNanTensor})
.result();
// Create an empty tensor, non need to fill this since it will be
// overwritten by the select.
auto finalEmptyTensor =
- rewriter
- .create<tensor::EmptyOp>(loc, reduceShape,
+ tensor::EmptyOp::create(rewriter, loc, reduceShape,
resultTy.getElementType(), dynDims)
.getResult();
@@ -1503,8 +1494,7 @@ class RescaleConverter : public OpRewritePattern<tosa::RescaleOp> {
Value shift = shiftConstant ? shiftConstant : blockArgs[shiftArg];
if (valueTy.isUnsignedInteger()) {
- value = nestedBuilder
- .create<UnrealizedConversionCastOp>(
+ value = UnrealizedConversionCastOp::create(nestedBuilder,
nestedLoc,
nestedBuilder.getIntegerType(
valueTy.getIntOrFloatBitWidth()),
@@ -1557,8 +1547,7 @@ class RescaleConverter : public OpRewritePattern<tosa::RescaleOp> {
}
if (outIntType.isUnsignedInteger()) {
- value = nestedBuilder
- .create<UnrealizedConversionCastOp>(nestedLoc,
+ value = UnrealizedConversionCastOp::create(nestedBuilder, nestedLoc,
outIntType, value)
.getResult(0);
}
@@ -2095,8 +2084,7 @@ class ReverseConverter : public OpRewritePattern<tosa::ReverseOp> {
Value axisDimSize = tensor::DimOp::create(rewriter, loc, input, axis);
// First fill the output buffer with the init value.
- auto emptyTensor = rewriter
- .create<tensor::EmptyOp>(loc, inputTy.getShape(),
+ auto emptyTensor = tensor::EmptyOp::create(rewriter, loc, inputTy.getShape(),
inputTy.getElementType(),
ArrayRef<Value>({dynDims}))
.getResult();
@@ -2241,21 +2229,18 @@ class ArgMaxConverter : public OpRewritePattern<tosa::ArgMaxOp> {
}
// First fill the output buffer for the index.
- auto emptyTensorIdx = rewriter
- .create<tensor::EmptyOp>(loc, resultTy.getShape(),
+ auto emptyTensorIdx = tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(),
outElementTy, dynDims)
.getResult();
auto fillValueIdx = arith::ConstantOp::create(
rewriter, loc, rewriter.getIntegerAttr(outElementTy, 0));
auto filledTensorIdx =
- rewriter
- .create<linalg::FillOp>(loc, ValueRange{fillValueIdx},
+ linalg::FillOp::create(rewriter, loc, ValueRange{fillValueIdx},
ValueRange{emptyTensorIdx})
.result();
// Second fill the output buffer for the running max.
- auto emptyTensorMax = rewriter
- .create<tensor::EmptyOp>(loc, resultTy.getShape(),
+ auto emptyTensorMax = tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(),
inElementTy, dynDims)
.getResult();
auto fillValueMaxAttr =
@@ -2268,8 +2253,7 @@ class ArgMaxConverter : public OpRewritePattern<tosa::ArgMaxOp> {
auto fillValueMax =
arith::ConstantOp::create(rewriter, loc, fillValueMaxAttr);
auto filledTensorMax =
- rewriter
- .create<linalg::FillOp>(loc, ValueRange{fillValueMax},
+ linalg::FillOp::create(rewriter, loc, ValueRange{fillValueMax},
ValueRange{emptyTensorMax})
.result();
@@ -2371,8 +2355,7 @@ class GatherConverter : public OpConversionPattern<tosa::GatherOp> {
auto loc = op.getLoc();
auto emptyTensor =
- rewriter
- .create<tensor::EmptyOp>(loc, resultTy.getShape(), resultElementTy,
+ tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(), resultElementTy,
dynamicDims)
.getResult();
@@ -2448,8 +2431,7 @@ class TableConverter : public OpRewritePattern<tosa::TableOp> {
}
}
- auto emptyTensor = rewriter
- .create<tensor::EmptyOp>(loc, resultTy.getShape(),
+ auto emptyTensor = tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(),
resultElementTy, dynDims)
.getResult();
@@ -2585,8 +2567,7 @@ struct RFFT2dConverter final : public OpRewritePattern<RFFT2dOp> {
tensor::EmptyOp::create(rewriter, loc, type, dynamicSizes);
auto fillValueAttr = rewriter.getZeroAttr(type.getElementType());
auto fillValue = arith::ConstantOp::create(rewriter, loc, fillValueAttr);
- auto filledTensor = rewriter
- .create<linalg::FillOp>(loc, ValueRange{fillValue},
+ auto filledTensor = linalg::FillOp::create(rewriter, loc, ValueRange{fillValue},
ValueRange{emptyTensor})
.result();
return filledTensor;
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
index 3a205246ddd9e..3d7d4e8f6afa4 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
@@ -64,8 +64,7 @@ linalgIntBroadcastExtSIAdd(PatternRewriter &rewriter, Location loc, Value bias,
Value conv, Value result,
ArrayRef<AffineMap> indexingMaps) {
ShapedType resultTy = cast<ShapedType>(conv.getType());
- return rewriter
- .create<linalg::GenericOp>(
+ return linalg::GenericOp::create(rewriter,
loc, resultTy, ValueRange({bias, conv}), result, indexingMaps,
getNParallelLoopsAttrs(resultTy.getRank()),
[](OpBuilder &builder, Location loc, ValueRange args) {
@@ -124,8 +123,7 @@ static mlir::Value linalgBroadcastAndMaybeExt(PatternRewriter &rewriter,
indexingMaps.push_back(rewriter.getMultiDimIdentityMap(resultRank));
// Build the broadcast-like operation ...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/150660
More information about the Mlir-commits mailing list