[Mlir-commits] [mlir] 284a5c2 - [mlir][NFC] update `mlir/examples` create APIs (31/n) (#150652)
llvmlistbot at llvm.org
Fri Jul 25 13:14:20 PDT 2025
Author: Maksim Levental
Date: 2025-07-25T16:14:16-04:00
New Revision: 284a5c2c0b97edddf255ea210f939203ad3d09f2
URL: https://github.com/llvm/llvm-project/commit/284a5c2c0b97edddf255ea210f939203ad3d09f2
DIFF: https://github.com/llvm/llvm-project/commit/284a5c2c0b97edddf255ea210f939203ad3d09f2.diff
LOG: [mlir][NFC] update `mlir/examples` create APIs (31/n) (#150652)
See https://github.com/llvm/llvm-project/pull/147168 for more info.
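For context, the change applied throughout this diff is mechanical and identical in every file: the builder-templated `builder.create<OpTy>(...)` call becomes the op's static `OpTy::create(builder, ...)`, with the builder (or rewriter) passed as the first argument. A minimal before/after sketch, using `arith::ConstantIndexOp` as it appears in the lowering files below (the surrounding `rewriter` and `loc` are assumed to be in scope as in those call sites):

```c++
// Before (old builder-templated API):
//   auto zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);

// After (new static create API; the builder/rewriter is the first argument):
auto zero = arith::ConstantIndexOp::create(rewriter, loc, 0);
```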
Added:
Modified:
mlir/docs/Interfaces.md
mlir/docs/PDLL.md
mlir/docs/Tutorials/QuickstartRewrites.md
mlir/docs/Tutorials/Toy/Ch-2.md
mlir/docs/Tutorials/Toy/Ch-4.md
mlir/docs/Tutorials/Toy/Ch-5.md
mlir/docs/Tutorials/Toy/Ch-6.md
mlir/docs/Tutorials/Toy/Ch-7.md
mlir/examples/toy/Ch2/include/toy/Ops.td
mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
mlir/examples/toy/Ch3/include/toy/Ops.td
mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
mlir/examples/toy/Ch4/include/toy/Ops.td
mlir/examples/toy/Ch4/mlir/Dialect.cpp
mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
mlir/examples/toy/Ch5/include/toy/Ops.td
mlir/examples/toy/Ch5/mlir/Dialect.cpp
mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
mlir/examples/toy/Ch6/include/toy/Ops.td
mlir/examples/toy/Ch6/mlir/Dialect.cpp
mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
mlir/examples/toy/Ch7/include/toy/Ops.td
mlir/examples/toy/Ch7/mlir/Dialect.cpp
mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
Removed:
################################################################################
diff --git a/mlir/docs/Interfaces.md b/mlir/docs/Interfaces.md
index bf590ac3351ee..7e1c5fe075675 100644
--- a/mlir/docs/Interfaces.md
+++ b/mlir/docs/Interfaces.md
@@ -563,7 +563,7 @@ def MyInterface : OpInterface<"MyInterface"> {
template <typename ConcreteOp>
struct Model : public Concept {
Operation *create(OpBuilder &builder, Location loc) const override {
- return builder.create<ConcreteOp>(loc);
+ return ConcreteOp::create(builder, loc);
}
}
};
@@ -574,7 +574,7 @@ def MyInterface : OpInterface<"MyInterface"> {
}],
"Operation *", "create", (ins "OpBuilder &":$builder, "Location":$loc),
/*methodBody=*/[{
- return builder.create<ConcreteOp>(loc);
+ return ConcreteOp::create(builder, loc);
}]>,
InterfaceMethod<[{
diff --git a/mlir/docs/PDLL.md b/mlir/docs/PDLL.md
index 9839d1d0df764..c6e352fd647da 100644
--- a/mlir/docs/PDLL.md
+++ b/mlir/docs/PDLL.md
@@ -1483,7 +1483,7 @@ be defined by specifying a string code block after the rewrite declaration:
```pdll
Rewrite BuildOp(value: Value) -> (foo: Op<my_dialect.foo>, bar: Op<my_dialect.bar>) [{
- return {rewriter.create<my_dialect::FooOp>(value), rewriter.create<my_dialect::BarOp>()};
+ return {my_dialect::FooOp::create(rewriter, value), my_dialect::BarOp::create(rewriter)};
}];
Pattern {
@@ -1508,7 +1508,7 @@ translated into:
```c++
std::tuple<my_dialect::FooOp, my_dialect::BarOp> BuildOp(Value value) {
- return {rewriter.create<my_dialect::FooOp>(value), rewriter.create<my_dialect::BarOp>()};
+ return {my_dialect::FooOp::create(rewriter, value), my_dialect::BarOp::create(rewriter)};
}
```
@@ -1530,7 +1530,7 @@ below describes the various result translation scenarios:
```pdll
Rewrite createOp() [{
- rewriter.create<my_dialect::FooOp>();
+ my_dialect::FooOp::create(rewriter);
}];
```
@@ -1538,7 +1538,7 @@ In the case where a native `Rewrite` has no results, the native function returns
```c++
void createOp(PatternRewriter &rewriter) {
- rewriter.create<my_dialect::FooOp>();
+ my_dialect::FooOp::create(rewriter);
}
```
@@ -1546,7 +1546,7 @@ void createOp(PatternRewriter &rewriter) {
```pdll
Rewrite createOp() -> Op<my_dialect.foo> [{
- return rewriter.create<my_dialect::FooOp>();
+ return my_dialect::FooOp::create(rewriter);
}];
```
@@ -1555,7 +1555,7 @@ native type for that single result:
```c++
my_dialect::FooOp createOp(PatternRewriter &rewriter) {
- return rewriter.create<my_dialect::FooOp>();
+ return my_dialect::FooOp::create(rewriter);
}
```
diff --git a/mlir/docs/Tutorials/QuickstartRewrites.md b/mlir/docs/Tutorials/QuickstartRewrites.md
index 0c890659b0eea..cbb6f03e93e65 100644
--- a/mlir/docs/Tutorials/QuickstartRewrites.md
+++ b/mlir/docs/Tutorials/QuickstartRewrites.md
@@ -130,7 +130,7 @@ def : Pat<(TF_LeakyReluOp:$old_value, $arg, F32Attr:$a),
```c++
static Value createTFLLeakyRelu(PatternRewriter &rewriter, Operation *op,
Value operand, Attribute attr) {
- return rewriter.create<mlir::TFL::LeakyReluOp>(
+ return mlir::TFL::LeakyReluOp::create(rewriter,
op->getLoc(), operands[0].getType(), /*arg=*/operands[0],
/*alpha=*/cast<FloatAttr>(attrs[0]));
}
@@ -194,10 +194,10 @@ LogicalResult circt::MulOp::canonicalize(MulOp op, PatternRewriter &rewriter) {
// mul(x, c) -> shl(x, log2(c)), where c is a power of two.
if (inputs.size() == 2 && matchPattern(inputs.back(), m_RConstant(value)) &&
value.isPowerOf2()) {
- auto shift = rewriter.create<rtl::ConstantOp>(op.getLoc(), op.getType(),
+ auto shift = rtl::ConstantOp::create(rewriter, op.getLoc(), op.getType(),
value.exactLogBase2());
auto shlOp =
- rewriter.create<comb::ShlOp>(op.getLoc(), inputs[0], shift);
+ comb::ShlOp::create(rewriter, op.getLoc(), inputs[0], shift);
rewriter.replaceOpWithNewOp<MulOp>(op, op.getType(),
ArrayRef<Value>(shlOp));
return success();
diff --git a/mlir/docs/Tutorials/Toy/Ch-2.md b/mlir/docs/Tutorials/Toy/Ch-2.md
index 039417c9c9a19..81e41615ee55d 100644
--- a/mlir/docs/Tutorials/Toy/Ch-2.md
+++ b/mlir/docs/Tutorials/Toy/Ch-2.md
@@ -521,7 +521,7 @@ def ConstantOp : Toy_Op<"constant"> {
// Add custom build methods for the constant operation. These methods populate
// the `state` that MLIR uses to create operations, i.e. these are used when
- // using `builder.create<ConstantOp>(...)`.
+ // using `ConstantOp::create(builder, ...)`.
let builders = [
// Build a constant with a given constant tensor value.
OpBuilder<(ins "DenseElementsAttr":$value), [{
diff --git a/mlir/docs/Tutorials/Toy/Ch-4.md b/mlir/docs/Tutorials/Toy/Ch-4.md
index 1275d36de3531..1bba269ca9a1e 100644
--- a/mlir/docs/Tutorials/Toy/Ch-4.md
+++ b/mlir/docs/Tutorials/Toy/Ch-4.md
@@ -300,7 +300,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
Operation *materializeCallConversion(OpBuilder &builder, Value input,
Type resultType,
Location conversionLoc) const final {
- return builder.create<CastOp>(conversionLoc, resultType, input);
+ return CastOp::create(builder, conversionLoc, resultType, input);
}
};
```
diff --git a/mlir/docs/Tutorials/Toy/Ch-5.md b/mlir/docs/Tutorials/Toy/Ch-5.md
index d483cd8bba21d..c750c07ddfc04 100644
--- a/mlir/docs/Tutorials/Toy/Ch-5.md
+++ b/mlir/docs/Tutorials/Toy/Ch-5.md
@@ -136,7 +136,7 @@ struct TransposeOpLowering : public mlir::ConversionPattern {
// Transpose the elements by generating a load from the reverse
// indices.
SmallVector<mlir::Value, 2> reverseIvs(llvm::reverse(loopIvs));
- return rewriter.create<mlir::AffineLoadOp>(loc, input, reverseIvs);
+ return mlir::AffineLoadOp::create(rewriter, loc, input, reverseIvs);
});
return success();
}
diff --git a/mlir/docs/Tutorials/Toy/Ch-6.md b/mlir/docs/Tutorials/Toy/Ch-6.md
index e8a68b5f9ee38..529de55304206 100644
--- a/mlir/docs/Tutorials/Toy/Ch-6.md
+++ b/mlir/docs/Tutorials/Toy/Ch-6.md
@@ -47,7 +47,7 @@ static FlatSymbolRefAttr getOrInsertPrintf(PatternRewriter &rewriter,
// Insert the printf function into the body of the parent module.
PatternRewriter::InsertionGuard insertGuard(rewriter);
rewriter.setInsertionPointToStart(module.getBody());
- rewriter.create<LLVM::LLVMFuncOp>(module.getLoc(), "printf", llvmFnType);
+ LLVM::LLVMFuncOp::create(rewriter, module.getLoc(), "printf", llvmFnType);
return SymbolRefAttr::get("printf", context);
}
```
diff --git a/mlir/docs/Tutorials/Toy/Ch-7.md b/mlir/docs/Tutorials/Toy/Ch-7.md
index dce3490aeace4..0f50c49a5f64d 100644
--- a/mlir/docs/Tutorials/Toy/Ch-7.md
+++ b/mlir/docs/Tutorials/Toy/Ch-7.md
@@ -488,9 +488,9 @@ mlir::Operation *ToyDialect::materializeConstant(mlir::OpBuilder &builder,
mlir::Type type,
mlir::Location loc) {
if (isa<StructType>(type))
- return builder.create<StructConstantOp>(loc, type,
+ return StructConstantOp::create(builder, loc, type,
cast<mlir::ArrayAttr>(value));
- return builder.create<ConstantOp>(loc, type,
+ return ConstantOp::create(builder, loc, type,
cast<mlir::DenseElementsAttr>(value));
}
```
diff --git a/mlir/examples/toy/Ch2/include/toy/Ops.td b/mlir/examples/toy/Ch2/include/toy/Ops.td
index ef65c9c8d682b..91bf83a54df1a 100644
--- a/mlir/examples/toy/Ch2/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch2/include/toy/Ops.td
@@ -70,7 +70,7 @@ def ConstantOp : Toy_Op<"constant", [Pure]> {
// Add custom build methods for the constant operation. These method populates
// the `state` that MLIR uses to create operations, i.e. these are used when
- // using `builder.create<ConstantOp>(...)`.
+ // using `ConstantOp::create(builder, ...)`.
let builders = [
// Build a constant with a given constant tensor value.
OpBuilder<(ins "DenseElementsAttr":$value), [{
diff --git a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
index 96925bebf07b7..39ae6a016eb41 100644
--- a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
@@ -121,8 +121,8 @@ class MLIRGenImpl {
llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
getType(VarType{}));
auto funcType = builder.getFunctionType(argTypes, {});
- return builder.create<mlir::toy::FuncOp>(location, proto.getName(),
- funcType);
+ return mlir::toy::FuncOp::create(builder, location, proto.getName(),
+ funcType);
}
/// Emit a new function and add it to the MLIR module.
@@ -166,7 +166,7 @@ class MLIRGenImpl {
if (!entryBlock.empty())
returnOp = dyn_cast<ReturnOp>(entryBlock.back());
if (!returnOp) {
- builder.create<ReturnOp>(loc(funcAST.getProto()->loc()));
+ ReturnOp::create(builder, loc(funcAST.getProto()->loc()));
} else if (returnOp.hasOperand()) {
// Otherwise, if this return operation has an operand then add a result to
// the function.
@@ -202,9 +202,9 @@ class MLIRGenImpl {
// support '+' and '*'.
switch (binop.getOp()) {
case '+':
- return builder.create<AddOp>(location, lhs, rhs);
+ return AddOp::create(builder, location, lhs, rhs);
case '*':
- return builder.create<MulOp>(location, lhs, rhs);
+ return MulOp::create(builder, location, lhs, rhs);
}
emitError(location, "invalid binary operator '") << binop.getOp() << "'";
@@ -235,8 +235,8 @@ class MLIRGenImpl {
}
// Otherwise, this return operation has zero operands.
- builder.create<ReturnOp>(location,
- expr ? ArrayRef(expr) : ArrayRef<mlir::Value>());
+ ReturnOp::create(builder, location,
+ expr ? ArrayRef(expr) : ArrayRef<mlir::Value>());
return mlir::success();
}
@@ -280,7 +280,7 @@ class MLIRGenImpl {
// Build the MLIR op `toy.constant`. This invokes the `ConstantOp::build`
// method.
- return builder.create<ConstantOp>(loc(lit.loc()), type, dataAttribute);
+ return ConstantOp::create(builder, loc(lit.loc()), type, dataAttribute);
}
/// Recursive helper function to accumulate the data that compose an array
@@ -325,13 +325,13 @@ class MLIRGenImpl {
"does not accept multiple arguments");
return nullptr;
}
- return builder.create<TransposeOp>(location, operands[0]);
+ return TransposeOp::create(builder, location, operands[0]);
}
// Otherwise this is a call to a user-defined function. Calls to
// user-defined functions are mapped to a custom call that takes the callee
// name as an attribute.
- return builder.create<GenericCallOp>(location, callee, operands);
+ return GenericCallOp::create(builder, location, callee, operands);
}
/// Emit a print expression. It emits specific operations for two builtins:
@@ -341,13 +341,13 @@ class MLIRGenImpl {
if (!arg)
return mlir::failure();
- builder.create<PrintOp>(loc(call.loc()), arg);
+ PrintOp::create(builder, loc(call.loc()), arg);
return mlir::success();
}
/// Emit a constant for a single number (FIXME: semantic? broadcast?)
mlir::Value mlirGen(NumberExprAST &num) {
- return builder.create<ConstantOp>(loc(num.loc()), num.getValue());
+ return ConstantOp::create(builder, loc(num.loc()), num.getValue());
}
/// Dispatch codegen for the right expression subclass using RTTI.
@@ -391,8 +391,8 @@ class MLIRGenImpl {
// with specific shape, we emit a "reshape" operation. It will get
// optimized out later as needed.
if (!vardecl.getType().shape.empty()) {
- value = builder.create<ReshapeOp>(loc(vardecl.loc()),
- getType(vardecl.getType()), value);
+ value = ReshapeOp::create(builder, loc(vardecl.loc()),
+ getType(vardecl.getType()), value);
}
// Register the value in the symbol table.
diff --git a/mlir/examples/toy/Ch3/include/toy/Ops.td b/mlir/examples/toy/Ch3/include/toy/Ops.td
index 485980420a20b..027b076af9e63 100644
--- a/mlir/examples/toy/Ch3/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch3/include/toy/Ops.td
@@ -69,7 +69,7 @@ def ConstantOp : Toy_Op<"constant", [Pure]> {
// Add custom build methods for the constant operation. These method populates
// the `state` that MLIR uses to create operations, i.e. these are used when
- // using `builder.create<ConstantOp>(...)`.
+ // using `ConstantOp::create(builder, ...)`.
let builders = [
// Build a constant with a given constant tensor value.
OpBuilder<(ins "DenseElementsAttr":$value), [{
diff --git a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
index c8cba82cb63a0..0573af699c1f4 100644
--- a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
@@ -121,8 +121,8 @@ class MLIRGenImpl {
llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
getType(VarType{}));
auto funcType = builder.getFunctionType(argTypes, /*results=*/{});
- return builder.create<mlir::toy::FuncOp>(location, proto.getName(),
- funcType);
+ return mlir::toy::FuncOp::create(builder, location, proto.getName(),
+ funcType);
}
/// Emit a new function and add it to the MLIR module.
@@ -166,7 +166,7 @@ class MLIRGenImpl {
if (!entryBlock.empty())
returnOp = dyn_cast<ReturnOp>(entryBlock.back());
if (!returnOp) {
- builder.create<ReturnOp>(loc(funcAST.getProto()->loc()));
+ ReturnOp::create(builder, loc(funcAST.getProto()->loc()));
} else if (returnOp.hasOperand()) {
// Otherwise, if this return operation has an operand then add a result to
// the function.
@@ -202,9 +202,9 @@ class MLIRGenImpl {
// support '+' and '*'.
switch (binop.getOp()) {
case '+':
- return builder.create<AddOp>(location, lhs, rhs);
+ return AddOp::create(builder, location, lhs, rhs);
case '*':
- return builder.create<MulOp>(location, lhs, rhs);
+ return MulOp::create(builder, location, lhs, rhs);
}
emitError(location, "invalid binary operator '") << binop.getOp() << "'";
@@ -235,8 +235,8 @@ class MLIRGenImpl {
}
// Otherwise, this return operation has zero operands.
- builder.create<ReturnOp>(location,
- expr ? ArrayRef(expr) : ArrayRef<mlir::Value>());
+ ReturnOp::create(builder, location,
+ expr ? ArrayRef(expr) : ArrayRef<mlir::Value>());
return mlir::success();
}
@@ -280,7 +280,7 @@ class MLIRGenImpl {
// Build the MLIR op `toy.constant`. This invokes the `ConstantOp::build`
// method.
- return builder.create<ConstantOp>(loc(lit.loc()), type, dataAttribute);
+ return ConstantOp::create(builder, loc(lit.loc()), type, dataAttribute);
}
/// Recursive helper function to accumulate the data that compose an array
@@ -325,13 +325,13 @@ class MLIRGenImpl {
"does not accept multiple arguments");
return nullptr;
}
- return builder.create<TransposeOp>(location, operands[0]);
+ return TransposeOp::create(builder, location, operands[0]);
}
// Otherwise this is a call to a user-defined function. Calls to
// user-defined functions are mapped to a custom call that takes the callee
// name as an attribute.
- return builder.create<GenericCallOp>(location, callee, operands);
+ return GenericCallOp::create(builder, location, callee, operands);
}
/// Emit a print expression. It emits specific operations for two builtins:
@@ -341,13 +341,13 @@ class MLIRGenImpl {
if (!arg)
return mlir::failure();
- builder.create<PrintOp>(loc(call.loc()), arg);
+ PrintOp::create(builder, loc(call.loc()), arg);
return mlir::success();
}
/// Emit a constant for a single number (FIXME: semantic? broadcast?)
mlir::Value mlirGen(NumberExprAST &num) {
- return builder.create<ConstantOp>(loc(num.loc()), num.getValue());
+ return ConstantOp::create(builder, loc(num.loc()), num.getValue());
}
/// Dispatch codegen for the right expression subclass using RTTI.
@@ -391,8 +391,8 @@ class MLIRGenImpl {
// with specific shape, we emit a "reshape" operation. It will get
// optimized out later as needed.
if (!vardecl.getType().shape.empty()) {
- value = builder.create<ReshapeOp>(loc(vardecl.loc()),
- getType(vardecl.getType()), value);
+ value = ReshapeOp::create(builder, loc(vardecl.loc()),
+ getType(vardecl.getType()), value);
}
// Register the value in the symbol table.
diff --git a/mlir/examples/toy/Ch4/include/toy/Ops.td b/mlir/examples/toy/Ch4/include/toy/Ops.td
index 0b32b1b0c7726..6c6b73937aaf8 100644
--- a/mlir/examples/toy/Ch4/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch4/include/toy/Ops.td
@@ -72,7 +72,7 @@ def ConstantOp : Toy_Op<"constant", [Pure]> {
// Add custom build methods for the constant operation. These method populates
// the `state` that MLIR uses to create operations, i.e. these are used when
- // using `builder.create<ConstantOp>(...)`.
+ // using `ConstantOp::create(builder, ...)`.
let builders = [
// Build a constant with a given constant tensor value.
OpBuilder<(ins "DenseElementsAttr":$value), [{
diff --git a/mlir/examples/toy/Ch4/mlir/Dialect.cpp b/mlir/examples/toy/Ch4/mlir/Dialect.cpp
index 076a75a26619b..1e5e67296a753 100644
--- a/mlir/examples/toy/Ch4/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch4/mlir/Dialect.cpp
@@ -91,7 +91,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
Operation *materializeCallConversion(OpBuilder &builder, Value input,
Type resultType,
Location conversionLoc) const final {
- return builder.create<CastOp>(conversionLoc, resultType, input);
+ return CastOp::create(builder, conversionLoc, resultType, input);
}
};
@@ -206,7 +206,8 @@ void ConstantOp::print(mlir::OpAsmPrinter &printer) {
llvm::LogicalResult ConstantOp::verify() {
// If the return type of the constant is not an unranked tensor, the shape
// must match the shape of the attribute holding the data.
- auto resultType = llvm::dyn_cast<mlir::RankedTensorType>(getResult().getType());
+ auto resultType =
+ llvm::dyn_cast<mlir::RankedTensorType>(getResult().getType());
if (!resultType)
return success();
@@ -395,7 +396,8 @@ llvm::LogicalResult ReturnOp::verify() {
auto resultType = results.front();
// Check that the result type of the function matches the operand type.
- if (inputType == resultType || llvm::isa<mlir::UnrankedTensorType>(inputType) ||
+ if (inputType == resultType ||
+ llvm::isa<mlir::UnrankedTensorType>(inputType) ||
llvm::isa<mlir::UnrankedTensorType>(resultType))
return mlir::success();
diff --git a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
index 9371815577b1b..7d676f1b39200 100644
--- a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
@@ -121,8 +121,8 @@ class MLIRGenImpl {
llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
getType(VarType{}));
auto funcType = builder.getFunctionType(argTypes, /*results=*/{});
- return builder.create<mlir::toy::FuncOp>(location, proto.getName(),
- funcType);
+ return mlir::toy::FuncOp::create(builder, location, proto.getName(),
+ funcType);
}
/// Emit a new function and add it to the MLIR module.
@@ -166,7 +166,7 @@ class MLIRGenImpl {
if (!entryBlock.empty())
returnOp = dyn_cast<ReturnOp>(entryBlock.back());
if (!returnOp) {
- builder.create<ReturnOp>(loc(funcAST.getProto()->loc()));
+ ReturnOp::create(builder, loc(funcAST.getProto()->loc()));
} else if (returnOp.hasOperand()) {
// Otherwise, if this return operation has an operand then add a result to
// the function.
@@ -206,9 +206,9 @@ class MLIRGenImpl {
// support '+' and '*'.
switch (binop.getOp()) {
case '+':
- return builder.create<AddOp>(location, lhs, rhs);
+ return AddOp::create(builder, location, lhs, rhs);
case '*':
- return builder.create<MulOp>(location, lhs, rhs);
+ return MulOp::create(builder, location, lhs, rhs);
}
emitError(location, "invalid binary operator '") << binop.getOp() << "'";
@@ -239,8 +239,8 @@ class MLIRGenImpl {
}
// Otherwise, this return operation has zero operands.
- builder.create<ReturnOp>(location,
- expr ? ArrayRef(expr) : ArrayRef<mlir::Value>());
+ ReturnOp::create(builder, location,
+ expr ? ArrayRef(expr) : ArrayRef<mlir::Value>());
return mlir::success();
}
@@ -284,7 +284,7 @@ class MLIRGenImpl {
// Build the MLIR op `toy.constant`. This invokes the `ConstantOp::build`
// method.
- return builder.create<ConstantOp>(loc(lit.loc()), type, dataAttribute);
+ return ConstantOp::create(builder, loc(lit.loc()), type, dataAttribute);
}
/// Recursive helper function to accumulate the data that compose an array
@@ -329,13 +329,13 @@ class MLIRGenImpl {
"does not accept multiple arguments");
return nullptr;
}
- return builder.create<TransposeOp>(location, operands[0]);
+ return TransposeOp::create(builder, location, operands[0]);
}
// Otherwise this is a call to a user-defined function. Calls to
// user-defined functions are mapped to a custom call that takes the callee
// name as an attribute.
- return builder.create<GenericCallOp>(location, callee, operands);
+ return GenericCallOp::create(builder, location, callee, operands);
}
/// Emit a print expression. It emits specific operations for two builtins:
@@ -345,13 +345,13 @@ class MLIRGenImpl {
if (!arg)
return mlir::failure();
- builder.create<PrintOp>(loc(call.loc()), arg);
+ PrintOp::create(builder, loc(call.loc()), arg);
return mlir::success();
}
/// Emit a constant for a single number (FIXME: semantic? broadcast?)
mlir::Value mlirGen(NumberExprAST &num) {
- return builder.create<ConstantOp>(loc(num.loc()), num.getValue());
+ return ConstantOp::create(builder, loc(num.loc()), num.getValue());
}
/// Dispatch codegen for the right expression subclass using RTTI.
@@ -395,8 +395,8 @@ class MLIRGenImpl {
// with specific shape, we emit a "reshape" operation. It will get
// optimized out later as needed.
if (!vardecl.getType().shape.empty()) {
- value = builder.create<ReshapeOp>(loc(vardecl.loc()),
- getType(vardecl.getType()), value);
+ value = ReshapeOp::create(builder, loc(vardecl.loc()),
+ getType(vardecl.getType()), value);
}
// Register the value in the symbol table.
diff --git a/mlir/examples/toy/Ch5/include/toy/Ops.td b/mlir/examples/toy/Ch5/include/toy/Ops.td
index d11d18dc93cf1..6a136ec76e3d1 100644
--- a/mlir/examples/toy/Ch5/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch5/include/toy/Ops.td
@@ -72,7 +72,7 @@ def ConstantOp : Toy_Op<"constant", [Pure]> {
// Add custom build methods for the constant operation. These method populates
// the `state` that MLIR uses to create operations, i.e. these are used when
- // using `builder.create<ConstantOp>(...)`.
+ // using `ConstantOp::create(builder, ...)`.
let builders = [
// Build a constant with a given constant tensor value.
OpBuilder<(ins "DenseElementsAttr":$value), [{
diff --git a/mlir/examples/toy/Ch5/mlir/Dialect.cpp b/mlir/examples/toy/Ch5/mlir/Dialect.cpp
index fb7c742a01802..69fb69fd6e7ba 100644
--- a/mlir/examples/toy/Ch5/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch5/mlir/Dialect.cpp
@@ -91,7 +91,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
Operation *materializeCallConversion(OpBuilder &builder, Value input,
Type resultType,
Location conversionLoc) const final {
- return builder.create<CastOp>(conversionLoc, resultType, input);
+ return CastOp::create(builder, conversionLoc, resultType, input);
}
};
@@ -206,7 +206,8 @@ void ConstantOp::print(mlir::OpAsmPrinter &printer) {
llvm::LogicalResult ConstantOp::verify() {
// If the return type of the constant is not an unranked tensor, the shape
// must match the shape of the attribute holding the data.
- auto resultType = llvm::dyn_cast<mlir::RankedTensorType>(getResult().getType());
+ auto resultType =
+ llvm::dyn_cast<mlir::RankedTensorType>(getResult().getType());
if (!resultType)
return success();
@@ -395,7 +396,8 @@ llvm::LogicalResult ReturnOp::verify() {
auto resultType = results.front();
// Check that the result type of the function matches the operand type.
- if (inputType == resultType || llvm::isa<mlir::UnrankedTensorType>(inputType) ||
+ if (inputType == resultType ||
+ llvm::isa<mlir::UnrankedTensorType>(inputType) ||
llvm::isa<mlir::UnrankedTensorType>(resultType))
return mlir::success();
diff --git a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
index bf2bc43301a33..d65c89c3fcfa6 100644
--- a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
@@ -55,7 +55,7 @@ static MemRefType convertTensorToMemRef(RankedTensorType type) {
/// Insert an allocation and deallocation for the given MemRefType.
static Value insertAllocAndDealloc(MemRefType type, Location loc,
PatternRewriter &rewriter) {
- auto alloc = rewriter.create<memref::AllocOp>(loc, type);
+ auto alloc = memref::AllocOp::create(rewriter, loc, type);
// Make sure to allocate at the beginning of the block.
auto *parentBlock = alloc->getBlock();
@@ -63,7 +63,7 @@ static Value insertAllocAndDealloc(MemRefType type, Location loc,
// Make sure to deallocate this alloc at the end of the block. This is fine
// as toy functions have no control flow.
- auto dealloc = rewriter.create<memref::DeallocOp>(loc, alloc);
+ auto dealloc = memref::DeallocOp::create(rewriter, loc, alloc);
dealloc->moveBefore(&parentBlock->back());
return alloc;
}
@@ -99,8 +99,8 @@ static void lowerOpToLoops(Operation *op, ValueRange operands,
// and the loop induction variables. This function will return the value
// to store at the current index.
Value valueToStore = processIteration(nestedBuilder, operands, ivs);
- nestedBuilder.create<affine::AffineStoreOp>(loc, valueToStore, alloc,
- ivs);
+ affine::AffineStoreOp::create(nestedBuilder, loc, valueToStore, alloc,
+ ivs);
});
// Replace this operation with the generated alloc.
@@ -131,15 +131,15 @@ struct BinaryOpLowering : public ConversionPattern {
// Generate loads for the element of 'lhs' and 'rhs' at the
// inner loop.
- auto loadedLhs = builder.create<affine::AffineLoadOp>(
- loc, binaryAdaptor.getLhs(), loopIvs);
- auto loadedRhs = builder.create<affine::AffineLoadOp>(
- loc, binaryAdaptor.getRhs(), loopIvs);
+ auto loadedLhs = affine::AffineLoadOp::create(
+ builder, loc, binaryAdaptor.getLhs(), loopIvs);
+ auto loadedRhs = affine::AffineLoadOp::create(
+ builder, loc, binaryAdaptor.getRhs(), loopIvs);
// Create the binary operation performed on the loaded
// values.
- return builder.create<LoweredBinaryOp>(loc, loadedLhs,
- loadedRhs);
+ return LoweredBinaryOp::create(builder, loc, loadedLhs,
+ loadedRhs);
});
return success();
}
@@ -174,11 +174,11 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
if (!valueShape.empty()) {
for (auto i : llvm::seq<int64_t>(0, *llvm::max_element(valueShape)))
constantIndices.push_back(
- rewriter.create<arith::ConstantIndexOp>(loc, i));
+ arith::ConstantIndexOp::create(rewriter, loc, i));
} else {
// This is the case of a tensor of rank 0.
constantIndices.push_back(
- rewriter.create<arith::ConstantIndexOp>(loc, 0));
+ arith::ConstantIndexOp::create(rewriter, loc, 0));
}
// The constant operation represents a multi-dimensional constant, so we
@@ -191,9 +191,9 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
// The last dimension is the base case of the recursion, at this point
// we store the element at the given index.
if (dimension == valueShape.size()) {
- rewriter.create<affine::AffineStoreOp>(
- loc, rewriter.create<arith::ConstantOp>(loc, *valueIt++), alloc,
- llvm::ArrayRef(indices));
+ affine::AffineStoreOp::create(
+ rewriter, loc, arith::ConstantOp::create(rewriter, loc, *valueIt++),
+ alloc, llvm::ArrayRef(indices));
return;
}
@@ -238,8 +238,8 @@ struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
}
// Create a new non-toy function, with the same region.
- auto func = rewriter.create<mlir::func::FuncOp>(op.getLoc(), op.getName(),
- op.getFunctionType());
+ auto func = mlir::func::FuncOp::create(rewriter, op.getLoc(), op.getName(),
+ op.getFunctionType());
rewriter.inlineRegionBefore(op.getRegion(), func.getBody(), func.end());
rewriter.eraseOp(op);
return success();
@@ -308,8 +308,8 @@ struct TransposeOpLowering : public ConversionPattern {
// Transpose the elements by generating a load from the
// reverse indices.
SmallVector<Value, 2> reverseIvs(llvm::reverse(loopIvs));
- return builder.create<affine::AffineLoadOp>(loc, input,
- reverseIvs);
+ return affine::AffineLoadOp::create(builder, loc, input,
+ reverseIvs);
});
return success();
}
diff --git a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
index 9371815577b1b..7d676f1b39200 100644
--- a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
@@ -121,8 +121,8 @@ class MLIRGenImpl {
llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
getType(VarType{}));
auto funcType = builder.getFunctionType(argTypes, /*results=*/{});
- return builder.create<mlir::toy::FuncOp>(location, proto.getName(),
- funcType);
+ return mlir::toy::FuncOp::create(builder, location, proto.getName(),
+ funcType);
}
/// Emit a new function and add it to the MLIR module.
@@ -166,7 +166,7 @@ class MLIRGenImpl {
if (!entryBlock.empty())
returnOp = dyn_cast<ReturnOp>(entryBlock.back());
if (!returnOp) {
- builder.create<ReturnOp>(loc(funcAST.getProto()->loc()));
+ ReturnOp::create(builder, loc(funcAST.getProto()->loc()));
} else if (returnOp.hasOperand()) {
// Otherwise, if this return operation has an operand then add a result to
// the function.
@@ -206,9 +206,9 @@ class MLIRGenImpl {
// support '+' and '*'.
switch (binop.getOp()) {
case '+':
- return builder.create<AddOp>(location, lhs, rhs);
+ return AddOp::create(builder, location, lhs, rhs);
case '*':
- return builder.create<MulOp>(location, lhs, rhs);
+ return MulOp::create(builder, location, lhs, rhs);
}
emitError(location, "invalid binary operator '") << binop.getOp() << "'";
@@ -239,8 +239,8 @@ class MLIRGenImpl {
}
// Otherwise, this return operation has zero operands.
- builder.create<ReturnOp>(location,
- expr ? ArrayRef(expr) : ArrayRef<mlir::Value>());
+ ReturnOp::create(builder, location,
+ expr ? ArrayRef(expr) : ArrayRef<mlir::Value>());
return mlir::success();
}
@@ -284,7 +284,7 @@ class MLIRGenImpl {
// Build the MLIR op `toy.constant`. This invokes the `ConstantOp::build`
// method.
- return builder.create<ConstantOp>(loc(lit.loc()), type, dataAttribute);
+ return ConstantOp::create(builder, loc(lit.loc()), type, dataAttribute);
}
/// Recursive helper function to accumulate the data that compose an array
@@ -329,13 +329,13 @@ class MLIRGenImpl {
"does not accept multiple arguments");
return nullptr;
}
- return builder.create<TransposeOp>(location, operands[0]);
+ return TransposeOp::create(builder, location, operands[0]);
}
// Otherwise this is a call to a user-defined function. Calls to
// user-defined functions are mapped to a custom call that takes the callee
// name as an attribute.
- return builder.create<GenericCallOp>(location, callee, operands);
+ return GenericCallOp::create(builder, location, callee, operands);
}
/// Emit a print expression. It emits specific operations for two builtins:
@@ -345,13 +345,13 @@ class MLIRGenImpl {
if (!arg)
return mlir::failure();
- builder.create<PrintOp>(loc(call.loc()), arg);
+ PrintOp::create(builder, loc(call.loc()), arg);
return mlir::success();
}
/// Emit a constant for a single number (FIXME: semantic? broadcast?)
mlir::Value mlirGen(NumberExprAST &num) {
- return builder.create<ConstantOp>(loc(num.loc()), num.getValue());
+ return ConstantOp::create(builder, loc(num.loc()), num.getValue());
}
/// Dispatch codegen for the right expression subclass using RTTI.
@@ -395,8 +395,8 @@ class MLIRGenImpl {
// with specific shape, we emit a "reshape" operation. It will get
// optimized out later as needed.
if (!vardecl.getType().shape.empty()) {
- value = builder.create<ReshapeOp>(loc(vardecl.loc()),
- getType(vardecl.getType()), value);
+ value = ReshapeOp::create(builder, loc(vardecl.loc()),
+ getType(vardecl.getType()), value);
}
// Register the value in the symbol table.
diff --git a/mlir/examples/toy/Ch6/include/toy/Ops.td b/mlir/examples/toy/Ch6/include/toy/Ops.td
index 63950f467455c..897b36d6135f4 100644
--- a/mlir/examples/toy/Ch6/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch6/include/toy/Ops.td
@@ -72,7 +72,7 @@ def ConstantOp : Toy_Op<"constant", [Pure]> {
// Add custom build methods for the constant operation. These method populates
// the `state` that MLIR uses to create operations, i.e. these are used when
- // using `builder.create<ConstantOp>(...)`.
+ // using `ConstantOp::create(builder, ...)`.
let builders = [
// Build a constant with a given constant tensor value.
OpBuilder<(ins "DenseElementsAttr":$value), [{
diff --git a/mlir/examples/toy/Ch6/mlir/Dialect.cpp b/mlir/examples/toy/Ch6/mlir/Dialect.cpp
index fb7c742a01802..69fb69fd6e7ba 100644
--- a/mlir/examples/toy/Ch6/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch6/mlir/Dialect.cpp
@@ -91,7 +91,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
Operation *materializeCallConversion(OpBuilder &builder, Value input,
Type resultType,
Location conversionLoc) const final {
- return builder.create<CastOp>(conversionLoc, resultType, input);
+ return CastOp::create(builder, conversionLoc, resultType, input);
}
};
@@ -206,7 +206,8 @@ void ConstantOp::print(mlir::OpAsmPrinter &printer) {
llvm::LogicalResult ConstantOp::verify() {
// If the return type of the constant is not an unranked tensor, the shape
// must match the shape of the attribute holding the data.
- auto resultType = llvm::dyn_cast<mlir::RankedTensorType>(getResult().getType());
+ auto resultType =
+ llvm::dyn_cast<mlir::RankedTensorType>(getResult().getType());
if (!resultType)
return success();
@@ -395,7 +396,8 @@ llvm::LogicalResult ReturnOp::verify() {
auto resultType = results.front();
// Check that the result type of the function matches the operand type.
- if (inputType == resultType || llvm::isa<mlir::UnrankedTensorType>(inputType) ||
+ if (inputType == resultType ||
+ llvm::isa<mlir::UnrankedTensorType>(inputType) ||
llvm::isa<mlir::UnrankedTensorType>(resultType))
return mlir::success();
diff --git a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
index bf2bc43301a33..d65c89c3fcfa6 100644
--- a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
@@ -55,7 +55,7 @@ static MemRefType convertTensorToMemRef(RankedTensorType type) {
/// Insert an allocation and deallocation for the given MemRefType.
static Value insertAllocAndDealloc(MemRefType type, Location loc,
PatternRewriter &rewriter) {
- auto alloc = rewriter.create<memref::AllocOp>(loc, type);
+ auto alloc = memref::AllocOp::create(rewriter, loc, type);
// Make sure to allocate at the beginning of the block.
auto *parentBlock = alloc->getBlock();
@@ -63,7 +63,7 @@ static Value insertAllocAndDealloc(MemRefType type, Location loc,
// Make sure to deallocate this alloc at the end of the block. This is fine
// as toy functions have no control flow.
- auto dealloc = rewriter.create<memref::DeallocOp>(loc, alloc);
+ auto dealloc = memref::DeallocOp::create(rewriter, loc, alloc);
dealloc->moveBefore(&parentBlock->back());
return alloc;
}
@@ -99,8 +99,8 @@ static void lowerOpToLoops(Operation *op, ValueRange operands,
// and the loop induction variables. This function will return the value
// to store at the current index.
Value valueToStore = processIteration(nestedBuilder, operands, ivs);
- nestedBuilder.create<affine::AffineStoreOp>(loc, valueToStore, alloc,
- ivs);
+ affine::AffineStoreOp::create(nestedBuilder, loc, valueToStore, alloc,
+ ivs);
});
// Replace this operation with the generated alloc.
@@ -131,15 +131,15 @@ struct BinaryOpLowering : public ConversionPattern {
// Generate loads for the element of 'lhs' and 'rhs' at the
// inner loop.
- auto loadedLhs = builder.create<affine::AffineLoadOp>(
- loc, binaryAdaptor.getLhs(), loopIvs);
- auto loadedRhs = builder.create<affine::AffineLoadOp>(
- loc, binaryAdaptor.getRhs(), loopIvs);
+ auto loadedLhs = affine::AffineLoadOp::create(
+ builder, loc, binaryAdaptor.getLhs(), loopIvs);
+ auto loadedRhs = affine::AffineLoadOp::create(
+ builder, loc, binaryAdaptor.getRhs(), loopIvs);
// Create the binary operation performed on the loaded
// values.
- return builder.create<LoweredBinaryOp>(loc, loadedLhs,
- loadedRhs);
+ return LoweredBinaryOp::create(builder, loc, loadedLhs,
+ loadedRhs);
});
return success();
}
@@ -174,11 +174,11 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
if (!valueShape.empty()) {
for (auto i : llvm::seq<int64_t>(0, *llvm::max_element(valueShape)))
constantIndices.push_back(
- rewriter.create<arith::ConstantIndexOp>(loc, i));
+ arith::ConstantIndexOp::create(rewriter, loc, i));
} else {
// This is the case of a tensor of rank 0.
constantIndices.push_back(
- rewriter.create<arith::ConstantIndexOp>(loc, 0));
+ arith::ConstantIndexOp::create(rewriter, loc, 0));
}
// The constant operation represents a multi-dimensional constant, so we
@@ -191,9 +191,9 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
// The last dimension is the base case of the recursion, at this point
// we store the element at the given index.
if (dimension == valueShape.size()) {
- rewriter.create<affine::AffineStoreOp>(
- loc, rewriter.create<arith::ConstantOp>(loc, *valueIt++), alloc,
- llvm::ArrayRef(indices));
+ affine::AffineStoreOp::create(
+ rewriter, loc, arith::ConstantOp::create(rewriter, loc, *valueIt++),
+ alloc, llvm::ArrayRef(indices));
return;
}
@@ -238,8 +238,8 @@ struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
}
// Create a new non-toy function, with the same region.
- auto func = rewriter.create<mlir::func::FuncOp>(op.getLoc(), op.getName(),
- op.getFunctionType());
+ auto func = mlir::func::FuncOp::create(rewriter, op.getLoc(), op.getName(),
+ op.getFunctionType());
rewriter.inlineRegionBefore(op.getRegion(), func.getBody(), func.end());
rewriter.eraseOp(op);
return success();
@@ -308,8 +308,8 @@ struct TransposeOpLowering : public ConversionPattern {
// Transpose the elements by generating a load from the
// reverse indices.
SmallVector<Value, 2> reverseIvs(llvm::reverse(loopIvs));
- return builder.create<affine::AffineLoadOp>(loc, input,
- reverseIvs);
+ return affine::AffineLoadOp::create(builder, loc, input,
+ reverseIvs);
});
return success();
}
diff --git a/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp b/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
index 22f75e042fe08..e0950ef56f4fe 100644
--- a/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
@@ -86,12 +86,12 @@ class PrintOpLowering : public ConversionPattern {
// Create a loop for each of the dimensions within the shape.
SmallVector<Value, 4> loopIvs;
for (unsigned i = 0, e = memRefShape.size(); i != e; ++i) {
- auto lowerBound = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+ auto lowerBound = arith::ConstantIndexOp::create(rewriter, loc, 0);
auto upperBound =
- rewriter.create<arith::ConstantIndexOp>(loc, memRefShape[i]);
- auto step = rewriter.create<arith::ConstantIndexOp>(loc, 1);
+ arith::ConstantIndexOp::create(rewriter, loc, memRefShape[i]);
+ auto step = arith::ConstantIndexOp::create(rewriter, loc, 1);
auto loop =
- rewriter.create<scf::ForOp>(loc, lowerBound, upperBound, step);
+ scf::ForOp::create(rewriter, loc, lowerBound, upperBound, step);
for (Operation &nested : make_early_inc_range(*loop.getBody()))
rewriter.eraseOp(&nested);
loopIvs.push_back(loop.getInductionVar());
@@ -101,19 +101,18 @@ class PrintOpLowering : public ConversionPattern {
// Insert a newline after each of the inner dimensions of the shape.
if (i != e - 1)
- rewriter.create<LLVM::CallOp>(loc, getPrintfType(context), printfRef,
- newLineCst);
- rewriter.create<scf::YieldOp>(loc);
+ LLVM::CallOp::create(rewriter, loc, getPrintfType(context), printfRef,
+ newLineCst);
+ scf::YieldOp::create(rewriter, loc);
rewriter.setInsertionPointToStart(loop.getBody());
}
// Generate a call to printf for the current element of the loop.
auto printOp = cast<toy::PrintOp>(op);
auto elementLoad =
- rewriter.create<memref::LoadOp>(loc, printOp.getInput(), loopIvs);
- rewriter.create<LLVM::CallOp>(
- loc, getPrintfType(context), printfRef,
- ArrayRef<Value>({formatSpecifierCst, elementLoad}));
+ memref::LoadOp::create(rewriter, loc, printOp.getInput(), loopIvs);
+ LLVM::CallOp::create(rewriter, loc, getPrintfType(context), printfRef,
+ ArrayRef<Value>({formatSpecifierCst, elementLoad}));
// Notify the rewriter that this operation has been removed.
rewriter.eraseOp(op);
@@ -142,8 +141,8 @@ class PrintOpLowering : public ConversionPattern {
// Insert the printf function into the body of the parent module.
PatternRewriter::InsertionGuard insertGuard(rewriter);
rewriter.setInsertionPointToStart(module.getBody());
- rewriter.create<LLVM::LLVMFuncOp>(module.getLoc(), "printf",
- getPrintfType(context));
+ LLVM::LLVMFuncOp::create(rewriter, module.getLoc(), "printf",
+ getPrintfType(context));
return SymbolRefAttr::get(context, "printf");
}
@@ -159,19 +158,19 @@ class PrintOpLowering : public ConversionPattern {
builder.setInsertionPointToStart(module.getBody());
auto type = LLVM::LLVMArrayType::get(
IntegerType::get(builder.getContext(), 8), value.size());
- global = builder.create<LLVM::GlobalOp>(loc, type, /*isConstant=*/true,
- LLVM::Linkage::Internal, name,
- builder.getStringAttr(value),
- /*alignment=*/0);
+ global = LLVM::GlobalOp::create(builder, loc, type, /*isConstant=*/true,
+ LLVM::Linkage::Internal, name,
+ builder.getStringAttr(value),
+ /*alignment=*/0);
}
// Get the pointer to the first character in the global string.
- Value globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
- Value cst0 = builder.create<LLVM::ConstantOp>(loc, builder.getI64Type(),
- builder.getIndexAttr(0));
- return builder.create<LLVM::GEPOp>(
- loc, LLVM::LLVMPointerType::get(builder.getContext()), global.getType(),
- globalPtr, ArrayRef<Value>({cst0, cst0}));
+ Value globalPtr = LLVM::AddressOfOp::create(builder, loc, global);
+ Value cst0 = LLVM::ConstantOp::create(builder, loc, builder.getI64Type(),
+ builder.getIndexAttr(0));
+ return LLVM::GEPOp::create(
+ builder, loc, LLVM::LLVMPointerType::get(builder.getContext()),
+ global.getType(), globalPtr, ArrayRef<Value>({cst0, cst0}));
}
};
} // namespace
diff --git a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
index 9371815577b1b..7d676f1b39200 100644
--- a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
@@ -121,8 +121,8 @@ class MLIRGenImpl {
llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
getType(VarType{}));
auto funcType = builder.getFunctionType(argTypes, /*results=*/{});
- return builder.create<mlir::toy::FuncOp>(location, proto.getName(),
- funcType);
+ return mlir::toy::FuncOp::create(builder, location, proto.getName(),
+ funcType);
}
/// Emit a new function and add it to the MLIR module.
@@ -166,7 +166,7 @@ class MLIRGenImpl {
if (!entryBlock.empty())
returnOp = dyn_cast<ReturnOp>(entryBlock.back());
if (!returnOp) {
- builder.create<ReturnOp>(loc(funcAST.getProto()->loc()));
+ ReturnOp::create(builder, loc(funcAST.getProto()->loc()));
} else if (returnOp.hasOperand()) {
// Otherwise, if this return operation has an operand then add a result to
// the function.
@@ -206,9 +206,9 @@ class MLIRGenImpl {
// support '+' and '*'.
switch (binop.getOp()) {
case '+':
- return builder.create<AddOp>(location, lhs, rhs);
+ return AddOp::create(builder, location, lhs, rhs);
case '*':
- return builder.create<MulOp>(location, lhs, rhs);
+ return MulOp::create(builder, location, lhs, rhs);
}
emitError(location, "invalid binary operator '") << binop.getOp() << "'";
@@ -239,8 +239,8 @@ class MLIRGenImpl {
}
// Otherwise, this return operation has zero operands.
- builder.create<ReturnOp>(location,
- expr ? ArrayRef(expr) : ArrayRef<mlir::Value>());
+ ReturnOp::create(builder, location,
+ expr ? ArrayRef(expr) : ArrayRef<mlir::Value>());
return mlir::success();
}
@@ -284,7 +284,7 @@ class MLIRGenImpl {
// Build the MLIR op `toy.constant`. This invokes the `ConstantOp::build`
// method.
- return builder.create<ConstantOp>(loc(lit.loc()), type, dataAttribute);
+ return ConstantOp::create(builder, loc(lit.loc()), type, dataAttribute);
}
/// Recursive helper function to accumulate the data that compose an array
@@ -329,13 +329,13 @@ class MLIRGenImpl {
"does not accept multiple arguments");
return nullptr;
}
- return builder.create<TransposeOp>(location, operands[0]);
+ return TransposeOp::create(builder, location, operands[0]);
}
// Otherwise this is a call to a user-defined function. Calls to
// user-defined functions are mapped to a custom call that takes the callee
// name as an attribute.
- return builder.create<GenericCallOp>(location, callee, operands);
+ return GenericCallOp::create(builder, location, callee, operands);
}
/// Emit a print expression. It emits specific operations for two builtins:
@@ -345,13 +345,13 @@ class MLIRGenImpl {
if (!arg)
return mlir::failure();
- builder.create<PrintOp>(loc(call.loc()), arg);
+ PrintOp::create(builder, loc(call.loc()), arg);
return mlir::success();
}
/// Emit a constant for a single number (FIXME: semantic? broadcast?)
mlir::Value mlirGen(NumberExprAST &num) {
- return builder.create<ConstantOp>(loc(num.loc()), num.getValue());
+ return ConstantOp::create(builder, loc(num.loc()), num.getValue());
}
/// Dispatch codegen for the right expression subclass using RTTI.
@@ -395,8 +395,8 @@ class MLIRGenImpl {
// with specific shape, we emit a "reshape" operation. It will get
// optimized out later as needed.
if (!vardecl.getType().shape.empty()) {
- value = builder.create<ReshapeOp>(loc(vardecl.loc()),
- getType(vardecl.getType()), value);
+ value = ReshapeOp::create(builder, loc(vardecl.loc()),
+ getType(vardecl.getType()), value);
}
// Register the value in the symbol table.
diff --git a/mlir/examples/toy/Ch7/include/toy/Ops.td b/mlir/examples/toy/Ch7/include/toy/Ops.td
index bdf8ad0bc21a1..9151396c8aac7 100644
--- a/mlir/examples/toy/Ch7/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch7/include/toy/Ops.td
@@ -93,7 +93,7 @@ def ConstantOp : Toy_Op<"constant",
// Add custom build methods for the constant operation. These method populates
// the `state` that MLIR uses to create operations, i.e. these are used when
- // using `builder.create<ConstantOp>(...)`.
+ // using `ConstantOp::create(builder, ...)`.
let builders = [
// Build a constant with a given constant tensor value.
OpBuilder<(ins "DenseElementsAttr":$value), [{
diff --git a/mlir/examples/toy/Ch7/mlir/Dialect.cpp b/mlir/examples/toy/Ch7/mlir/Dialect.cpp
index 52881db87d86b..4d2f063afd0fb 100644
--- a/mlir/examples/toy/Ch7/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch7/mlir/Dialect.cpp
@@ -97,7 +97,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
Operation *materializeCallConversion(OpBuilder &builder, Value input,
Type resultType,
Location conversionLoc) const final {
- return builder.create<CastOp>(conversionLoc, resultType, input);
+ return CastOp::create(builder, conversionLoc, resultType, input);
}
};
@@ -429,7 +429,8 @@ llvm::LogicalResult ReturnOp::verify() {
auto resultType = results.front();
// Check that the result type of the function matches the operand type.
- if (inputType == resultType || llvm::isa<mlir::UnrankedTensorType>(inputType) ||
+ if (inputType == resultType ||
+ llvm::isa<mlir::UnrankedTensorType>(inputType) ||
llvm::isa<mlir::UnrankedTensorType>(resultType))
return mlir::success();
@@ -657,8 +658,8 @@ mlir::Operation *ToyDialect::materializeConstant(mlir::OpBuilder &builder,
mlir::Type type,
mlir::Location loc) {
if (llvm::isa<StructType>(type))
- return builder.create<StructConstantOp>(loc, type,
- llvm::cast<mlir::ArrayAttr>(value));
- return builder.create<ConstantOp>(loc, type,
- llvm::cast<mlir::DenseElementsAttr>(value));
+ return StructConstantOp::create(builder, loc, type,
+ llvm::cast<mlir::ArrayAttr>(value));
+ return ConstantOp::create(builder, loc, type,
+ llvm::cast<mlir::DenseElementsAttr>(value));
}
diff --git a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
index bf2bc43301a33..d65c89c3fcfa6 100644
--- a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
@@ -55,7 +55,7 @@ static MemRefType convertTensorToMemRef(RankedTensorType type) {
/// Insert an allocation and deallocation for the given MemRefType.
static Value insertAllocAndDealloc(MemRefType type, Location loc,
PatternRewriter &rewriter) {
- auto alloc = rewriter.create<memref::AllocOp>(loc, type);
+ auto alloc = memref::AllocOp::create(rewriter, loc, type);
// Make sure to allocate at the beginning of the block.
auto *parentBlock = alloc->getBlock();
@@ -63,7 +63,7 @@ static Value insertAllocAndDealloc(MemRefType type, Location loc,
// Make sure to deallocate this alloc at the end of the block. This is fine
// as toy functions have no control flow.
- auto dealloc = rewriter.create<memref::DeallocOp>(loc, alloc);
+ auto dealloc = memref::DeallocOp::create(rewriter, loc, alloc);
dealloc->moveBefore(&parentBlock->back());
return alloc;
}
@@ -99,8 +99,8 @@ static void lowerOpToLoops(Operation *op, ValueRange operands,
// and the loop induction variables. This function will return the value
// to store at the current index.
Value valueToStore = processIteration(nestedBuilder, operands, ivs);
- nestedBuilder.create<affine::AffineStoreOp>(loc, valueToStore, alloc,
- ivs);
+ affine::AffineStoreOp::create(nestedBuilder, loc, valueToStore, alloc,
+ ivs);
});
// Replace this operation with the generated alloc.
@@ -131,15 +131,15 @@ struct BinaryOpLowering : public ConversionPattern {
// Generate loads for the element of 'lhs' and 'rhs' at the
// inner loop.
- auto loadedLhs = builder.create<affine::AffineLoadOp>(
- loc, binaryAdaptor.getLhs(), loopIvs);
- auto loadedRhs = builder.create<affine::AffineLoadOp>(
- loc, binaryAdaptor.getRhs(), loopIvs);
+ auto loadedLhs = affine::AffineLoadOp::create(
+ builder, loc, binaryAdaptor.getLhs(), loopIvs);
+ auto loadedRhs = affine::AffineLoadOp::create(
+ builder, loc, binaryAdaptor.getRhs(), loopIvs);
// Create the binary operation performed on the loaded
// values.
- return builder.create<LoweredBinaryOp>(loc, loadedLhs,
- loadedRhs);
+ return LoweredBinaryOp::create(builder, loc, loadedLhs,
+ loadedRhs);
});
return success();
}
@@ -174,11 +174,11 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
if (!valueShape.empty()) {
for (auto i : llvm::seq<int64_t>(0, *llvm::max_element(valueShape)))
constantIndices.push_back(
- rewriter.create<arith::ConstantIndexOp>(loc, i));
+ arith::ConstantIndexOp::create(rewriter, loc, i));
} else {
// This is the case of a tensor of rank 0.
constantIndices.push_back(
- rewriter.create<arith::ConstantIndexOp>(loc, 0));
+ arith::ConstantIndexOp::create(rewriter, loc, 0));
}
// The constant operation represents a multi-dimensional constant, so we
@@ -191,9 +191,9 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
// The last dimension is the base case of the recursion, at this point
// we store the element at the given index.
if (dimension == valueShape.size()) {
- rewriter.create<affine::AffineStoreOp>(
- loc, rewriter.create<arith::ConstantOp>(loc, *valueIt++), alloc,
- llvm::ArrayRef(indices));
+ affine::AffineStoreOp::create(
+ rewriter, loc, arith::ConstantOp::create(rewriter, loc, *valueIt++),
+ alloc, llvm::ArrayRef(indices));
return;
}
@@ -238,8 +238,8 @@ struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
}
// Create a new non-toy function, with the same region.
- auto func = rewriter.create<mlir::func::FuncOp>(op.getLoc(), op.getName(),
- op.getFunctionType());
+ auto func = mlir::func::FuncOp::create(rewriter, op.getLoc(), op.getName(),
+ op.getFunctionType());
rewriter.inlineRegionBefore(op.getRegion(), func.getBody(), func.end());
rewriter.eraseOp(op);
return success();
@@ -308,8 +308,8 @@ struct TransposeOpLowering : public ConversionPattern {
// Transpose the elements by generating a load from the
// reverse indices.
SmallVector<Value, 2> reverseIvs(llvm::reverse(loopIvs));
- return builder.create<affine::AffineLoadOp>(loc, input,
- reverseIvs);
+ return affine::AffineLoadOp::create(builder, loc, input,
+ reverseIvs);
});
return success();
}
diff --git a/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp b/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
index 54eeb275ae85e..43a84da88e189 100644
--- a/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
+++ b/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
@@ -86,12 +86,12 @@ class PrintOpLowering : public ConversionPattern {
// Create a loop for each of the dimensions within the shape.
SmallVector<Value, 4> loopIvs;
for (unsigned i = 0, e = memRefShape.size(); i != e; ++i) {
- auto lowerBound = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+ auto lowerBound = arith::ConstantIndexOp::create(rewriter, loc, 0);
auto upperBound =
- rewriter.create<arith::ConstantIndexOp>(loc, memRefShape[i]);
- auto step = rewriter.create<arith::ConstantIndexOp>(loc, 1);
+ arith::ConstantIndexOp::create(rewriter, loc, memRefShape[i]);
+ auto step = arith::ConstantIndexOp::create(rewriter, loc, 1);
auto loop =
- rewriter.create<scf::ForOp>(loc, lowerBound, upperBound, step);
+ scf::ForOp::create(rewriter, loc, lowerBound, upperBound, step);
for (Operation &nested : make_early_inc_range(*loop.getBody()))
rewriter.eraseOp(&nested);
loopIvs.push_back(loop.getInductionVar());
@@ -101,19 +101,18 @@ class PrintOpLowering : public ConversionPattern {
// Insert a newline after each of the inner dimensions of the shape.
if (i != e - 1)
- rewriter.create<LLVM::CallOp>(loc, getPrintfType(context), printfRef,
- newLineCst);
- rewriter.create<scf::YieldOp>(loc);
+ LLVM::CallOp::create(rewriter, loc, getPrintfType(context), printfRef,
+ newLineCst);
+ scf::YieldOp::create(rewriter, loc);
rewriter.setInsertionPointToStart(loop.getBody());
}
// Generate a call to printf for the current element of the loop.
auto printOp = cast<toy::PrintOp>(op);
auto elementLoad =
- rewriter.create<memref::LoadOp>(loc, printOp.getInput(), loopIvs);
- rewriter.create<LLVM::CallOp>(
- loc, getPrintfType(context), printfRef,
- ArrayRef<Value>({formatSpecifierCst, elementLoad}));
+ memref::LoadOp::create(rewriter, loc, printOp.getInput(), loopIvs);
+ LLVM::CallOp::create(rewriter, loc, getPrintfType(context), printfRef,
+ ArrayRef<Value>({formatSpecifierCst, elementLoad}));
// Notify the rewriter that this operation has been removed.
rewriter.eraseOp(op);
@@ -142,8 +141,8 @@ class PrintOpLowering : public ConversionPattern {
// Insert the printf function into the body of the parent module.
PatternRewriter::InsertionGuard insertGuard(rewriter);
rewriter.setInsertionPointToStart(module.getBody());
- rewriter.create<LLVM::LLVMFuncOp>(module.getLoc(), "printf",
- getPrintfType(context));
+ LLVM::LLVMFuncOp::create(rewriter, module.getLoc(), "printf",
+ getPrintfType(context));
return SymbolRefAttr::get(context, "printf");
}
@@ -159,19 +158,19 @@ class PrintOpLowering : public ConversionPattern {
builder.setInsertionPointToStart(module.getBody());
auto type = LLVM::LLVMArrayType::get(
IntegerType::get(builder.getContext(), 8), value.size());
- global = builder.create<LLVM::GlobalOp>(loc, type, /*isConstant=*/true,
- LLVM::Linkage::Internal, name,
- builder.getStringAttr(value),
- /*alignment=*/0);
+ global = LLVM::GlobalOp::create(builder, loc, type, /*isConstant=*/true,
+ LLVM::Linkage::Internal, name,
+ builder.getStringAttr(value),
+ /*alignment=*/0);
}
// Get the pointer to the first character in the global string.
- Value globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
- Value cst0 = builder.create<LLVM::ConstantOp>(loc, builder.getI64Type(),
- builder.getIndexAttr(0));
- return builder.create<LLVM::GEPOp>(
- loc, LLVM::LLVMPointerType::get(builder.getContext()), global.getType(),
- globalPtr, ArrayRef<Value>({cst0, cst0}));
+ Value globalPtr = LLVM::AddressOfOp::create(builder, loc, global);
+ Value cst0 = LLVM::ConstantOp::create(builder, loc, builder.getI64Type(),
+ builder.getIndexAttr(0));
+ return LLVM::GEPOp::create(
+ builder, loc, LLVM::LLVMPointerType::get(builder.getContext()),
+ global.getType(), globalPtr, ArrayRef<Value>({cst0, cst0}));
}
};
} // namespace
diff --git a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
index 2490f17163b80..75dbc9104a992 100644
--- a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
@@ -183,8 +183,8 @@ class MLIRGenImpl {
argTypes.push_back(type);
}
auto funcType = builder.getFunctionType(argTypes, /*results=*/{});
- return builder.create<mlir::toy::FuncOp>(location, proto.getName(),
- funcType);
+ return mlir::toy::FuncOp::create(builder, location, proto.getName(),
+ funcType);
}
/// Emit a new function and add it to the MLIR module.
@@ -227,7 +227,7 @@ class MLIRGenImpl {
if (!entryBlock.empty())
returnOp = dyn_cast<ReturnOp>(entryBlock.back());
if (!returnOp) {
- builder.create<ReturnOp>(loc(funcAST.getProto()->loc()));
+ ReturnOp::create(builder, loc(funcAST.getProto()->loc()));
} else if (returnOp.hasOperand()) {
// Otherwise, if this return operation has an operand then add a result to
// the function.
@@ -333,7 +333,7 @@ class MLIRGenImpl {
emitError(location, "invalid access into struct expression");
return nullptr;
}
- return builder.create<StructAccessOp>(location, lhs, *accessIndex);
+ return StructAccessOp::create(builder, location, lhs, *accessIndex);
}
// Otherwise, this is a normal binary op.
@@ -345,9 +345,9 @@ class MLIRGenImpl {
// support '+' and '*'.
switch (binop.getOp()) {
case '+':
- return builder.create<AddOp>(location, lhs, rhs);
+ return AddOp::create(builder, location, lhs, rhs);
case '*':
- return builder.create<MulOp>(location, lhs, rhs);
+ return MulOp::create(builder, location, lhs, rhs);
}
emitError(location, "invalid binary operator '") << binop.getOp() << "'";
@@ -378,8 +378,8 @@ class MLIRGenImpl {
}
// Otherwise, this return operation has zero operands.
- builder.create<ReturnOp>(location,
- expr ? ArrayRef(expr) : ArrayRef<mlir::Value>());
+ ReturnOp::create(builder, location,
+ expr ? ArrayRef(expr) : ArrayRef<mlir::Value>());
return mlir::success();
}
@@ -464,7 +464,7 @@ class MLIRGenImpl {
// Build the MLIR op `toy.constant`. This invokes the `ConstantOp::build`
// method.
- return builder.create<ConstantOp>(loc(lit.loc()), type, dataAttribute);
+ return ConstantOp::create(builder, loc(lit.loc()), type, dataAttribute);
}
/// Emit a struct literal. It will be emitted as an array of
@@ -477,7 +477,8 @@ class MLIRGenImpl {
// Build the MLIR op `toy.struct_constant`. This invokes the
// `StructConstantOp::build` method.
- return builder.create<StructConstantOp>(loc(lit.loc()), dataType, dataAttr);
+ return StructConstantOp::create(builder, loc(lit.loc()), dataType,
+ dataAttr);
}
/// Recursive helper function to accumulate the data that compose an array
@@ -522,7 +523,7 @@ class MLIRGenImpl {
"does not accept multiple arguments");
return nullptr;
}
- return builder.create<TransposeOp>(location, operands[0]);
+ return TransposeOp::create(builder, location, operands[0]);
}
// Otherwise this is a call to a user-defined function. Calls to
@@ -534,8 +535,9 @@ class MLIRGenImpl {
return nullptr;
}
mlir::toy::FuncOp calledFunc = calledFuncIt->second;
- return builder.create<GenericCallOp>(
- location, calledFunc.getFunctionType().getResult(0), callee, operands);
+ return GenericCallOp::create(builder, location,
+ calledFunc.getFunctionType().getResult(0),
+ callee, operands);
}
/// Emit a print expression. It emits specific operations for two builtins:
@@ -545,13 +547,13 @@ class MLIRGenImpl {
if (!arg)
return mlir::failure();
- builder.create<PrintOp>(loc(call.loc()), arg);
+ PrintOp::create(builder, loc(call.loc()), arg);
return mlir::success();
}
/// Emit a constant for a single number (FIXME: semantic? broadcast?)
mlir::Value mlirGen(NumberExprAST &num) {
- return builder.create<ConstantOp>(loc(num.loc()), num.getValue());
+ return ConstantOp::create(builder, loc(num.loc()), num.getValue());
}
/// Dispatch codegen for the right expression subclass using RTTI.
@@ -613,8 +615,8 @@ class MLIRGenImpl {
// declared with specific shape, we emit a "reshape" operation. It will
// get optimized out later as needed.
} else if (!varType.shape.empty()) {
- value = builder.create<ReshapeOp>(loc(vardecl.loc()),
- getType(varType.shape), value);
+ value = ReshapeOp::create(builder, loc(vardecl.loc()),
+ getType(varType.shape), value);
}
// Register the value in the symbol table.