[flang-commits] [flang] 986b5c5 - [mlir] Flip Async/GPU/OpenACC/OpenMP to use Both accessors
River Riddle via flang-commits
flang-commits@lists.llvm.org
Wed Sep 21 17:52:09 PDT 2022
Author: River Riddle
Date: 2022-09-21T17:36:13-07:00
New Revision: 986b5c56ea678dcc33cb55270bb79a83a20e5d60
URL: https://github.com/llvm/llvm-project/commit/986b5c56ea678dcc33cb55270bb79a83a20e5d60
DIFF: https://github.com/llvm/llvm-project/commit/986b5c56ea678dcc33cb55270bb79a83a20e5d60.diff
LOG: [mlir] Flip Async/GPU/OpenACC/OpenMP to use Both accessors
This allows the old API usages to be updated incrementally rather than
all at once. These dialects will stay on Both for a short while and then
be flipped to prefixed once all API usages have been updated.
Differential Revision: https://reviews.llvm.org/D134386
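For context, kEmitAccessorPrefix_Both makes ODS emit both the raw accessor (e.g. dependencies()) and the get-prefixed accessor (e.g. getDependencies()) for each operand, result, attribute, and region, so existing call sites keep compiling while new code migrates. The sketch below shows a call site during the transition; it refers to the async.execute $dependencies operand visible in the AsyncOps.td hunk further down, but the helper function itself is illustrative and not part of this commit.

#include "mlir/Dialect/Async/IR/Async.h"
#include <cassert>

// Minimal sketch: while the Async dialect is on kEmitAccessorPrefix_Both,
// both spellings of the accessor for the `$dependencies` operand are
// generated and return the same operand range.
static void useBothAccessorForms(mlir::async::ExecuteOp executeOp) {
  auto rawForm = executeOp.dependencies();         // raw accessor, kept only during migration
  auto prefixedForm = executeOp.getDependencies(); // prefixed accessor, the eventual sole form
  assert(rawForm.size() == prefixedForm.size() && "same underlying operands");
}

Once every caller uses the prefixed form, a dialect can be flipped to kEmitAccessorPrefix_Prefixed and the raw spellings go away; that is also why this change renames names like $operands to $bodyOperands where the prefixed accessor would collide with an existing method such as Operation::getOperands().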
Added:
Modified:
flang/lib/Lower/OpenACC.cpp
mlir/include/mlir/Dialect/Async/IR/AsyncDialect.td
mlir/include/mlir/Dialect/Async/IR/AsyncOps.td
mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td
mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp
mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
mlir/lib/Dialect/Async/IR/Async.cpp
mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
mlir/lib/Dialect/GPU/Transforms/AsyncRegionRewriter.cpp
mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
mlir/test/Dialect/Async/ops.mlir
mlir/test/Dialect/GPU/invalid.mlir
mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
Removed:
################################################################################
diff --git a/flang/lib/Lower/OpenACC.cpp b/flang/lib/Lower/OpenACC.cpp
index 9d525cec784e3..89c7d005ca350 100644
--- a/flang/lib/Lower/OpenACC.cpp
+++ b/flang/lib/Lower/OpenACC.cpp
@@ -350,8 +350,7 @@ createLoopOp(Fortran::lower::AbstractConverter &converter,
auto loopOp = createRegionOp<mlir::acc::LoopOp, mlir::acc::YieldOp>(
firOpBuilder, currentLocation, operands, operandSegments);
- loopOp->setAttr(mlir::acc::LoopOp::getExecutionMappingAttrName(),
- firOpBuilder.getI64IntegerAttr(executionMapping));
+ loopOp.setExecMappingAttr(firOpBuilder.getI64IntegerAttr(executionMapping));
// Lower clauses mapped to attributes
for (const Fortran::parser::AccClause &clause : accClauseList.v) {
@@ -361,18 +360,15 @@ createLoopOp(Fortran::lower::AbstractConverter &converter,
const std::optional<int64_t> collapseValue =
Fortran::evaluate::ToInt64(*expr);
if (collapseValue) {
- loopOp->setAttr(mlir::acc::LoopOp::getCollapseAttrName(),
- firOpBuilder.getI64IntegerAttr(*collapseValue));
+ loopOp.setCollapseAttr(firOpBuilder.getI64IntegerAttr(*collapseValue));
}
} else if (std::get_if<Fortran::parser::AccClause::Seq>(&clause.u)) {
- loopOp->setAttr(mlir::acc::LoopOp::getSeqAttrName(),
- firOpBuilder.getUnitAttr());
+ loopOp.setSeqAttr(firOpBuilder.getUnitAttr());
} else if (std::get_if<Fortran::parser::AccClause::Independent>(
&clause.u)) {
- loopOp->setAttr(mlir::acc::LoopOp::getIndependentAttrName(),
- firOpBuilder.getUnitAttr());
+ loopOp.setIndependentAttr(firOpBuilder.getUnitAttr());
} else if (std::get_if<Fortran::parser::AccClause::Auto>(&clause.u)) {
- loopOp->setAttr(mlir::acc::LoopOp::getAutoAttrName(),
+ loopOp->setAttr(mlir::acc::LoopOp::getAutoAttrStrName(),
firOpBuilder.getUnitAttr());
}
}
diff --git a/mlir/include/mlir/Dialect/Async/IR/AsyncDialect.td b/mlir/include/mlir/Dialect/Async/IR/AsyncDialect.td
index 7da6cda5cbe04..d22e7a2bbcb51 100644
--- a/mlir/include/mlir/Dialect/Async/IR/AsyncDialect.td
+++ b/mlir/include/mlir/Dialect/Async/IR/AsyncDialect.td
@@ -40,7 +40,7 @@ def AsyncDialect : Dialect {
}];
// TODO: Prefixed form overlaps with generated names, update before flipping.
- let emitAccessorPrefix = kEmitAccessorPrefix_Raw;
+ let emitAccessorPrefix = kEmitAccessorPrefix_Both;
}
#endif // ASYNC_DIALECT_TD
diff --git a/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td b/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td
index 50f8e434951d6..0830e95fb15ff 100644
--- a/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td
+++ b/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td
@@ -76,11 +76,11 @@ def Async_ExecuteOp :
}];
let arguments = (ins Variadic<Async_TokenType>:$dependencies,
- Variadic<Async_AnyValueOrTokenType>:$operands);
+ Variadic<Async_AnyValueOrTokenType>:$bodyOperands);
let results = (outs Async_TokenType:$token,
- Variadic<Async_ValueType>:$results);
- let regions = (region SizedRegion<1>:$body);
+ Variadic<Async_ValueType>:$bodyResults);
+ let regions = (region SizedRegion<1>:$bodyRegion);
let hasCustomAssemblyFormat = 1;
let skipDefaultBuilders = 1;
diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td b/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
index 0541444e52b14..0258a9b350e94 100644
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
@@ -57,9 +57,8 @@ def GPU_Dialect : Dialect {
let useDefaultAttributePrinterParser = 1;
let useDefaultTypePrinterParser = 1;
- // TODO: This has overlapping accessors with generated when switched to
- // prefixed. Fix and update to _Both & then _Prefixed.
- let emitAccessorPrefix = kEmitAccessorPrefix_Raw;
+ // TODO: Update this to _Prefixed.
+ let emitAccessorPrefix = kEmitAccessorPrefix_Both;
}
def GPU_AsyncToken : DialectType<
@@ -114,10 +113,7 @@ def GPU_AsyncOpInterface : OpInterface<"AsyncOpInterface"> {
InterfaceMethod<[{
Query the result that represents the async token to depend on.
}],
- "OpResult", "getAsyncToken", (ins), [{}], [{
- ConcreteOp op = cast<ConcreteOp>(this->getOperation());
- return op.asyncToken().template dyn_cast_or_null<OpResult>();
- }]
+ "Value", "getAsyncToken"
>
];
}
diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
index 17c8661eff4f2..c7816063bf870 100644
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
@@ -307,10 +307,6 @@ def GPU_GPUFuncOp : GPU_Op<"func", [
return "workgroup_attributions";
}
- /// Returns the type of this function.
- /// FIXME: Remove when GPU uses prefixed accessors.
- FunctionType getFunctionType() { return function_type(); }
-
/// Returns the argument types of this function.
ArrayRef<Type> getArgumentTypes() { return getFunctionType().getInputs(); }
@@ -338,7 +334,7 @@ def GPU_LaunchFuncOp : GPU_Op<"launch_func",
Index:$gridSizeX, Index:$gridSizeY, Index:$gridSizeZ,
Index:$blockSizeX, Index:$blockSizeY, Index:$blockSizeZ,
Optional<I32>:$dynamicSharedMemorySize,
- Variadic<AnyType>:$operands)>,
+ Variadic<AnyType>:$kernelOperands)>,
Results<(outs Optional<GPU_AsyncToken>:$asyncToken)> {
let summary = "Launches a function as a GPU kernel";
@@ -455,9 +451,6 @@ def GPU_LaunchFuncOp : GPU_Op<"launch_func",
// present since it is run before the verifier of this op.
friend LogicalResult GPUDialect::verifyOperationAttribute(Operation *,
NamedAttribute);
-
- /// The name of the symbol reference attribute specifying the kernel to launch.
- static StringRef getKernelAttrName() { return "kernel"; }
}];
let assemblyFormat = [{
@@ -466,7 +459,7 @@ def GPU_LaunchFuncOp : GPU_Op<"launch_func",
`blocks` `in` ` ` `(`$gridSizeX`,` $gridSizeY`,` $gridSizeZ`)`
`threads` `in` ` ` `(`$blockSizeX`,` $blockSizeY`,` $blockSizeZ`)`
(`dynamic_shared_memory_size` $dynamicSharedMemorySize^)?
- custom<LaunchFuncOperands>($operands, type($operands)) attr-dict
+ custom<LaunchFuncOperands>($kernelOperands, type($kernelOperands)) attr-dict
}];
let hasVerifier = 1;
}
@@ -743,10 +736,10 @@ def I32OrF32 : TypeConstraint<Or<[I32.predicate, F32.predicate]>,
"i32 or f32">;
def GPU_ShuffleOp : GPU_Op<
- "shuffle", [NoSideEffect, AllTypesMatch<["value", "result"]>]>,
+ "shuffle", [NoSideEffect, AllTypesMatch<["value", "shuffleResult"]>]>,
Arguments<(ins I32OrF32:$value, I32:$offset, I32:$width,
GPU_ShuffleModeAttr:$mode)>,
- Results<(outs I32OrF32:$result, I1:$valid)> {
+ Results<(outs I32OrF32:$shuffleResult, I1:$valid)> {
let summary = "Shuffles values within a subgroup.";
let description = [{
The "shuffle" op moves values to a
diff erent invocation within the same
@@ -828,7 +821,7 @@ def GPU_GPUModuleOp : GPU_Op<"module", [
```
}];
let builders = [OpBuilder<(ins "StringRef":$name)>];
- let regions = (region SizedRegion<1>:$body);
+ let regions = (region SizedRegion<1>:$bodyRegion);
let hasCustomAssemblyFormat = 1;
// We need to ensure the block inside the region is properly terminated;
@@ -1263,14 +1256,14 @@ def GPU_SubgroupMmaElementwiseOp : GPU_Op<"subgroup_mma_elementwise",
Example:
```mlir
- %0 = %A, %B { operation = "ADD" } :
+ %0 = %A, %B { opType = "ADD" } :
(!gpu.mma_matrix<16x16xf16, "COp">, !gpu.mma_matrix<16x16xf16, "COp">)
-> !gpu.mma_matrix<16x16xf16, "COp">
```
}];
let arguments = (ins Variadic<GPU_MMAMatrix>:$args,
- MMAElementWiseAttr:$operation);
+ MMAElementWiseAttr:$opType);
let results = (outs GPU_MMAMatrix:$res);
@@ -1281,7 +1274,7 @@ def GPU_SubgroupMmaElementwiseOp : GPU_Op<"subgroup_mma_elementwise",
}];
let assemblyFormat = [{
- $operation $args attr-dict `:` functional-type($args, $res)
+ $opType $args attr-dict `:` functional-type($args, $res)
}];
}
diff --git a/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td b/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td
index 2337e382a123a..d69e2524f0a51 100644
--- a/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td
+++ b/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td
@@ -29,7 +29,7 @@ def OpenACC_Dialect : Dialect {
let cppNamespace = "::mlir::acc";
// TODO: Flip to _Prefixed.
- let emitAccessorPrefix = kEmitAccessorPrefix_Raw;
+ let emitAccessorPrefix = kEmitAccessorPrefix_Both;
}
// AccCommon requires definition of OpenACC_Dialect.
@@ -390,11 +390,11 @@ def OpenACC_LoopOp : OpenACC_Op<"loop",
let regions = (region AnyRegion:$region);
let extraClassDeclaration = [{
- static StringRef getCollapseAttrName() { return "collapse"; }
- static StringRef getSeqAttrName() { return "seq"; }
- static StringRef getIndependentAttrName() { return "independent"; }
- static StringRef getAutoAttrName() { return "auto"; }
- static StringRef getExecutionMappingAttrName() { return "exec_mapping"; }
+ static StringRef getCollapseAttrStrName() { return "collapse"; }
+ static StringRef getSeqAttrStrName() { return "seq"; }
+ static StringRef getIndependentAttrStrName() { return "independent"; }
+ static StringRef getAutoAttrStrName() { return "auto"; }
+ static StringRef getExecutionMappingAttrStrName() { return "exec_mapping"; }
static StringRef getGangKeyword() { return "gang"; }
static StringRef getGangNumKeyword() { return "num"; }
static StringRef getGangStaticKeyword() { return "static"; }
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
index cb130563412d6..3e5a3fdb4f09e 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
@@ -30,7 +30,7 @@ def OpenMP_Dialect : Dialect {
let useDefaultAttributePrinterParser = 1;
// TODO: Flip to _Prefixed.
- let emitAccessorPrefix = kEmitAccessorPrefix_Raw;
+ let emitAccessorPrefix = kEmitAccessorPrefix_Both;
}
// OmpCommon requires definition of OpenACC_Dialect.
@@ -114,15 +114,6 @@ def ParallelOp : OpenMP_Op<"parallel", [
) $region attr-dict
}];
let hasVerifier = 1;
- let extraClassDeclaration = [{
- // TODO: remove this once emitAccessorPrefix is set to
- // kEmitAccessorPrefix_Prefixed for the dialect.
- /// Returns the reduction variables
- SmallVector<Value> getReductionVars() {
- return SmallVector<Value>(reduction_vars().begin(),
- reduction_vars().end());
- }
- }];
}
def TerminatorOp : OpenMP_Op<"terminator", [Terminator, NoSideEffect]> {
@@ -221,16 +212,6 @@ def SectionsOp : OpenMP_Op<"sections", [AttrSizedOperandSegments,
let hasVerifier = 1;
let hasRegionVerifier = 1;
-
- let extraClassDeclaration = [{
- // TODO: remove this once emitAccessorPrefix is set to
- // kEmitAccessorPrefix_Prefixed for the dialect.
- /// Returns the reduction variables
- SmallVector<Value> getReductionVars() {
- return SmallVector<Value>(reduction_vars().begin(),
- reduction_vars().end());
- }
- }];
}
//===----------------------------------------------------------------------===//
@@ -363,14 +344,6 @@ def WsLoopOp : OpenMP_Op<"wsloop", [AttrSizedOperandSegments,
/// Returns the number of reduction variables.
unsigned getNumReductionVars() { return reduction_vars().size(); }
-
- // TODO: remove this once emitAccessorPrefix is set to
- // kEmitAccessorPrefix_Prefixed for the dialect.
- /// Returns the reduction variables
- SmallVector<Value> getReductionVars() {
- return SmallVector<Value>(reduction_vars().begin(),
- reduction_vars().end());
- }
}];
let hasCustomAssemblyFormat = 1;
let assemblyFormat = [{
@@ -723,7 +696,7 @@ def TaskLoopOp : OpenMP_Op<"taskloop", [AttrSizedOperandSegments,
let extraClassDeclaration = [{
/// Returns the reduction variables
- SmallVector<Value> getReductionVars();
+ SmallVector<Value> getAllReductionVars();
void getEffects(SmallVectorImpl<MemoryEffects::EffectInstance> &effects);
}];
@@ -779,7 +752,7 @@ def TaskGroupOp : OpenMP_Op<"taskgroup", [AttrSizedOperandSegments,
let extraClassDeclaration = [{
/// Returns the reduction variables
- operand_range getReductionVars() { return task_reduction_vars(); }
+ operand_range getAllReductionVars() { return task_reduction_vars(); }
}];
let hasVerifier = 1;
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
index d1dc9bd86beaa..a42ebc7253410 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
@@ -41,7 +41,9 @@ def ReductionClauseInterface : OpInterface<"ReductionClauseInterface"> {
let methods = [
InterfaceMethod<
"Get reduction vars", "::mlir::SmallVector<::mlir::Value>",
- "getReductionVars">,
+ "getAllReductionVars", (ins), [{}], [{
+ return $_op.getReductionVars();
+ }]>,
];
}
diff --git a/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp b/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp
index e8a9c955c3d8d..711de63443419 100644
--- a/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp
@@ -337,9 +337,8 @@ struct WmmaElementwiseOpToNVVMLowering
extractedOperands.push_back(rewriter.create<LLVM::ExtractValueOp>(
loc, adaptor.getOperands()[opIdx], i));
}
- Value element =
- createScalarOp(rewriter, loc, subgroupMmaElementwiseOp.operation(),
- extractedOperands);
+ Value element = createScalarOp(
+ rewriter, loc, subgroupMmaElementwiseOp.opType(), extractedOperands);
matrixStruct =
rewriter.create<LLVM::InsertValueOp>(loc, matrixStruct, element, i);
}
diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
index 2b8389450e31d..e1fa1fefbf246 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
+++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
@@ -333,7 +333,7 @@ LogicalResult GPUModuleConversion::matchAndRewrite(
// Move the region from the module op into the SPIR-V module.
Region &spvModuleRegion = spvModule.getRegion();
- rewriter.inlineRegionBefore(moduleOp.body(), spvModuleRegion,
+ rewriter.inlineRegionBefore(moduleOp.getBodyRegion(), spvModuleRegion,
spvModuleRegion.begin());
// The spv.module build method adds a block. Remove that.
rewriter.eraseBlock(&spvModuleRegion.back());
diff --git a/mlir/lib/Dialect/Async/IR/Async.cpp b/mlir/lib/Dialect/Async/IR/Async.cpp
index 8a68e0d77a7ff..c31fbc4af87ce 100644
--- a/mlir/lib/Dialect/Async/IR/Async.cpp
+++ b/mlir/lib/Dialect/Async/IR/Async.cpp
@@ -37,9 +37,10 @@ LogicalResult YieldOp::verify() {
// Get the underlying value types from async values returned from the
// parent `async.execute` operation.
auto executeOp = (*this)->getParentOfType<ExecuteOp>();
- auto types = llvm::map_range(executeOp.results(), [](const OpResult &result) {
- return result.getType().cast<ValueType>().getValueType();
- });
+ auto types =
+ llvm::map_range(executeOp.bodyResults(), [](const OpResult &result) {
+ return result.getType().cast<ValueType>().getValueType();
+ });
if (getOperandTypes() != types)
return emitOpError("operand types do not match the types returned from "
@@ -61,7 +62,7 @@ constexpr char kOperandSegmentSizesAttr[] = "operand_segment_sizes";
OperandRange ExecuteOp::getSuccessorEntryOperands(Optional<unsigned> index) {
assert(index && *index == 0 && "invalid region index");
- return operands();
+ return bodyOperands();
}
bool ExecuteOp::areTypesCompatible(Type lhs, Type rhs) {
@@ -79,12 +80,13 @@ void ExecuteOp::getSuccessorRegions(Optional<unsigned> index,
// The `body` region branch back to the parent operation.
if (index) {
assert(*index == 0 && "invalid region index");
- regions.push_back(RegionSuccessor(results()));
+ regions.push_back(RegionSuccessor(bodyResults()));
return;
}
// Otherwise the successor is the body region.
- regions.push_back(RegionSuccessor(&body(), body().getArguments()));
+ regions.push_back(
+ RegionSuccessor(&bodyRegion(), bodyRegion().getArguments()));
}
void ExecuteOp::build(OpBuilder &builder, OperationState &result,
@@ -138,10 +140,10 @@ void ExecuteOp::print(OpAsmPrinter &p) {
p << " [" << dependencies() << "]";
// (%value as %unwrapped: !async.value<!arg.type>, ...)
- if (!operands().empty()) {
+ if (!bodyOperands().empty()) {
p << " (";
- Block *entry = body().empty() ? nullptr : &body().front();
- llvm::interleaveComma(operands(), p, [&, n = 0](Value operand) mutable {
+ Block *entry = bodyRegion().empty() ? nullptr : &bodyRegion().front();
+ llvm::interleaveComma(bodyOperands(), p, [&, n = 0](Value operand) mutable {
Value argument = entry ? entry->getArgument(n++) : Value();
p << operand << " as " << argument << ": " << operand.getType();
});
@@ -153,7 +155,7 @@ void ExecuteOp::print(OpAsmPrinter &p) {
p.printOptionalAttrDictWithKeyword((*this)->getAttrs(),
{kOperandSegmentSizesAttr});
p << ' ';
- p.printRegion(body(), /*printEntryBlockArgs=*/false);
+ p.printRegion(bodyRegion(), /*printEntryBlockArgs=*/false);
}
ParseResult ExecuteOp::parse(OpAsmParser &parser, OperationState &result) {
@@ -226,12 +228,12 @@ ParseResult ExecuteOp::parse(OpAsmParser &parser, OperationState &result) {
LogicalResult ExecuteOp::verifyRegions() {
// Unwrap async.execute value operands types.
- auto unwrappedTypes = llvm::map_range(operands(), [](Value operand) {
+ auto unwrappedTypes = llvm::map_range(bodyOperands(), [](Value operand) {
return operand.getType().cast<ValueType>().getValueType();
});
// Verify that unwrapped argument types matches the body region arguments.
- if (body().getArgumentTypes() != unwrappedTypes)
+ if (bodyRegion().getArgumentTypes() != unwrappedTypes)
return emitOpError("async body region argument types do not match the "
"execute operation arguments types");
diff --git a/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp b/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
index 2074947fe33d7..258400d02b132 100644
--- a/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
+++ b/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
@@ -250,13 +250,14 @@ outlineExecuteOp(SymbolTable &symbolTable, ExecuteOp execute) {
// Make sure that all constants will be inside the outlined async function to
// reduce the number of function arguments.
- cloneConstantsIntoTheRegion(execute.body());
+ cloneConstantsIntoTheRegion(execute.bodyRegion());
// Collect all outlined function inputs.
SetVector<mlir::Value> functionInputs(execute.dependencies().begin(),
execute.dependencies().end());
- functionInputs.insert(execute.operands().begin(), execute.operands().end());
- getUsedValuesDefinedAbove(execute.body(), functionInputs);
+ functionInputs.insert(execute.bodyOperands().begin(),
+ execute.bodyOperands().end());
+ getUsedValuesDefinedAbove(execute.bodyRegion(), functionInputs);
// Collect types for the outlined function inputs and outputs.
auto typesRange = llvm::map_range(
@@ -279,7 +280,7 @@ outlineExecuteOp(SymbolTable &symbolTable, ExecuteOp execute) {
// Prepare for coroutine conversion by creating the body of the function.
{
size_t numDependencies = execute.dependencies().size();
- size_t numOperands = execute.operands().size();
+ size_t numOperands = execute.bodyOperands().size();
// Await on all dependencies before starting to execute the body region.
for (size_t i = 0; i < numDependencies; ++i)
@@ -296,11 +297,11 @@ outlineExecuteOp(SymbolTable &symbolTable, ExecuteOp execute) {
// arguments.
BlockAndValueMapping valueMapping;
valueMapping.map(functionInputs, func.getArguments());
- valueMapping.map(execute.body().getArguments(), unwrappedOperands);
+ valueMapping.map(execute.bodyRegion().getArguments(), unwrappedOperands);
// Clone all operations from the execute operation body into the outlined
// function body.
- for (Operation &op : execute.body().getOps())
+ for (Operation &op : execute.bodyRegion().getOps())
builder.clone(op, valueMapping);
}
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index 5d997325743a4..e4c3552463b92 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -213,7 +213,7 @@ LogicalResult GPUDialect::verifyOperationAttribute(Operation *op,
// Ignore launch ops with missing attributes here. The errors will be
// reported by the verifiers of those ops.
if (!launchOp->getAttrOfType<SymbolRefAttr>(
- LaunchFuncOp::getKernelAttrName()))
+ LaunchFuncOp::getKernelAttrName(launchOp->getName())))
return success();
// Check that `launch_func` refers to a well-formed GPU kernel module.
@@ -703,7 +703,7 @@ void LaunchFuncOp::build(OpBuilder &builder, OperationState &result,
auto kernelSymbol =
SymbolRefAttr::get(kernelModule.getNameAttr(),
{SymbolRefAttr::get(kernelFunc.getNameAttr())});
- result.addAttribute(getKernelAttrName(), kernelSymbol);
+ result.addAttribute(getKernelAttrName(result.name), kernelSymbol);
SmallVector<int32_t, 9> segmentSizes(9, 1);
segmentSizes.front() = asyncDependencies.size();
segmentSizes[segmentSizes.size() - 2] = dynamicSharedMemorySize ? 1 : 0;
@@ -718,9 +718,11 @@ StringAttr LaunchFuncOp::getKernelModuleName() {
StringAttr LaunchFuncOp::getKernelName() { return kernel().getLeafReference(); }
-unsigned LaunchFuncOp::getNumKernelOperands() { return operands().size(); }
+unsigned LaunchFuncOp::getNumKernelOperands() {
+ return kernelOperands().size();
+}
-Value LaunchFuncOp::getKernelOperand(unsigned i) { return operands()[i]; }
+Value LaunchFuncOp::getKernelOperand(unsigned i) { return kernelOperands()[i]; }
KernelDim3 LaunchFuncOp::getGridSizeOperandValues() {
auto operands = getOperands().drop_front(asyncDependencies().size());
@@ -743,11 +745,6 @@ LogicalResult LaunchFuncOp::verify() {
GPUDialect::getContainerModuleAttrName() +
"' attribute");
- auto kernelAttr = (*this)->getAttrOfType<SymbolRefAttr>(getKernelAttrName());
- if (!kernelAttr)
- return emitOpError("symbol reference attribute '" + getKernelAttrName() +
- "' must be specified");
-
return success();
}
diff --git a/mlir/lib/Dialect/GPU/Transforms/AsyncRegionRewriter.cpp b/mlir/lib/Dialect/GPU/Transforms/AsyncRegionRewriter.cpp
index 92d186b3bd410..1b55fab6d134b 100644
--- a/mlir/lib/Dialect/GPU/Transforms/AsyncRegionRewriter.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/AsyncRegionRewriter.cpp
@@ -169,7 +169,7 @@ async::ExecuteOp addExecuteResults(async::ExecuteOp executeOp,
OpBuilder builder(executeOp);
auto newOp = builder.create<async::ExecuteOp>(
executeOp.getLoc(), TypeRange{resultTypes}.drop_front() /*drop token*/,
- executeOp.dependencies(), executeOp.operands());
+ executeOp.dependencies(), executeOp.bodyOperands());
BlockAndValueMapping mapper;
newOp.getRegion().getBlocks().clear();
executeOp.getRegion().cloneInto(&newOp.getRegion(), mapper);
@@ -258,7 +258,7 @@ struct GpuAsyncRegionPass::DeferWaitCallback {
// Set `it` to the beginning of the region and add asyncTokens to the
// async.execute operands.
it = executeOp.getBody()->begin();
- executeOp.operandsMutable().append(asyncTokens);
+ executeOp.bodyOperandsMutable().append(asyncTokens);
SmallVector<Type, 1> tokenTypes(
asyncTokens.size(), builder.getType<gpu::AsyncTokenType>());
SmallVector<Location, 1> tokenLocs(asyncTokens.size(),
@@ -301,7 +301,7 @@ struct GpuAsyncRegionPass::SingleTokenUseCallback {
void operator()(async::ExecuteOp executeOp) {
// Extract !gpu.async.token results which have multiple uses.
auto multiUseResults =
- llvm::make_filter_range(executeOp.results(), [](OpResult result) {
+ llvm::make_filter_range(executeOp.bodyResults(), [](OpResult result) {
if (result.use_empty() || result.hasOneUse())
return false;
auto valueType = result.getType().dyn_cast<async::ValueType>();
@@ -319,16 +319,16 @@ struct GpuAsyncRegionPass::SingleTokenUseCallback {
});
for (auto index : indices) {
- assert(!executeOp.results()[index].getUses().empty());
+ assert(!executeOp.bodyResults()[index].getUses().empty());
// Repeat async.yield token result, one for each use after the first one.
- auto uses = llvm::drop_begin(executeOp.results()[index].getUses());
+ auto uses = llvm::drop_begin(executeOp.bodyResults()[index].getUses());
auto count = std::distance(uses.begin(), uses.end());
auto yieldOp = cast<async::YieldOp>(executeOp.getBody()->getTerminator());
SmallVector<Value, 4> operands(count, yieldOp.getOperand(index));
executeOp = addExecuteResults(executeOp, operands);
// Update 'uses' to refer to the new executeOp.
- uses = llvm::drop_begin(executeOp.results()[index].getUses());
- auto results = executeOp.results().take_back(count);
+ uses = llvm::drop_begin(executeOp.bodyResults()[index].getUses());
+ auto results = executeOp.bodyResults().take_back(count);
for (auto pair : llvm::zip(uses, results))
std::get<0>(pair).set(std::get<1>(pair));
}
diff --git a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
index 82e276efdfa21..c7cb61ed3e289 100644
--- a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
+++ b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
@@ -579,7 +579,7 @@ ParseResult LoopOp::parse(OpAsmParser &parser, OperationState &result) {
return failure();
if (executionMapping != acc::OpenACCExecMapping::NONE)
- result.addAttribute(LoopOp::getExecutionMappingAttrName(),
+ result.addAttribute(LoopOp::getExecutionMappingAttrStrName(),
builder.getI64IntegerAttr(executionMapping));
// Parse optional results in case there is a reduce.
@@ -662,16 +662,16 @@ void LoopOp::print(OpAsmPrinter &printer) {
/*printBlockTerminators=*/true);
printer.printOptionalAttrDictWithKeyword(
- (*this)->getAttrs(), {LoopOp::getExecutionMappingAttrName(),
+ (*this)->getAttrs(), {LoopOp::getExecutionMappingAttrStrName(),
LoopOp::getOperandSegmentSizeAttr()});
}
LogicalResult acc::LoopOp::verify() {
// auto, independent and seq attribute are mutually exclusive.
if ((auto_() && (independent() || seq())) || (independent() && seq())) {
- return emitError("only one of " + acc::LoopOp::getAutoAttrName() + ", " +
- acc::LoopOp::getIndependentAttrName() + ", " +
- acc::LoopOp::getSeqAttrName() +
+ return emitError("only one of " + acc::LoopOp::getAutoAttrStrName() + ", " +
+ acc::LoopOp::getIndependentAttrStrName() + ", " +
+ acc::LoopOp::getSeqAttrStrName() +
" can be present at the same time");
}
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index a1189cd898c71..27f9d1ba8f6a3 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -663,7 +663,7 @@ LogicalResult ReductionOp::verify() {
"reduction clause interface";
while (op) {
for (const auto &var :
- cast<ReductionClauseInterface>(op).getReductionVars())
+ cast<ReductionClauseInterface>(op).getAllReductionVars())
if (var == accumulator())
return success();
op = op->getParentWithTrait<ReductionClauseInterface::Trait>();
@@ -689,7 +689,7 @@ LogicalResult TaskGroupOp::verify() {
//===----------------------------------------------------------------------===//
// TaskLoopOp
//===----------------------------------------------------------------------===//
-SmallVector<Value> TaskLoopOp::getReductionVars() {
+SmallVector<Value> TaskLoopOp::getAllReductionVars() {
SmallVector<Value> allReductionNvars(in_reduction_vars().begin(),
in_reduction_vars().end());
allReductionNvars.insert(allReductionNvars.end(), reduction_vars().begin(),
diff --git a/mlir/test/Dialect/Async/ops.mlir b/mlir/test/Dialect/Async/ops.mlir
index 99036f270f00d..c391fffd87ebc 100644
--- a/mlir/test/Dialect/Async/ops.mlir
+++ b/mlir/test/Dialect/Async/ops.mlir
@@ -26,13 +26,13 @@ func.func @empty_async_execute() -> !async.token {
// CHECK-LABEL: @return_async_value
func.func @return_async_value() -> !async.value<f32> {
// CHECK: async.execute -> !async.value<f32>
- %token, %results = async.execute -> !async.value<f32> {
+ %token, %bodyResults = async.execute -> !async.value<f32> {
%cst = arith.constant 1.000000e+00 : f32
async.yield %cst : f32
}
- // CHECK: return %results : !async.value<f32>
- return %results : !async.value<f32>
+ // CHECK: return %bodyResults : !async.value<f32>
+ return %bodyResults : !async.value<f32>
}
// CHECK-LABEL: @return_captured_value
@@ -49,14 +49,14 @@ func.func @return_captured_value() -> !async.token {
// CHECK-LABEL: @return_async_values
func.func @return_async_values() -> (!async.value<f32>, !async.value<f32>) {
- %token, %results:2 = async.execute -> (!async.value<f32>, !async.value<f32>) {
+ %token, %bodyResults:2 = async.execute -> (!async.value<f32>, !async.value<f32>) {
%cst1 = arith.constant 1.000000e+00 : f32
%cst2 = arith.constant 2.000000e+00 : f32
async.yield %cst1, %cst2 : f32, f32
}
- // CHECK: return %results#0, %results#1 : !async.value<f32>, !async.value<f32>
- return %results#0, %results#1 : !async.value<f32>, !async.value<f32>
+ // CHECK: return %bodyResults#0, %bodyResults#1 : !async.value<f32>, !async.value<f32>
+ return %bodyResults#0, %bodyResults#1 : !async.value<f32>, !async.value<f32>
}
// CHECK-LABEL: @async_token_dependencies
diff --git a/mlir/test/Dialect/GPU/invalid.mlir b/mlir/test/Dialect/GPU/invalid.mlir
index b48938c486bee..f3c81233f7b54 100644
--- a/mlir/test/Dialect/GPU/invalid.mlir
+++ b/mlir/test/Dialect/GPU/invalid.mlir
@@ -302,7 +302,7 @@ func.func @reduce_incorrect_yield(%arg0 : f32) {
// -----
func.func @shuffle_mismatching_type(%arg0 : f32, %arg1 : i32, %arg2 : i32) {
- // expected-error @+1 {{op failed to verify that all of {value, result} have same type}}
+ // expected-error @+1 {{op failed to verify that all of {value, shuffleResult} have same type}}
%shfl, %pred = "gpu.shuffle"(%arg0, %arg1, %arg2) { mode = #gpu<shuffle_mode xor> } : (f32, i32, i32) -> (i32, i1)
return
}
diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
index 37a4ea350bc4f..3426bbb605dd1 100644
--- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
@@ -692,7 +692,7 @@ static Value warpReduction(Location loc, OpBuilder &builder, Value input,
.create<gpu::ShuffleOp>(loc, laneVal, i,
/*width=*/size,
/*mode=*/gpu::ShuffleMode::XOR)
- .result();
+ .getShuffleResult();
laneVal = makeArithReduction(builder, loc, kind, laneVal, shuffled);
}
return laneVal;