[Mlir-commits] [mlir] de155f4 - [MLIR][OpenMP] Pretty printer and parser for omp.wsloop
Kiran Chandramohan
llvmlistbot at llvm.org
Thu Mar 18 06:50:44 PDT 2021
Author: David Truby
Date: 2021-03-18T13:37:01Z
New Revision: de155f4af2b5f0916b8f2d745e6da520bb7e1058
URL: https://github.com/llvm/llvm-project/commit/de155f4af2b5f0916b8f2d745e6da520bb7e1058
DIFF: https://github.com/llvm/llvm-project/commit/de155f4af2b5f0916b8f2d745e6da520bb7e1058.diff
LOG: [MLIR][OpenMP] Pretty printer and parser for omp.wsloop
Co-authored-by: Kiran Chandramohan <kiran.chandramohan at arm.com>
Reviewed By: ftynse
Differential Revision: https://reviews.llvm.org/D92327
Added:
Modified:
mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir
mlir/test/Dialect/OpenMP/ops.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
index 24a06c4d0d00..6c1f5c0e7f10 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
@@ -116,7 +116,8 @@ def TerminatorOp : OpenMP_Op<"terminator", [Terminator]> {
// 2.9.2 Workshare Loop Construct
//===----------------------------------------------------------------------===//
-def WsLoopOp : OpenMP_Op<"wsloop", [AttrSizedOperandSegments]> {
+def WsLoopOp : OpenMP_Op<"wsloop", [AttrSizedOperandSegments,
+ AllTypesMatch<["lowerBound", "upperBound", "step"]>]> {
let summary = "workshare loop construct";
let description = [{
The workshare loop construct specifies that the iterations of the loop(s)
@@ -130,13 +131,13 @@ def WsLoopOp : OpenMP_Op<"wsloop", [AttrSizedOperandSegments]> {
by "omp.yield" instruction without operands.
```
- omp.wsloop (%i1, %i2) = (%c0, %c0) to (%c10, %c10) step (%c1, %c1) {
- %a = load %arrA[%i1, %i2] : memref<?x?xf32>
- %b = load %arrB[%i1, %i2] : memref<?x?xf32>
- %sum = addf %a, %b : f32
- store %sum, %arrC[%i1, %i2] : memref<?x?xf32>
- omp.yield
- }
+ omp.wsloop (%i1, %i2) : index = (%c0, %c0) to (%c10, %c10) step (%c1, %c1) {
+ %a = load %arrA[%i1, %i2] : memref<?x?xf32>
+ %b = load %arrB[%i1, %i2] : memref<?x?xf32>
+ %sum = addf %a, %b : f32
+ store %sum, %arrC[%i1, %i2] : memref<?x?xf32>
+ omp.yield
+ }
```
`private_vars`, `firstprivate_vars`, `lastprivate_vars` and `linear_vars`
@@ -181,10 +182,23 @@ def WsLoopOp : OpenMP_Op<"wsloop", [AttrSizedOperandSegments]> {
OptionalAttr<OrderKind>:$order_val,
UnitAttr:$inclusive);
+ let skipDefaultBuilders = 1;
+
let builders = [
OpBuilder<(ins "ValueRange":$lowerBound, "ValueRange":$upperBound,
- "ValueRange":$step,
- CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes)>
+ "ValueRange":$step,
+ CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes)>,
+ OpBuilder<(ins "TypeRange":$resultTypes, "ValueRange":$lowerBound,
+ "ValueRange":$upperBound, "ValueRange":$step,
+ "ValueRange":$privateVars, "ValueRange":$firstprivateVars,
+ "ValueRange":$lastprivate_vars, "ValueRange":$linear_vars,
+ "ValueRange":$linear_step_vars, "StringAttr":$schedule_val,
+ "Value":$schedule_chunk_var, "IntegerAttr":$collapse_val,
+ "UnitAttr":$nowait, "IntegerAttr":$ordered_val,
+ "StringAttr":$order_val, "UnitAttr":$inclusive, CArg<"bool",
+ "true">:$buildBody)>,
+ OpBuilder<(ins "TypeRange":$resultTypes, "ValueRange":$operands,
+ CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes)>
];
let regions = (region AnyRegion:$region);
@@ -193,6 +207,8 @@ def WsLoopOp : OpenMP_Op<"wsloop", [AttrSizedOperandSegments]> {
/// Returns the number of loops in the workshare loop nest.
unsigned getNumLoops() { return lowerBound().size(); }
}];
+ let parser = [{ return parseWsLoopOp(parser, result); }];
+ let printer = [{ return printWsLoopOp(p, *this); }];
}
def YieldOp : OpenMP_Op<"yield", [NoSideEffect, ReturnLike, Terminator,
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index 907ba65c07b7..06854cd99be1 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -17,6 +17,7 @@
#include "mlir/IR/OperationSupport.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include <cstddef>
@@ -172,8 +173,8 @@ static void printParallelOp(OpAsmPrinter &p, ParallelOp op) {
}
/// Emit an error if the same clause is present more than once on an operation.
-static ParseResult allowedOnce(OpAsmParser &parser, llvm::StringRef clause,
- llvm::StringRef operation) {
+static ParseResult allowedOnce(OpAsmParser &parser, StringRef clause,
+ StringRef operation) {
return parser.emitError(parser.getNameLoc())
<< " at most one " << clause << " clause can appear on the "
<< operation << " operation";
@@ -213,7 +214,7 @@ static ParseResult parseParallelOp(OpAsmParser &parser,
SmallVector<OpAsmParser::OperandType, 4> allocators;
SmallVector<Type, 4> allocatorTypes;
std::array<int, 8> segments{0, 0, 0, 0, 0, 0, 0, 0};
- llvm::StringRef keyword;
+ StringRef keyword;
bool defaultVal = false;
bool procBind = false;
@@ -225,11 +226,11 @@ static ParseResult parseParallelOp(OpAsmParser &parser,
const int copyinClausePos = 5;
const int allocateClausePos = 6;
const int allocatorPos = 7;
- const llvm::StringRef opName = result.name.getStringRef();
+ const StringRef opName = result.name.getStringRef();
while (succeeded(parser.parseOptionalKeyword(&keyword))) {
if (keyword == "if") {
- // Fail if there was already another if condition
+ // Fail if there was already another if condition.
if (segments[ifClausePos])
return allowedOnce(parser, "if", opName);
if (parser.parseLParen() || parser.parseOperand(ifCond.first) ||
@@ -237,7 +238,7 @@ static ParseResult parseParallelOp(OpAsmParser &parser,
return failure();
segments[ifClausePos] = 1;
} else if (keyword == "num_threads") {
- // fail if there was already another num_threads clause
+ // Fail if there was already another num_threads clause.
if (segments[numThreadsClausePos])
return allowedOnce(parser, "num_threads", opName);
if (parser.parseLParen() || parser.parseOperand(numThreads.first) ||
@@ -245,35 +246,35 @@ static ParseResult parseParallelOp(OpAsmParser &parser,
return failure();
segments[numThreadsClausePos] = 1;
} else if (keyword == "private") {
- // fail if there was already another private clause
+ // Fail if there was already another private clause.
if (segments[privateClausePos])
return allowedOnce(parser, "private", opName);
if (parseOperandAndTypeList(parser, privates, privateTypes))
return failure();
segments[privateClausePos] = privates.size();
} else if (keyword == "firstprivate") {
- // fail if there was already another firstprivate clause
+ // Fail if there was already another firstprivate clause.
if (segments[firstprivateClausePos])
return allowedOnce(parser, "firstprivate", opName);
if (parseOperandAndTypeList(parser, firstprivates, firstprivateTypes))
return failure();
segments[firstprivateClausePos] = firstprivates.size();
} else if (keyword == "shared") {
- // fail if there was already another shared clause
+ // Fail if there was already another shared clause.
if (segments[sharedClausePos])
return allowedOnce(parser, "shared", opName);
if (parseOperandAndTypeList(parser, shareds, sharedTypes))
return failure();
segments[sharedClausePos] = shareds.size();
} else if (keyword == "copyin") {
- // fail if there was already another copyin clause
+ // Fail if there was already another copyin clause.
if (segments[copyinClausePos])
return allowedOnce(parser, "copyin", opName);
if (parseOperandAndTypeList(parser, copyins, copyinTypes))
return failure();
segments[copyinClausePos] = copyins.size();
} else if (keyword == "allocate") {
- // fail if there was already another allocate clause
+ // Fail if there was already another allocate clause.
if (segments[allocateClausePos])
return allowedOnce(parser, "allocate", opName);
if (parseAllocateAndAllocator(parser, allocates, allocateTypes,
@@ -282,27 +283,27 @@ static ParseResult parseParallelOp(OpAsmParser &parser,
segments[allocateClausePos] = allocates.size();
segments[allocatorPos] = allocators.size();
} else if (keyword == "default") {
- // fail if there was already another default clause
+ // Fail if there was already another default clause.
if (defaultVal)
return allowedOnce(parser, "default", opName);
defaultVal = true;
- llvm::StringRef defval;
+ StringRef defval;
if (parser.parseLParen() || parser.parseKeyword(&defval) ||
parser.parseRParen())
return failure();
- llvm::SmallString<16> attrval;
+ SmallString<16> attrval;
// The def prefix is required for the attribute as "private" is a keyword
- // in C++
+ // in C++.
attrval += "def";
attrval += defval;
auto attr = parser.getBuilder().getStringAttr(attrval);
result.addAttribute("default_val", attr);
} else if (keyword == "proc_bind") {
- // fail if there was already another proc_bind clause
+ // Fail if there was already another proc_bind clause.
if (procBind)
return allowedOnce(parser, "proc_bind", opName);
procBind = true;
- llvm::StringRef bind;
+ StringRef bind;
if (parser.parseLParen() || parser.parseKeyword(&bind) ||
parser.parseRParen())
return failure();
@@ -315,48 +316,48 @@ static ParseResult parseParallelOp(OpAsmParser &parser,
}
}
- // Add if parameter
+ // Add if parameter.
if (segments[ifClausePos] &&
parser.resolveOperand(ifCond.first, ifCond.second, result.operands))
return failure();
- // Add num_threads parameter
+ // Add num_threads parameter.
if (segments[numThreadsClausePos] &&
parser.resolveOperand(numThreads.first, numThreads.second,
result.operands))
return failure();
- // Add private parameters
+ // Add private parameters.
if (segments[privateClausePos] &&
parser.resolveOperands(privates, privateTypes, privates[0].location,
result.operands))
return failure();
- // Add firstprivate parameters
+ // Add firstprivate parameters.
if (segments[firstprivateClausePos] &&
parser.resolveOperands(firstprivates, firstprivateTypes,
firstprivates[0].location, result.operands))
return failure();
- // Add shared parameters
+ // Add shared parameters.
if (segments[sharedClausePos] &&
parser.resolveOperands(shareds, sharedTypes, shareds[0].location,
result.operands))
return failure();
- // Add copyin parameters
+ // Add copyin parameters.
if (segments[copyinClausePos] &&
parser.resolveOperands(copyins, copyinTypes, copyins[0].location,
result.operands))
return failure();
- // Add allocate parameters
+ // Add allocate parameters.
if (segments[allocateClausePos] &&
parser.resolveOperands(allocates, allocateTypes, allocates[0].location,
result.operands))
return failure();
- // Add allocator parameters
+ // Add allocator parameters.
if (segments[allocatorPos] &&
parser.resolveOperands(allocators, allocatorTypes, allocators[0].location,
result.operands))
@@ -373,6 +374,335 @@ static ParseResult parseParallelOp(OpAsmParser &parser,
return success();
}
+/// linear ::= `linear` `(` linear-list `)`
+/// linear-list ::= linear-val | linear-val linear-list
+/// linear-val ::= ssa-id-and-type `=` ssa-id-and-type
+static ParseResult
+parseLinearClause(OpAsmParser &parser,
+ SmallVectorImpl<OpAsmParser::OperandType> &vars,
+ SmallVectorImpl<Type> &types,
+ SmallVectorImpl<OpAsmParser::OperandType> &stepVars) {
+ if (parser.parseLParen())
+ return failure();
+
+ do {
+ OpAsmParser::OperandType var;
+ Type type;
+ OpAsmParser::OperandType stepVar;
+ if (parser.parseOperand(var) || parser.parseEqual() ||
+ parser.parseOperand(stepVar) || parser.parseColonType(type))
+ return failure();
+
+ vars.push_back(var);
+ types.push_back(type);
+ stepVars.push_back(stepVar);
+ } while (succeeded(parser.parseOptionalComma()));
+
+ if (parser.parseRParen())
+ return failure();
+
+ return success();
+}
+
+/// schedule ::= `schedule` `(` sched-list `)`
+/// sched-list ::= sched-val | sched-val sched-list
+/// sched-val ::= sched-with-chunk | sched-wo-chunk
+/// sched-with-chunk ::= sched-with-chunk-types (`=` ssa-id-and-type)?
+/// sched-with-chunk-types ::= `static` | `dynamic` | `guided`
+/// sched-wo-chunk ::= `auto` | `runtime`
+static ParseResult
+parseScheduleClause(OpAsmParser &parser, SmallString<8> &schedule,
+ Optional<OpAsmParser::OperandType> &chunkSize) {
+ if (parser.parseLParen())
+ return failure();
+
+ StringRef keyword;
+ if (parser.parseKeyword(&keyword))
+ return failure();
+
+ schedule = keyword;
+ if (keyword == "static" || keyword == "dynamic" || keyword == "guided") {
+ if (succeeded(parser.parseOptionalEqual())) {
+ chunkSize = OpAsmParser::OperandType{};
+ if (parser.parseOperand(*chunkSize))
+ return failure();
+ } else {
+ chunkSize = llvm::NoneType::None;
+ }
+ } else if (keyword == "auto" || keyword == "runtime") {
+ chunkSize = llvm::NoneType::None;
+ } else {
+ return parser.emitError(parser.getNameLoc()) << " expected schedule kind";
+ }
+
+ if (parser.parseRParen())
+ return failure();
+
+ return success();
+}
+
+/// Parses an OpenMP Workshare Loop operation
+///
+/// operation ::= `omp.wsloop` loop-control clause-list
+/// loop-control ::= `(` ssa-id-list `)` `:` type `=` loop-bounds
+/// loop-bounds ::= `(` ssa-id-list `)` to `(` ssa-id-list `)` steps
+/// steps ::= `step` `(`ssa-id-list`)`
+/// clause-list ::= clause | empty | clause-list
+/// clause ::= private | firstprivate | lastprivate | linear | schedule |
+///            collapse | nowait | ordered | order | inclusive
+/// private ::= `private` `(` ssa-id-and-type-list `)`
+/// firstprivate ::= `firstprivate` `(` ssa-id-and-type-list `)`
+/// lastprivate ::= `lastprivate` `(` ssa-id-and-type-list `)`
+/// linear ::= `linear` `(` linear-list `)`
+/// schedule ::= `schedule` `(` sched-list `)`
+/// collapse ::= `collapse` `(` ssa-id-and-type `)`
+/// nowait ::= `nowait`
+/// ordered ::= `ordered` `(` ssa-id-and-type `)`
+/// order ::= `order` `(` `concurrent` `)`
+/// inclusive ::= `inclusive`
+///
+static ParseResult parseWsLoopOp(OpAsmParser &parser, OperationState &result) {
+ Type loopVarType;
+ int numIVs;
+
+ // Parse an opening `(` followed by induction variables followed by `)`
+ SmallVector<OpAsmParser::OperandType> ivs;
+ if (parser.parseRegionArgumentList(ivs, /*requiredOperandCount=*/-1,
+ OpAsmParser::Delimiter::Paren))
+ return failure();
+
+ numIVs = static_cast<int>(ivs.size());
+
+ if (parser.parseColonType(loopVarType))
+ return failure();
+
+ // Parse loop bounds.
+ SmallVector<OpAsmParser::OperandType> lower;
+ if (parser.parseEqual() ||
+ parser.parseOperandList(lower, numIVs, OpAsmParser::Delimiter::Paren) ||
+ parser.resolveOperands(lower, loopVarType, result.operands))
+ return failure();
+
+ SmallVector<OpAsmParser::OperandType> upper;
+ if (parser.parseKeyword("to") ||
+ parser.parseOperandList(upper, numIVs, OpAsmParser::Delimiter::Paren) ||
+ parser.resolveOperands(upper, loopVarType, result.operands))
+ return failure();
+
+ // Parse step values.
+ SmallVector<OpAsmParser::OperandType> steps;
+ if (parser.parseKeyword("step") ||
+ parser.parseOperandList(steps, numIVs, OpAsmParser::Delimiter::Paren) ||
+ parser.resolveOperands(steps, loopVarType, result.operands))
+ return failure();
+
+ SmallVector<OpAsmParser::OperandType> privates;
+ SmallVector<Type> privateTypes;
+ SmallVector<OpAsmParser::OperandType> firstprivates;
+ SmallVector<Type> firstprivateTypes;
+ SmallVector<OpAsmParser::OperandType> lastprivates;
+ SmallVector<Type> lastprivateTypes;
+ SmallVector<OpAsmParser::OperandType> linears;
+ SmallVector<Type> linearTypes;
+ SmallVector<OpAsmParser::OperandType> linearSteps;
+ SmallString<8> schedule;
+ Optional<OpAsmParser::OperandType> scheduleChunkSize;
+ std::array<int, 9> segments{numIVs, numIVs, numIVs, 0, 0, 0, 0, 0, 0};
+
+ const StringRef opName = result.name.getStringRef();
+ StringRef keyword;
+
+ enum SegmentPos {
+ lbPos = 0,
+ ubPos,
+ stepPos,
+ privateClausePos,
+ firstprivateClausePos,
+ lastprivateClausePos,
+ linearClausePos,
+ linearStepPos,
+ scheduleClausePos,
+ };
+
+ while (succeeded(parser.parseOptionalKeyword(&keyword))) {
+ if (keyword == "private") {
+ if (segments[privateClausePos])
+ return allowedOnce(parser, "private", opName);
+ if (parseOperandAndTypeList(parser, privates, privateTypes))
+ return failure();
+ segments[privateClausePos] = privates.size();
+ } else if (keyword == "firstprivate") {
+ // Fail if there was already another firstprivate clause.
+ if (segments[firstprivateClausePos])
+ return allowedOnce(parser, "firstprivate", opName);
+ if (parseOperandAndTypeList(parser, firstprivates, firstprivateTypes))
+ return failure();
+ segments[firstprivateClausePos] = firstprivates.size();
+ } else if (keyword == "lastprivate") {
+ // Fail if there was already another lastprivate clause.
+ if (segments[lastprivateClausePos])
+ return allowedOnce(parser, "lastprivate", opName);
+ if (parseOperandAndTypeList(parser, lastprivates, lastprivateTypes))
+ return failure();
+ segments[lastprivateClausePos] = lastprivates.size();
+ } else if (keyword == "linear") {
+ // Fail if there was already another linear clause.
+ if (segments[linearClausePos])
+ return allowedOnce(parser, "linear", opName);
+ if (parseLinearClause(parser, linears, linearTypes, linearSteps))
+ return failure();
+ segments[linearClausePos] = linears.size();
+ segments[linearStepPos] = linearSteps.size();
+ } else if (keyword == "schedule") {
+ if (!schedule.empty())
+ return allowedOnce(parser, "schedule", opName);
+ if (parseScheduleClause(parser, schedule, scheduleChunkSize))
+ return failure();
+ if (scheduleChunkSize) {
+ segments[scheduleClausePos] = 1;
+ }
+ } else if (keyword == "collapse") {
+ auto type = parser.getBuilder().getI64Type();
+ mlir::IntegerAttr attr;
+ if (parser.parseLParen() || parser.parseAttribute(attr, type) ||
+ parser.parseRParen())
+ return failure();
+ result.addAttribute("collapse_val", attr);
+ } else if (keyword == "nowait") {
+ auto attr = UnitAttr::get(parser.getBuilder().getContext());
+ result.addAttribute("nowait", attr);
+ } else if (keyword == "ordered") {
+ mlir::IntegerAttr attr;
+ if (succeeded(parser.parseOptionalLParen())) {
+ auto type = parser.getBuilder().getI64Type();
+ if (parser.parseAttribute(attr, type))
+ return failure();
+ if (parser.parseRParen())
+ return failure();
+ } else {
+ // Use 0 to represent no ordered parameter was specified
+ attr = parser.getBuilder().getI64IntegerAttr(0);
+ }
+ result.addAttribute("ordered_val", attr);
+ } else if (keyword == "order") {
+ StringRef order;
+ if (parser.parseLParen() || parser.parseKeyword(&order) ||
+ parser.parseRParen())
+ return failure();
+ auto attr = parser.getBuilder().getStringAttr(order);
+ result.addAttribute("order", attr);
+ } else if (keyword == "inclusive") {
+ auto attr = UnitAttr::get(parser.getBuilder().getContext());
+ result.addAttribute("inclusive", attr);
+ }
+ }
+
+ if (segments[privateClausePos]) {
+ parser.resolveOperands(privates, privateTypes, privates[0].location,
+ result.operands);
+ }
+
+ if (segments[firstprivateClausePos]) {
+ parser.resolveOperands(firstprivates, firstprivateTypes,
+ firstprivates[0].location, result.operands);
+ }
+
+ if (segments[lastprivateClausePos]) {
+ parser.resolveOperands(lastprivates, lastprivateTypes,
+ lastprivates[0].location, result.operands);
+ }
+
+ if (segments[linearClausePos]) {
+ parser.resolveOperands(linears, linearTypes, linears[0].location,
+ result.operands);
+ auto linearStepType = parser.getBuilder().getI32Type();
+ SmallVector<Type> linearStepTypes(linearSteps.size(), linearStepType);
+ parser.resolveOperands(linearSteps, linearStepTypes,
+ linearSteps[0].location, result.operands);
+ }
+
+ if (!schedule.empty()) {
+ schedule[0] = llvm::toUpper(schedule[0]);
+ auto attr = parser.getBuilder().getStringAttr(schedule);
+ result.addAttribute("schedule_val", attr);
+ if (scheduleChunkSize) {
+ auto chunkSizeType = parser.getBuilder().getI32Type();
+ parser.resolveOperand(*scheduleChunkSize, chunkSizeType, result.operands);
+ }
+ }
+
+ result.addAttribute("operand_segment_sizes",
+ parser.getBuilder().getI32VectorAttr(segments));
+
+ // Now parse the body.
+ Region *body = result.addRegion();
+ SmallVector<Type> ivTypes(numIVs, loopVarType);
+ if (parser.parseRegion(*body, ivs, ivTypes))
+ return failure();
+ return success();
+}
+
+static void printWsLoopOp(OpAsmPrinter &p, WsLoopOp op) {
+ auto args = op.getRegion().front().getArguments();
+ p << op.getOperationName() << " (" << args << ") : " << args[0].getType()
+ << " = (" << op.lowerBound() << ") to (" << op.upperBound() << ") step ("
+ << op.step() << ")";
+
+ // Print private, firstprivate and lastprivate parameters.
+ auto printDataVars = [&p](StringRef name, OperandRange vars) {
+ if (vars.empty())
+ return;
+
+ p << " " << name << "(";
+ llvm::interleaveComma(
+ vars, p, [&](const Value &v) { p << v << " : " << v.getType(); });
+ p << ")";
+ };
+ printDataVars("private", op.private_vars());
+ printDataVars("firstprivate", op.firstprivate_vars());
+ printDataVars("lastprivate", op.lastprivate_vars());
+
+ auto linearVars = op.linear_vars();
+ auto linearVarsSize = linearVars.size();
+ if (linearVarsSize) {
+ p << " "
+ << "linear"
+ << "(";
+ for (unsigned i = 0; i < linearVarsSize; ++i) {
+ std::string separator = i == linearVarsSize - 1 ? ")" : ", ";
+ p << linearVars[i];
+ if (op.linear_step_vars().size() > i)
+ p << " = " << op.linear_step_vars()[i];
+ p << " : " << linearVars[i].getType() << separator;
+ }
+ }
+
+ if (auto sched = op.schedule_val()) {
+ auto schedLower = sched->lower();
+ p << " schedule(" << schedLower;
+ if (auto chunk = op.schedule_chunk_var()) {
+ p << " = " << chunk;
+ }
+ p << ")";
+ }
+
+ if (auto collapse = op.collapse_val())
+ p << " collapse(" << collapse << ")";
+
+ if (op.nowait())
+ p << " nowait";
+
+ if (auto ordered = op.ordered_val()) {
+ p << " ordered(" << ordered << ")";
+ }
+
+ if (op.inclusive()) {
+ p << " inclusive";
+ }
+
+ p.printRegion(op.region(), /*printEntryBlockArgs=*/false);
+}
+
//===----------------------------------------------------------------------===//
// WsLoopOp
//===----------------------------------------------------------------------===//
@@ -386,9 +716,71 @@ void WsLoopOp::build(OpBuilder &builder, OperationState &state,
/*linear_vars=*/ValueRange(), /*linear_step_vars=*/ValueRange(),
/*schedule_val=*/nullptr, /*schedule_chunk_var=*/nullptr,
/*collapse_val=*/nullptr,
- /*nowait=*/false, /*ordered_val=*/nullptr, /*order_val=*/nullptr,
- /*inclusive=*/false);
+ /*nowait=*/nullptr, /*ordered_val=*/nullptr, /*order_val=*/nullptr,
+ /*inclusive=*/nullptr, /*buildBody=*/false);
+ state.addAttributes(attributes);
+}
+
+void WsLoopOp::build(OpBuilder &, OperationState &state, TypeRange resultTypes,
+ ValueRange operands, ArrayRef<NamedAttribute> attributes) {
+ state.addOperands(operands);
state.addAttributes(attributes);
+ (void)state.addRegion();
+ assert(resultTypes.size() == 0u && "mismatched number of return types");
+ state.addTypes(resultTypes);
+}
+
+void WsLoopOp::build(OpBuilder &builder, OperationState &result,
+ TypeRange typeRange, ValueRange lowerBounds,
+ ValueRange upperBounds, ValueRange steps,
+ ValueRange privateVars, ValueRange firstprivateVars,
+ ValueRange lastprivateVars, ValueRange linearVars,
+ ValueRange linearStepVars, StringAttr scheduleVal,
+ Value scheduleChunkVar, IntegerAttr collapseVal,
+ UnitAttr nowait, IntegerAttr orderedVal,
+ StringAttr orderVal, UnitAttr inclusive, bool buildBody) {
+ result.addOperands(lowerBounds);
+ result.addOperands(upperBounds);
+ result.addOperands(steps);
+ result.addOperands(privateVars);
+ result.addOperands(firstprivateVars);
+ result.addOperands(linearVars);
+ result.addOperands(linearStepVars);
+ if (scheduleChunkVar)
+ result.addOperands(scheduleChunkVar);
+
+ if (scheduleVal)
+ result.addAttribute("schedule_val", scheduleVal);
+ if (collapseVal)
+ result.addAttribute("collapse_val", collapseVal);
+ if (nowait)
+ result.addAttribute("nowait", nowait);
+ if (orderedVal)
+ result.addAttribute("ordered_val", orderedVal);
+ if (orderVal)
+ result.addAttribute("order", orderVal);
+ if (inclusive)
+ result.addAttribute("inclusive", inclusive);
+ result.addAttribute(
+ WsLoopOp::getOperandSegmentSizeAttr(),
+ builder.getI32VectorAttr(
+ {static_cast<int32_t>(lowerBounds.size()),
+ static_cast<int32_t>(upperBounds.size()),
+ static_cast<int32_t>(steps.size()),
+ static_cast<int32_t>(privateVars.size()),
+ static_cast<int32_t>(firstprivateVars.size()),
+ static_cast<int32_t>(lastprivateVars.size()),
+ static_cast<int32_t>(linearVars.size()),
+ static_cast<int32_t>(linearStepVars.size()),
+ static_cast<int32_t>(scheduleChunkVar != nullptr ? 1 : 0)}));
+
+ Region *bodyRegion = result.addRegion();
+ if (buildBody) {
+ OpBuilder::InsertionGuard guard(builder);
+ unsigned numIVs = steps.size();
+ SmallVector<Type, 8> argTypes(numIVs, steps.getType().front());
+ builder.createBlock(bodyRegion, {}, argTypes);
+ }
}
#define GET_OP_CLASSES
diff --git a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
index c1fc82e51c50..e0bb0134a14a 100644
--- a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
@@ -34,10 +34,8 @@ func @branch_loop() {
func @wsloop(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) {
// CHECK: omp.parallel
omp.parallel {
- // CHECK: omp.wsloop
- // CHECK: (%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]], %[[ARG5]])
+ // CHECK: omp.wsloop (%[[ARG6:.*]], %[[ARG7:.*]]) : i64 = (%[[ARG0]], %[[ARG1]]) to (%[[ARG2]], %[[ARG3]]) step (%[[ARG4]], %[[ARG5]]) {
"omp.wsloop"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5) ( {
- // CHECK: ^{{.*}}(%[[ARG6:.*]]: i64, %[[ARG7:.*]]: i64):
^bb0(%arg6: index, %arg7: index): // no predecessors
// CHECK: "test.payload"(%[[ARG6]], %[[ARG7]]) : (i64, i64) -> ()
"test.payload"(%arg6, %arg7) : (index, index) -> ()
diff --git a/mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir b/mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir
index 466bd6aa96af..60a143a85006 100644
--- a/mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir
+++ b/mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir
@@ -4,9 +4,9 @@
func @parallel(%arg0: index, %arg1: index, %arg2: index,
%arg3: index, %arg4: index, %arg5: index) {
// CHECK: omp.parallel {
- // CHECK: "omp.wsloop"({{.*}}) ( {
+ // CHECK: omp.wsloop (%[[LVAR1:.*]], %[[LVAR2:.*]]) : index = (%arg0, %arg1) to (%arg2, %arg3) step (%arg4, %arg5) {
scf.parallel (%i, %j) = (%arg0, %arg1) to (%arg2, %arg3) step (%arg4, %arg5) {
- // CHECK: test.payload
+ // CHECK: "test.payload"(%[[LVAR1]], %[[LVAR2]]) : (index, index) -> ()
"test.payload"(%i, %j) : (index, index) -> ()
// CHECK: omp.yield
// CHECK: }
@@ -20,12 +20,12 @@ func @parallel(%arg0: index, %arg1: index, %arg2: index,
func @nested_loops(%arg0: index, %arg1: index, %arg2: index,
%arg3: index, %arg4: index, %arg5: index) {
// CHECK: omp.parallel {
- // CHECK: "omp.wsloop"({{.*}}) ( {
+ // CHECK: omp.wsloop (%[[LVAR_OUT1:.*]]) : index = (%arg0) to (%arg2) step (%arg4) {
// CHECK-NOT: omp.parallel
scf.parallel (%i) = (%arg0) to (%arg2) step (%arg4) {
- // CHECK: "omp.wsloop"({{.*}}) ( {
+ // CHECK: omp.wsloop (%[[LVAR_IN1:.*]]) : index = (%arg1) to (%arg3) step (%arg5) {
scf.parallel (%j) = (%arg1) to (%arg3) step (%arg5) {
- // CHECK: test.payload
+ // CHECK: "test.payload"(%[[LVAR_OUT1]], %[[LVAR_IN1]]) : (index, index) -> ()
"test.payload"(%i, %j) : (index, index) -> ()
// CHECK: omp.yield
// CHECK: }
@@ -41,9 +41,9 @@ func @nested_loops(%arg0: index, %arg1: index, %arg2: index,
func @adjacent_loops(%arg0: index, %arg1: index, %arg2: index,
%arg3: index, %arg4: index, %arg5: index) {
// CHECK: omp.parallel {
- // CHECK: "omp.wsloop"({{.*}}) ( {
+ // CHECK: omp.wsloop (%[[LVAR_AL1:.*]]) : index = (%arg0) to (%arg2) step (%arg4) {
scf.parallel (%i) = (%arg0) to (%arg2) step (%arg4) {
- // CHECK: test.payload1
+ // CHECK: "test.payload1"(%[[LVAR_AL1]]) : (index) -> ()
"test.payload1"(%i) : (index) -> ()
// CHECK: omp.yield
// CHECK: }
@@ -52,9 +52,9 @@ func @adjacent_loops(%arg0: index, %arg1: index, %arg2: index,
// CHECK: }
// CHECK: omp.parallel {
- // CHECK: "omp.wsloop"({{.*}}) ( {
+ // CHECK: omp.wsloop (%[[LVAR_AL2:.*]]) : index = (%arg1) to (%arg3) step (%arg5) {
scf.parallel (%j) = (%arg1) to (%arg3) step (%arg5) {
- // CHECK: test.payload2
+ // CHECK: "test.payload2"(%[[LVAR_AL2]]) : (index) -> ()
"test.payload2"(%j) : (index) -> ()
// CHECK: omp.yield
// CHECK: }
diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir
index 6b9be10c7693..8f7f9c1ca69c 100644
--- a/mlir/test/Dialect/OpenMP/ops.mlir
+++ b/mlir/test/Dialect/OpenMP/ops.mlir
@@ -89,77 +89,192 @@ func @omp_parallel(%data_var : memref<i32>, %if_cond : i1, %num_threads : si32)
}
func @omp_parallel_pretty(%data_var : memref<i32>, %if_cond : i1, %num_threads : si32, %allocator : si32) -> () {
- // CHECK: omp.parallel
- omp.parallel {
- omp.terminator
- }
+ // CHECK: omp.parallel
+ omp.parallel {
+ omp.terminator
+ }
+
+ // CHECK: omp.parallel num_threads(%{{.*}} : si32)
+ omp.parallel num_threads(%num_threads : si32) {
+ omp.terminator
+ }
+
+ // CHECK: omp.parallel allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>)
+ omp.parallel allocate(%data_var : memref<i32> -> %data_var : memref<i32>) {
+ omp.terminator
+ }
+
+ // CHECK: omp.parallel private(%{{.*}} : memref<i32>, %{{.*}} : memref<i32>) firstprivate(%{{.*}} : memref<i32>)
+ omp.parallel private(%data_var : memref<i32>, %data_var : memref<i32>) firstprivate(%data_var : memref<i32>) {
+ omp.terminator
+ }
+
+ // CHECK omp.parallel shared(%{{.*}} : memref<i32>) copyin(%{{.*}} : memref<i32>, %{{.*}} : memref<i32>)
+ omp.parallel shared(%data_var : memref<i32>) copyin(%data_var : memref<i32>, %data_var : memref<i32>) {
+ omp.parallel if(%if_cond: i1) {
+ omp.terminator
+ }
+ omp.terminator
+ }
+
+ // CHECK omp.parallel if(%{{.*}}) num_threads(%{{.*}} : si32) private(%{{.*}} : memref<i32>) proc_bind(close)
+ omp.parallel num_threads(%num_threads : si32) if(%if_cond: i1)
+ private(%data_var : memref<i32>) proc_bind(close) {
+ omp.terminator
+ }
+
+ return
+}
- // CHECK: omp.parallel num_threads(%{{.*}} : si32)
- omp.parallel num_threads(%num_threads : si32) {
- omp.terminator
- }
+// CHECK-LABEL: omp_wsloop
+func @omp_wsloop(%lb : index, %ub : index, %step : index, %data_var : memref<i32>, %linear_var : i32, %chunk_var : i32) -> () {
- // CHECK: omp.parallel allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>)
- omp.parallel allocate(%data_var : memref<i32> -> %data_var : memref<i32>) {
- omp.terminator
+ // CHECK: omp.wsloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) private(%{{.*}} : memref<i32>, %{{.*}} : memref<i32>) collapse(2) ordered(1)
+ "omp.wsloop" (%lb, %ub, %step, %data_var, %data_var) ({
+ ^bb0(%iv: index):
+ omp.yield
+ }) {operand_segment_sizes = dense<[1,1,1,2,0,0,0,0,0]> : vector<9xi32>, collapse_val = 2, ordered_val = 1} :
+ (index, index, index, memref<i32>, memref<i32>) -> ()
+
+ // CHECK: omp.wsloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) linear(%{{.*}} = %{{.*}} : memref<i32>) schedule(static)
+ "omp.wsloop" (%lb, %ub, %step, %data_var, %linear_var) ({
+ ^bb0(%iv: index):
+ omp.yield
+ }) {operand_segment_sizes = dense<[1,1,1,0,0,0,1,1,0]> : vector<9xi32>, schedule_val = "Static"} :
+ (index, index, index, memref<i32>, i32) -> ()
+
+ // CHECK: omp.wsloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) linear(%{{.*}} = %{{.*}} : memref<i32>, %{{.*}} = %{{.*}} : memref<i32>) schedule(static)
+ "omp.wsloop" (%lb, %ub, %step, %data_var, %data_var, %linear_var, %linear_var) ({
+ ^bb0(%iv: index):
+ omp.yield
+ }) {operand_segment_sizes = dense<[1,1,1,0,0,0,2,2,0]> : vector<9xi32>, schedule_val = "Static"} :
+ (index, index, index, memref<i32>, memref<i32>, i32, i32) -> ()
+
+ // CHECK: omp.wsloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) private(%{{.*}} : memref<i32>) firstprivate(%{{.*}} : memref<i32>) lastprivate(%{{.*}} : memref<i32>) linear(%{{.*}} = %{{.*}} : memref<i32>) schedule(dynamic = %{{.*}}) collapse(3) ordered(2)
+ "omp.wsloop" (%lb, %ub, %step, %data_var, %data_var, %data_var, %data_var, %linear_var, %chunk_var) ({
+ ^bb0(%iv: index):
+ omp.yield
+ }) {operand_segment_sizes = dense<[1,1,1,1,1,1,1,1,1]> : vector<9xi32>, schedule_val = "Dynamic", collapse_val = 3, ordered_val = 2} :
+ (index, index, index, memref<i32>, memref<i32>, memref<i32>, memref<i32>, i32, i32) -> ()
+
+ // CHECK: omp.wsloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) private(%{{.*}} : memref<i32>) schedule(auto) nowait
+ "omp.wsloop" (%lb, %ub, %step, %data_var) ({
+ ^bb0(%iv: index):
+ omp.yield
+ }) {operand_segment_sizes = dense<[1,1,1,1,0,0,0,0,0]> : vector<9xi32>, nowait, schedule_val = "Auto"} :
+ (index, index, index, memref<i32>) -> ()
+
+ return
+}
+
+// CHECK-LABEL: omp_wsloop_pretty
+func @omp_wsloop_pretty(%lb : index, %ub : index, %step : index,
+ %data_var : memref<i32>, %linear_var : i32, %chunk_var : i32) -> () {
+
+ // CHECK: omp.wsloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) private(%{{.*}} : memref<i32>)
+ omp.wsloop (%iv) : index = (%lb) to (%ub) step (%step) private(%data_var : memref<i32>) collapse(2) ordered(2) {
+ omp.yield
}
- // CHECK: omp.parallel private(%{{.*}} : memref<i32>, %{{.*}} : memref<i32>) firstprivate(%{{.*}} : memref<i32>)
- omp.parallel private(%data_var : memref<i32>, %data_var : memref<i32>) firstprivate(%data_var : memref<i32>) {
- omp.terminator
+ // CHECK: omp.wsloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) linear(%{{.*}} = %{{.*}} : memref<i32>) schedule(static)
+ omp.wsloop (%iv) : index = (%lb) to (%ub) step (%step) schedule(static) lastprivate(%data_var : memref<i32>) linear(%data_var = %linear_var : memref<i32>) {
+ omp.yield
}
- // CHECK omp.parallel shared(%{{.*}} : memref<i32>) copyin(%{{.*}} : memref<i32>, %{{.*}} : memref<i32>)
- omp.parallel shared(%data_var : memref<i32>) copyin(%data_var : memref<i32>, %data_var : memref<i32>) {
- omp.parallel if(%if_cond: i1) {
- omp.terminator
- }
- omp.terminator
+ // CHECK: omp.wsloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) private(%{{.*}} : memref<i32>) firstprivate(%{{.*}} : memref<i32>) lastprivate(%{{.*}} : memref<i32>) linear(%{{.*}} = %{{.*}} : memref<i32>) schedule(static = %{{.*}}) collapse(3) ordered(2)
+ omp.wsloop (%iv) : index = (%lb) to (%ub) step (%step) ordered(2) private(%data_var : memref<i32>)
+ firstprivate(%data_var : memref<i32>) lastprivate(%data_var : memref<i32>) linear(%data_var = %linear_var : memref<i32>)
+ schedule(static = %chunk_var) collapse(3) {
+ omp.yield
}
- // CHECK omp.parallel if(%{{.*}}) num_threads(%{{.*}} : si32) private(%{{.*}} : memref<i32>) proc_bind(close)
- omp.parallel num_threads(%num_threads : si32) if(%if_cond: i1)
- private(%data_var : memref<i32>) proc_bind(close) {
- omp.terminator
+ // CHECK: omp.wsloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) private({{.*}} : memref<i32>)
+ omp.wsloop (%iv) : index = (%lb) to (%ub) step (%step) private(%data_var : memref<i32>) {
+ omp.yield
}
return
}
-func @omp_wsloop(%lb : index, %ub : index, %step : index,
- %data_var : memref<i32>, %linear_var : si32, %chunk_var : si32) -> () {
+// CHECK-LABEL: omp_wsloop_pretty_multi_block
+func @omp_wsloop_pretty_multi_block(%lb : index, %ub : index, %step : index, %data1 : memref<?xi32>, %data2 : memref<?xi32>) -> () {
- // CHECK: "omp.wsloop"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}})
- "omp.wsloop" (%lb, %ub, %step, %data_var) ({
+ // CHECK: omp.wsloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
+ omp.wsloop (%iv) : index = (%lb) to (%ub) step (%step) {
+ %1 = "test.payload"(%iv) : (index) -> (i32)
+ br ^bb1(%1: i32)
+ ^bb1(%arg: i32):
+ memref.store %arg, %data1[%iv] : memref<?xi32>
omp.yield
- }) {operand_segment_sizes = dense<[1,1,1,1,0,0,0,0,0]> : vector<9xi32>, collapse_val = 2, ordered_val = 1} :
- (index, index, index, memref<i32>) -> ()
+ }
- // CHECK: "omp.wsloop"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}})
- "omp.wsloop" (%lb, %lb, %ub, %ub, %step, %step, %data_var) ({
+ // CHECK: omp.wsloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
+ omp.wsloop (%iv) : index = (%lb) to (%ub) step (%step) {
+ %c = "test.condition"(%iv) : (index) -> (i1)
+ %v1 = "test.payload"(%iv) : (index) -> (i32)
+ cond_br %c, ^bb1(%v1: i32), ^bb2(%v1: i32)
+ ^bb1(%arg0: i32):
+ memref.store %arg0, %data1[%iv] : memref<?xi32>
+ br ^bb3
+ ^bb2(%arg1: i32):
+ memref.store %arg1, %data2[%iv] : memref<?xi32>
+ br ^bb3
+ ^bb3:
omp.yield
- }) {operand_segment_sizes = dense<[2,2,2,1,0,0,0,0,0]> : vector<9xi32>, collapse_val = 2, ordered_val = 1} :
- (index, index, index, index, index, index, memref<i32>) -> ()
-
+ }
- // CHECK: "omp.wsloop"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}})
- "omp.wsloop" (%lb, %ub, %step, %data_var, %linear_var) ({
+ // CHECK: omp.wsloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
+ omp.wsloop (%iv) : index = (%lb) to (%ub) step (%step) {
+ %c = "test.condition"(%iv) : (index) -> (i1)
+ %v1 = "test.payload"(%iv) : (index) -> (i32)
+ cond_br %c, ^bb1(%v1: i32), ^bb2(%v1: i32)
+ ^bb1(%arg0: i32):
+ memref.store %arg0, %data1[%iv] : memref<?xi32>
omp.yield
- }) {operand_segment_sizes = dense<[1,1,1,0,0,0,1,1,0]> : vector<9xi32>, schedule_val = "Static"} :
- (index, index, index, memref<i32>, si32) -> ()
+ ^bb2(%arg1: i32):
+ memref.store %arg1, %data2[%iv] : memref<?xi32>
+ omp.yield
+ }
- // CHECK: "omp.wsloop"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}})
- "omp.wsloop" (%lb, %ub, %step, %data_var, %data_var, %data_var, %data_var, %linear_var, %chunk_var) ({
+ return
+}
+
+// CHECK-LABEL: omp_wsloop_pretty_non_index
+func @omp_wsloop_pretty_non_index(%lb1 : i32, %ub1 : i32, %step1 : i32, %lb2 : i64, %ub2 : i64, %step2 : i64,
+ %data1 : memref<?xi32>, %data2 : memref<?xi64>) -> () {
+
+ // CHECK: omp.wsloop (%{{.*}}) : i32 = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
+ omp.wsloop (%iv1) : i32 = (%lb1) to (%ub1) step (%step1) {
+ %1 = "test.payload"(%iv1) : (i32) -> (index)
+ br ^bb1(%1: index)
+ ^bb1(%arg1: index):
+ memref.store %iv1, %data1[%arg1] : memref<?xi32>
omp.yield
- }) {operand_segment_sizes = dense<[1,1,1,1,1,1,1,1,1]> : vector<9xi32>, schedule_val = "Dynamic", collapse_val = 3, ordered_val = 2} :
- (index, index, index, memref<i32>, memref<i32>, memref<i32>, memref<i32>, si32, si32) -> ()
+ }
- // CHECK: "omp.wsloop"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}})
- "omp.wsloop" (%lb, %ub, %step, %data_var) ({
+ // CHECK: omp.wsloop (%{{.*}}) : i64 = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
+ omp.wsloop (%iv2) : i64 = (%lb2) to (%ub2) step (%step2) {
+ %2 = "test.payload"(%iv2) : (i64) -> (index)
+ br ^bb1(%2: index)
+ ^bb1(%arg2: index):
+ memref.store %iv2, %data2[%arg2] : memref<?xi64>
omp.yield
- }) {operand_segment_sizes = dense<[1,1,1,1,0,0,0,0,0]> : vector<9xi32>, nowait, schedule_val = "Auto"} :
- (index, index, index, memref<i32>) -> ()
+ }
+
+ return
+}
+// CHECK-LABEL: omp_wsloop_pretty_multiple
+func @omp_wsloop_pretty_multiple(%lb1 : i32, %ub1 : i32, %step1 : i32, %lb2 : i32, %ub2 : i32, %step2 : i32, %data1 : memref<?xi32>) -> () {
+
+ // CHECK: omp.wsloop (%{{.*}}, %{{.*}}) : i32 = (%{{.*}}, %{{.*}}) to (%{{.*}}, %{{.*}}) step (%{{.*}}, %{{.*}})
+ omp.wsloop (%iv1, %iv2) : i32 = (%lb1, %lb2) to (%ub1, %ub2) step (%step1, %step2) {
+ %1 = "test.payload"(%iv1) : (i32) -> (index)
+ %2 = "test.payload"(%iv2) : (i32) -> (index)
+ memref.store %iv1, %data1[%1] : memref<?xi32>
+ memref.store %iv2, %data1[%2] : memref<?xi32>
+ omp.yield
+ }
return
}
More information about the Mlir-commits
mailing list