[llvm-branch-commits] [flang] [mlir] [WIP][flang] Introduce HLFIR lowerings to omp.workshare_loop_nest (PR #104748)
Ivan R. Ivanov via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Sat Oct 19 10:22:55 PDT 2024
https://github.com/ivanradanov updated https://github.com/llvm/llvm-project/pull/104748
>From d001eec514d0e7104a2279165e76a0c1694174a1 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Sun, 20 Oct 2024 02:22:08 +0900
Subject: [PATCH 01/27] Fix op tests
---
mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp | 2 ++
mlir/test/Dialect/OpenMP/invalid.mlir | 11 ++++-------
2 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index e849f68e9c8380..43c1ec66e1ae31 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -1937,6 +1937,8 @@ void WorkshareOp::build(OpBuilder &builder, OperationState &state,
LogicalResult WorkshareLoopWrapperOp::verify() {
if (!(*this)->getParentOfType<WorkshareOp>())
return emitError() << "must be nested in an omp.workshare";
+ if (getNestedWrapper())
+ return emitError() << "cannot be composite";
return success();
}
diff --git a/mlir/test/Dialect/OpenMP/invalid.mlir b/mlir/test/Dialect/OpenMP/invalid.mlir
index e8482574408c0c..d56629e76b09c3 100644
--- a/mlir/test/Dialect/OpenMP/invalid.mlir
+++ b/mlir/test/Dialect/OpenMP/invalid.mlir
@@ -2581,15 +2581,13 @@ func.func @omp_taskloop_invalid_composite(%lb: index, %ub: index, %step: index)
// -----
func.func @nested_wrapper(%idx : index) {
omp.workshare {
- // expected-error @below {{nested wrappers not supported}}
+ // expected-error @below {{cannot be composite}}
omp.workshare.loop_wrapper {
omp.simd {
omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) {
omp.yield
}
- omp.terminator
- }
- omp.terminator
+ } {omp.composite}
}
omp.terminator
}
@@ -2599,9 +2597,9 @@ func.func @nested_wrapper(%idx : index) {
// -----
func.func @not_wrapper() {
omp.workshare {
- // expected-error @below {{must be a loop wrapper}}
+ // expected-error @below {{op nested in loop wrapper is not another loop wrapper or `omp.loop_nest`}}
omp.workshare.loop_wrapper {
- omp.terminator
+ %0 = arith.constant 0 : index
}
omp.terminator
}
@@ -2615,7 +2613,6 @@ func.func @missing_workshare(%idx : index) {
omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) {
omp.yield
}
- omp.terminator
}
return
}
>From bf363883787e9b4989dd858f8573579688f7044b Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Wed, 31 Jul 2024 14:11:47 +0900
Subject: [PATCH 02/27] [flang][omp] Emit omp.workshare in frontend
Fix lower test for workshare
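For illustration, a minimal sketch of the shape of IR this change aims for when a `!$omp workshare` region appears inside `!$omp parallel` (abbreviated; the body lowering is unchanged, and the exact output is what the updated CHECK lines in the test below verify):

omp.parallel {
  omp.workshare {
    // lowered body of the workshare region (previously wrapped in omp.single)
    omp.terminator
  }
  omp.terminator
}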
---
flang/lib/Lower/OpenMP/OpenMP.cpp | 30 +++++++++++++++++++++++----
flang/test/Lower/OpenMP/workshare.f90 | 6 +++---
2 files changed, 29 insertions(+), 7 deletions(-)
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index cf469003b7298d..22f6d5bd09cd65 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -1330,6 +1330,15 @@ static void genTaskwaitClauses(lower::AbstractConverter &converter,
loc, llvm::omp::Directive::OMPD_taskwait);
}
+static void genWorkshareClauses(lower::AbstractConverter &converter,
+ semantics::SemanticsContext &semaCtx,
+ lower::StatementContext &stmtCtx,
+ const List<Clause> &clauses, mlir::Location loc,
+ mlir::omp::WorkshareOperands &clauseOps) {
+ ClauseProcessor cp(converter, semaCtx, clauses);
+ cp.processNowait(clauseOps);
+}
+
static void genTeamsClauses(lower::AbstractConverter &converter,
semantics::SemanticsContext &semaCtx,
lower::StatementContext &stmtCtx,
@@ -1923,6 +1932,22 @@ genTaskyieldOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
return converter.getFirOpBuilder().create<mlir::omp::TaskyieldOp>(loc);
}
+static mlir::omp::WorkshareOp
+genWorkshareOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
+ semantics::SemanticsContext &semaCtx, lower::pft::Evaluation &eval,
+ mlir::Location loc, const ConstructQueue &queue,
+ ConstructQueue::iterator item) {
+ lower::StatementContext stmtCtx;
+ mlir::omp::WorkshareOperands clauseOps;
+ genWorkshareClauses(converter, semaCtx, stmtCtx, item->clauses, loc, clauseOps);
+
+ return genOpWithBody<mlir::omp::WorkshareOp>(
+ OpWithBodyGenInfo(converter, symTable, semaCtx, loc, eval,
+ llvm::omp::Directive::OMPD_workshare)
+ .setClauses(&item->clauses),
+ queue, item, clauseOps);
+}
+
static mlir::omp::TeamsOp
genTeamsOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
semantics::SemanticsContext &semaCtx, lower::pft::Evaluation &eval,
@@ -2515,10 +2540,7 @@ static void genOMPDispatch(lower::AbstractConverter &converter,
llvm::omp::getOpenMPDirectiveName(dir) + ")");
// case llvm::omp::Directive::OMPD_workdistribute:
case llvm::omp::Directive::OMPD_workshare:
- // FIXME: Workshare is not a commonly used OpenMP construct, an
- // implementation for this feature will come later. For the codes
- // that use this construct, add a single construct for now.
- genSingleOp(converter, symTable, semaCtx, eval, loc, queue, item);
+ genWorkshareOp(converter, symTable, semaCtx, eval, loc, queue, item);
break;
default:
// Combined and composite constructs should have been split into a sequence
diff --git a/flang/test/Lower/OpenMP/workshare.f90 b/flang/test/Lower/OpenMP/workshare.f90
index 1e11677a15e1f0..8e771952f5b6da 100644
--- a/flang/test/Lower/OpenMP/workshare.f90
+++ b/flang/test/Lower/OpenMP/workshare.f90
@@ -6,7 +6,7 @@ subroutine sb1(arr)
integer :: arr(:)
!CHECK: omp.parallel {
!$omp parallel
-!CHECK: omp.single {
+!CHECK: omp.workshare {
!$omp workshare
arr = 0
!$omp end workshare
@@ -20,7 +20,7 @@ subroutine sb2(arr)
integer :: arr(:)
!CHECK: omp.parallel {
!$omp parallel
-!CHECK: omp.single nowait {
+!CHECK: omp.workshare nowait {
!$omp workshare
arr = 0
!$omp end workshare nowait
@@ -33,7 +33,7 @@ subroutine sb2(arr)
subroutine sb3(arr)
integer :: arr(:)
!CHECK: omp.parallel {
-!CHECK: omp.single {
+!CHECK: omp.workshare {
!$omp parallel workshare
arr = 0
!$omp end parallel workshare
>From e23cf320ed37cb73971bed74cf260e524210a187 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Thu, 22 Aug 2024 17:01:43 +0900
Subject: [PATCH 03/27] Fix function signature
---
flang/lib/Lower/OpenMP/OpenMP.cpp | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index 22f6d5bd09cd65..daeb928e53d061 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -1934,12 +1934,14 @@ genTaskyieldOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
static mlir::omp::WorkshareOp
genWorkshareOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
- semantics::SemanticsContext &semaCtx, lower::pft::Evaluation &eval,
- mlir::Location loc, const ConstructQueue &queue,
- ConstructQueue::iterator item) {
+ semantics::SemanticsContext &semaCtx,
+ lower::pft::Evaluation &eval, mlir::Location loc,
+ const ConstructQueue &queue,
+ ConstructQueue::const_iterator item) {
lower::StatementContext stmtCtx;
mlir::omp::WorkshareOperands clauseOps;
- genWorkshareClauses(converter, semaCtx, stmtCtx, item->clauses, loc, clauseOps);
+ genWorkshareClauses(converter, semaCtx, stmtCtx, item->clauses, loc,
+ clauseOps);
return genOpWithBody<mlir::omp::WorkshareOp>(
OpWithBodyGenInfo(converter, symTable, semaCtx, loc, eval,
>From 6f114e0501f1759eab34dc8ddfc3030c03037cd4 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Thu, 22 Aug 2024 18:07:05 +0900
Subject: [PATCH 04/27] [flang] Introduce ws loop nest generation for HLFIR
lowering
Emit loop nests in a custom wrapper
Only emit unordered loops as omp loops
Fix uninitialized memory bug in genLoopNest
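For illustration, a minimal sketch (with placeholder bounds) of the loop structure genLoopNest now emits for an unordered nest when emitWorkshareLoop is true, following the wrapper/loop-nest construction added to HLFIRTools.cpp below:

omp.workshare.loop_wrapper {
  omp.loop_nest (%iv) : index = (%c1) to (%extent) inclusive step (%c1) {
    // elemental body goes here; LoopNest::body points at this block
    omp.yield
  }
  omp.terminator
}

When emitWorkshareLoop is false or the loops are ordered, the nest is still emitted as fir.do_loop ops as before.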
---
.../flang/Optimizer/Builder/HLFIRTools.h | 12 +++--
flang/lib/Lower/ConvertCall.cpp | 2 +-
flang/lib/Lower/OpenMP/ReductionProcessor.cpp | 4 +-
flang/lib/Optimizer/Builder/HLFIRTools.cpp | 52 ++++++++++++++-----
.../HLFIR/Transforms/BufferizeHLFIR.cpp | 3 +-
.../LowerHLFIROrderedAssignments.cpp | 33 ++++++------
.../Transforms/OptimizedBufferization.cpp | 6 +--
7 files changed, 69 insertions(+), 43 deletions(-)
diff --git a/flang/include/flang/Optimizer/Builder/HLFIRTools.h b/flang/include/flang/Optimizer/Builder/HLFIRTools.h
index 6b41025eea0780..f073f494b3fb21 100644
--- a/flang/include/flang/Optimizer/Builder/HLFIRTools.h
+++ b/flang/include/flang/Optimizer/Builder/HLFIRTools.h
@@ -357,8 +357,8 @@ hlfir::ElementalOp genElementalOp(
/// Structure to describe a loop nest.
struct LoopNest {
- fir::DoLoopOp outerLoop;
- fir::DoLoopOp innerLoop;
+ mlir::Operation *outerOp = nullptr;
+ mlir::Block *body = nullptr;
llvm::SmallVector<mlir::Value> oneBasedIndices;
};
@@ -366,11 +366,13 @@ struct LoopNest {
/// \p isUnordered specifies whether the loops in the loop nest
/// are unordered.
LoopNest genLoopNest(mlir::Location loc, fir::FirOpBuilder &builder,
- mlir::ValueRange extents, bool isUnordered = false);
+ mlir::ValueRange extents, bool isUnordered = false,
+ bool emitWorkshareLoop = false);
inline LoopNest genLoopNest(mlir::Location loc, fir::FirOpBuilder &builder,
- mlir::Value shape, bool isUnordered = false) {
+ mlir::Value shape, bool isUnordered = false,
+ bool emitWorkshareLoop = false) {
return genLoopNest(loc, builder, getIndexExtents(loc, builder, shape),
- isUnordered);
+ isUnordered, emitWorkshareLoop);
}
/// Inline the body of an hlfir.elemental at the current insertion point
diff --git a/flang/lib/Lower/ConvertCall.cpp b/flang/lib/Lower/ConvertCall.cpp
index 9f5b58590fb79e..e84e7afbe82e09 100644
--- a/flang/lib/Lower/ConvertCall.cpp
+++ b/flang/lib/Lower/ConvertCall.cpp
@@ -2135,7 +2135,7 @@ class ElementalCallBuilder {
hlfir::genLoopNest(loc, builder, shape, !mustBeOrdered);
mlir::ValueRange oneBasedIndices = loopNest.oneBasedIndices;
auto insPt = builder.saveInsertionPoint();
- builder.setInsertionPointToStart(loopNest.innerLoop.getBody());
+ builder.setInsertionPointToStart(loopNest.body);
callContext.stmtCtx.pushScope();
for (auto &preparedActual : loweredActuals)
if (preparedActual)
diff --git a/flang/lib/Lower/OpenMP/ReductionProcessor.cpp b/flang/lib/Lower/OpenMP/ReductionProcessor.cpp
index 6b98ea3d0615b6..736de2ee511bef 100644
--- a/flang/lib/Lower/OpenMP/ReductionProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/ReductionProcessor.cpp
@@ -374,7 +374,7 @@ static void genBoxCombiner(fir::FirOpBuilder &builder, mlir::Location loc,
// know this won't miss any opportuinties for clever elemental inlining
hlfir::LoopNest nest = hlfir::genLoopNest(
loc, builder, shapeShift.getExtents(), /*isUnordered=*/true);
- builder.setInsertionPointToStart(nest.innerLoop.getBody());
+ builder.setInsertionPointToStart(nest.body);
mlir::Type refTy = fir::ReferenceType::get(seqTy.getEleTy());
auto lhsEleAddr = builder.create<fir::ArrayCoorOp>(
loc, refTy, lhs, shapeShift, /*slice=*/mlir::Value{},
@@ -388,7 +388,7 @@ static void genBoxCombiner(fir::FirOpBuilder &builder, mlir::Location loc,
builder, loc, redId, refTy, lhsEle, rhsEle);
builder.create<fir::StoreOp>(loc, scalarReduction, lhsEleAddr);
- builder.setInsertionPointAfter(nest.outerLoop);
+ builder.setInsertionPointAfter(nest.outerOp);
builder.create<mlir::omp::YieldOp>(loc, lhsAddr);
}
diff --git a/flang/lib/Optimizer/Builder/HLFIRTools.cpp b/flang/lib/Optimizer/Builder/HLFIRTools.cpp
index 8d0ae2f195178c..333331378841ed 100644
--- a/flang/lib/Optimizer/Builder/HLFIRTools.cpp
+++ b/flang/lib/Optimizer/Builder/HLFIRTools.cpp
@@ -20,6 +20,7 @@
#include "mlir/IR/IRMapping.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/TypeSwitch.h"
+#include <mlir/Dialect/OpenMP/OpenMPDialect.h>
#include <optional>
// Return explicit extents. If the base is a fir.box, this won't read it to
@@ -855,26 +856,51 @@ mlir::Value hlfir::inlineElementalOp(
hlfir::LoopNest hlfir::genLoopNest(mlir::Location loc,
fir::FirOpBuilder &builder,
- mlir::ValueRange extents, bool isUnordered) {
+ mlir::ValueRange extents, bool isUnordered,
+ bool emitWorkshareLoop) {
+ emitWorkshareLoop = emitWorkshareLoop && isUnordered;
hlfir::LoopNest loopNest;
assert(!extents.empty() && "must have at least one extent");
- auto insPt = builder.saveInsertionPoint();
+ mlir::OpBuilder::InsertionGuard guard(builder);
loopNest.oneBasedIndices.assign(extents.size(), mlir::Value{});
// Build loop nest from column to row.
auto one = builder.create<mlir::arith::ConstantIndexOp>(loc, 1);
mlir::Type indexType = builder.getIndexType();
- unsigned dim = extents.size() - 1;
- for (auto extent : llvm::reverse(extents)) {
- auto ub = builder.createConvert(loc, indexType, extent);
- loopNest.innerLoop =
- builder.create<fir::DoLoopOp>(loc, one, ub, one, isUnordered);
- builder.setInsertionPointToStart(loopNest.innerLoop.getBody());
- // Reverse the indices so they are in column-major order.
- loopNest.oneBasedIndices[dim--] = loopNest.innerLoop.getInductionVar();
- if (!loopNest.outerLoop)
- loopNest.outerLoop = loopNest.innerLoop;
+ if (emitWorkshareLoop) {
+ auto wslw = builder.create<mlir::omp::WorkshareLoopWrapperOp>(loc);
+ loopNest.outerOp = wslw;
+ builder.createBlock(&wslw.getRegion());
+ mlir::omp::LoopNestOperands lnops;
+ lnops.loopInclusive = builder.getUnitAttr();
+ for (auto extent : llvm::reverse(extents)) {
+ lnops.loopLowerBounds.push_back(one);
+ lnops.loopUpperBounds.push_back(extent);
+ lnops.loopSteps.push_back(one);
+ }
+ auto lnOp = builder.create<mlir::omp::LoopNestOp>(loc, lnops);
+ builder.create<mlir::omp::TerminatorOp>(loc);
+ mlir::Block *block = builder.createBlock(&lnOp.getRegion());
+ for (auto extent : llvm::reverse(extents))
+ block->addArgument(extent.getType(), extent.getLoc());
+ loopNest.body = block;
+ builder.create<mlir::omp::YieldOp>(loc);
+ for (unsigned dim = 0; dim < extents.size(); dim++)
+ loopNest.oneBasedIndices[extents.size() - dim - 1] =
+ lnOp.getRegion().front().getArgument(dim);
+ } else {
+ unsigned dim = extents.size() - 1;
+ for (auto extent : llvm::reverse(extents)) {
+ auto ub = builder.createConvert(loc, indexType, extent);
+ auto doLoop =
+ builder.create<fir::DoLoopOp>(loc, one, ub, one, isUnordered);
+ loopNest.body = doLoop.getBody();
+ builder.setInsertionPointToStart(loopNest.body);
+ // Reverse the indices so they are in column-major order.
+ loopNest.oneBasedIndices[dim--] = doLoop.getInductionVar();
+ if (!loopNest.outerOp)
+ loopNest.outerOp = doLoop;
+ }
}
- builder.restoreInsertionPoint(insPt);
return loopNest;
}
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp b/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp
index a70a6b388c4b1a..07794828fce267 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp
@@ -26,6 +26,7 @@
#include "flang/Optimizer/HLFIR/HLFIRDialect.h"
#include "flang/Optimizer/HLFIR/HLFIROps.h"
#include "flang/Optimizer/HLFIR/Passes.h"
+#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
#include "mlir/IR/Dominance.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
@@ -793,7 +794,7 @@ struct ElementalOpConversion
hlfir::LoopNest loopNest =
hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered());
auto insPt = builder.saveInsertionPoint();
- builder.setInsertionPointToStart(loopNest.innerLoop.getBody());
+ builder.setInsertionPointToStart(loopNest.body);
auto yield = hlfir::inlineElementalOp(loc, builder, elemental,
loopNest.oneBasedIndices);
hlfir::Entity elementValue(yield.getElementValue());
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp
index 85dd517cb57914..424566462e8fe0 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp
@@ -464,7 +464,7 @@ void OrderedAssignmentRewriter::pre(hlfir::RegionAssignOp regionAssignOp) {
// if the LHS is not).
mlir::Value shape = hlfir::genShape(loc, builder, lhsEntity);
elementalLoopNest = hlfir::genLoopNest(loc, builder, shape);
- builder.setInsertionPointToStart(elementalLoopNest->innerLoop.getBody());
+ builder.setInsertionPointToStart(elementalLoopNest->body);
lhsEntity = hlfir::getElementAt(loc, builder, lhsEntity,
elementalLoopNest->oneBasedIndices);
rhsEntity = hlfir::getElementAt(loc, builder, rhsEntity,
@@ -484,7 +484,7 @@ void OrderedAssignmentRewriter::pre(hlfir::RegionAssignOp regionAssignOp) {
for (auto &cleanupConversion : argConversionCleanups)
cleanupConversion();
if (elementalLoopNest)
- builder.setInsertionPointAfter(elementalLoopNest->outerLoop);
+ builder.setInsertionPointAfter(elementalLoopNest->outerOp);
} else {
// TODO: preserve allocatable assignment aspects for forall once
// they are conveyed in hlfir.region_assign.
@@ -492,8 +492,7 @@ void OrderedAssignmentRewriter::pre(hlfir::RegionAssignOp regionAssignOp) {
}
generateCleanupIfAny(loweredLhs.elementalCleanup);
if (loweredLhs.vectorSubscriptLoopNest)
- builder.setInsertionPointAfter(
- loweredLhs.vectorSubscriptLoopNest->outerLoop);
+ builder.setInsertionPointAfter(loweredLhs.vectorSubscriptLoopNest->outerOp);
generateCleanupIfAny(oldRhsYield);
generateCleanupIfAny(loweredLhs.nonElementalCleanup);
}
@@ -518,8 +517,8 @@ void OrderedAssignmentRewriter::pre(hlfir::WhereOp whereOp) {
hlfir::Entity savedMask{maybeSaved->first};
mlir::Value shape = hlfir::genShape(loc, builder, savedMask);
whereLoopNest = hlfir::genLoopNest(loc, builder, shape);
- constructStack.push_back(whereLoopNest->outerLoop.getOperation());
- builder.setInsertionPointToStart(whereLoopNest->innerLoop.getBody());
+ constructStack.push_back(whereLoopNest->outerOp);
+ builder.setInsertionPointToStart(whereLoopNest->body);
mlir::Value cdt = hlfir::getElementAt(loc, builder, savedMask,
whereLoopNest->oneBasedIndices);
generateMaskIfOp(cdt);
@@ -527,7 +526,7 @@ void OrderedAssignmentRewriter::pre(hlfir::WhereOp whereOp) {
// If this is the same run as the one that saved the value, the clean-up
// was left-over to be done now.
auto insertionPoint = builder.saveInsertionPoint();
- builder.setInsertionPointAfter(whereLoopNest->outerLoop);
+ builder.setInsertionPointAfter(whereLoopNest->outerOp);
generateCleanupIfAny(maybeSaved->second);
builder.restoreInsertionPoint(insertionPoint);
}
@@ -539,8 +538,8 @@ void OrderedAssignmentRewriter::pre(hlfir::WhereOp whereOp) {
mask.generateNoneElementalPart(builder, mapper);
mlir::Value shape = mask.generateShape(builder, mapper);
whereLoopNest = hlfir::genLoopNest(loc, builder, shape);
- constructStack.push_back(whereLoopNest->outerLoop.getOperation());
- builder.setInsertionPointToStart(whereLoopNest->innerLoop.getBody());
+ constructStack.push_back(whereLoopNest->outerOp);
+ builder.setInsertionPointToStart(whereLoopNest->body);
mlir::Value cdt = generateMaskedEntity(mask);
generateMaskIfOp(cdt);
return;
@@ -754,7 +753,7 @@ OrderedAssignmentRewriter::generateYieldedLHS(
loweredLhs.vectorSubscriptLoopNest = hlfir::genLoopNest(
loc, builder, loweredLhs.vectorSubscriptShape.value());
builder.setInsertionPointToStart(
- loweredLhs.vectorSubscriptLoopNest->innerLoop.getBody());
+ loweredLhs.vectorSubscriptLoopNest->body);
}
loweredLhs.lhs = temp->second.fetch(loc, builder);
return loweredLhs;
@@ -771,8 +770,7 @@ OrderedAssignmentRewriter::generateYieldedLHS(
loweredLhs.vectorSubscriptLoopNest =
hlfir::genLoopNest(loc, builder, *loweredLhs.vectorSubscriptShape,
!elementalAddrLhs.isOrdered());
- builder.setInsertionPointToStart(
- loweredLhs.vectorSubscriptLoopNest->innerLoop.getBody());
+ builder.setInsertionPointToStart(loweredLhs.vectorSubscriptLoopNest->body);
mapper.map(elementalAddrLhs.getIndices(),
loweredLhs.vectorSubscriptLoopNest->oneBasedIndices);
for (auto &op : elementalAddrLhs.getBody().front().without_terminator())
@@ -798,11 +796,11 @@ OrderedAssignmentRewriter::generateMaskedEntity(MaskedArrayExpr &maskedExpr) {
if (!maskedExpr.noneElementalPartWasGenerated) {
// Generate none elemental part before the where loops (but inside the
// current forall loops if any).
- builder.setInsertionPoint(whereLoopNest->outerLoop);
+ builder.setInsertionPoint(whereLoopNest->outerOp);
maskedExpr.generateNoneElementalPart(builder, mapper);
}
// Generate the none elemental part cleanup after the where loops.
- builder.setInsertionPointAfter(whereLoopNest->outerLoop);
+ builder.setInsertionPointAfter(whereLoopNest->outerOp);
maskedExpr.generateNoneElementalCleanupIfAny(builder, mapper);
// Generate the value of the current element for the masked expression
// at the current insertion point (inside the where loops, and any fir.if
@@ -1242,7 +1240,7 @@ void OrderedAssignmentRewriter::saveLeftHandSide(
LhsValueAndCleanUp loweredLhs = generateYieldedLHS(loc, region);
fir::factory::TemporaryStorage *temp = nullptr;
if (loweredLhs.vectorSubscriptLoopNest)
- constructStack.push_back(loweredLhs.vectorSubscriptLoopNest->outerLoop);
+ constructStack.push_back(loweredLhs.vectorSubscriptLoopNest->outerOp);
if (loweredLhs.vectorSubscriptLoopNest && !rhsIsArray(regionAssignOp)) {
// Vector subscripted entity for which the shape must also be saved on top
// of the element addresses (e.g. the shape may change in each forall
@@ -1265,7 +1263,7 @@ void OrderedAssignmentRewriter::saveLeftHandSide(
// subscripted LHS.
auto &vectorTmp = temp->cast<fir::factory::AnyVectorSubscriptStack>();
auto insertionPoint = builder.saveInsertionPoint();
- builder.setInsertionPoint(loweredLhs.vectorSubscriptLoopNest->outerLoop);
+ builder.setInsertionPoint(loweredLhs.vectorSubscriptLoopNest->outerOp);
vectorTmp.pushShape(loc, builder, shape);
builder.restoreInsertionPoint(insertionPoint);
} else {
@@ -1290,8 +1288,7 @@ void OrderedAssignmentRewriter::saveLeftHandSide(
generateCleanupIfAny(loweredLhs.elementalCleanup);
if (loweredLhs.vectorSubscriptLoopNest) {
constructStack.pop_back();
- builder.setInsertionPointAfter(
- loweredLhs.vectorSubscriptLoopNest->outerLoop);
+ builder.setInsertionPointAfter(loweredLhs.vectorSubscriptLoopNest->outerOp);
}
}
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
index 7553e05b470634..3a0a98dc594463 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
@@ -483,7 +483,7 @@ llvm::LogicalResult ElementalAssignBufferization::matchAndRewrite(
// hlfir.elemental region inside the inner loop
hlfir::LoopNest loopNest =
hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered());
- builder.setInsertionPointToStart(loopNest.innerLoop.getBody());
+ builder.setInsertionPointToStart(loopNest.body);
auto yield = hlfir::inlineElementalOp(loc, builder, elemental,
loopNest.oneBasedIndices);
hlfir::Entity elementValue{yield.getElementValue()};
@@ -554,7 +554,7 @@ llvm::LogicalResult BroadcastAssignBufferization::matchAndRewrite(
hlfir::getIndexExtents(loc, builder, shape);
hlfir::LoopNest loopNest =
hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true);
- builder.setInsertionPointToStart(loopNest.innerLoop.getBody());
+ builder.setInsertionPointToStart(loopNest.body);
auto arrayElement =
hlfir::getElementAt(loc, builder, lhs, loopNest.oneBasedIndices);
builder.create<hlfir::AssignOp>(loc, rhs, arrayElement);
@@ -649,7 +649,7 @@ llvm::LogicalResult VariableAssignBufferization::matchAndRewrite(
hlfir::getIndexExtents(loc, builder, shape);
hlfir::LoopNest loopNest =
hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true);
- builder.setInsertionPointToStart(loopNest.innerLoop.getBody());
+ builder.setInsertionPointToStart(loopNest.body);
auto rhsArrayElement =
hlfir::getElementAt(loc, builder, rhs, loopNest.oneBasedIndices);
rhsArrayElement = hlfir::loadTrivialScalar(loc, builder, rhsArrayElement);
>From d8cfd384bb47d38c09ed4e1a1cd598b891d0915b Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Sun, 20 Oct 2024 01:35:01 +0900
Subject: [PATCH 05/27] genLoopNest fix
---
flang/lib/Optimizer/Builder/HLFIRTools.cpp | 1 -
1 file changed, 1 deletion(-)
diff --git a/flang/lib/Optimizer/Builder/HLFIRTools.cpp b/flang/lib/Optimizer/Builder/HLFIRTools.cpp
index 333331378841ed..7425ccf7fc0e30 100644
--- a/flang/lib/Optimizer/Builder/HLFIRTools.cpp
+++ b/flang/lib/Optimizer/Builder/HLFIRTools.cpp
@@ -878,7 +878,6 @@ hlfir::LoopNest hlfir::genLoopNest(mlir::Location loc,
lnops.loopSteps.push_back(one);
}
auto lnOp = builder.create<mlir::omp::LoopNestOp>(loc, lnops);
- builder.create<mlir::omp::TerminatorOp>(loc);
mlir::Block *block = builder.createBlock(&lnOp.getRegion());
for (auto extent : llvm::reverse(extents))
block->addArgument(extent.getType(), extent.getLoc());
>From 568abc28ee32e4ee05190d32ef86ff73215dbaa8 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Sun, 4 Aug 2024 22:06:55 +0900
Subject: [PATCH 06/27] [flang] Lower omp.workshare to other omp constructs
Change to workshare loop wrapper op
Move single op declaration
Schedule pass properly
Correctly handle nested loop nests to be parallelized by workshare
Leave comments for shouldUseWorkshareLowering
Use copyprivate to scatter val from omp.single
TODO still need to implement copy function
TODO transitive check for usage outside of omp.single not implemented yet
Transitively check for users outside of single op
TODO need to implement copy func
TODO need to hoist allocas outside of single regions
Add tests
Hoist allocas
More tests
Emit body for copy func
Test the tmp storing logic
Clean up trivially dead ops
Only handle single-block regions for now
Fix tests for custom assembly for loop wrapper
Only run the lower workshare pass if openmp is enabled
Implement some missing functionality
Fix tests
Fix test
Iterate backwards to find all trivially dead ops
Add explanation comment for createCopyFunc
Update test
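For illustration, a rough sketch of the end-to-end lowering this pass performs, assuming a workshare region containing one parallelizable loop; the names are placeholders and the precise output is what the CHECK lines in the added tests verify:

omp.single copyprivate(%tmp -> @_workshare_copy_... : ...) {
  // statements that must run on a single thread; allocations are stored
  // to %tmp so the other threads can reload them
  omp.terminator
}
omp.wsloop {
  // body of the omp.workshare.loop_wrapper, shared among the team
}
omp.single nowait {
  // trailing single-threaded statements using the reloaded values
  omp.terminator
}
omp.barrier // emitted unless the workshare construct had nowait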
---
flang/include/flang/Optimizer/OpenMP/Passes.h | 5 +
.../include/flang/Optimizer/OpenMP/Passes.td | 5 +
flang/include/flang/Tools/CrossToolHelpers.h | 1 +
flang/lib/Frontend/FrontendActions.cpp | 10 +-
flang/lib/Optimizer/OpenMP/CMakeLists.txt | 1 +
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 446 ++++++++++++++++++
flang/lib/Optimizer/Passes/Pipelines.cpp | 6 +-
flang/test/Fir/basic-program.fir | 1 +
.../Transforms/OpenMP/lower-workshare.mlir | 189 ++++++++
.../Transforms/OpenMP/lower-workshare2.mlir | 23 +
.../Transforms/OpenMP/lower-workshare3.mlir | 74 +++
.../Transforms/OpenMP/lower-workshare4.mlir | 59 +++
.../Transforms/OpenMP/lower-workshare5.mlir | 42 ++
.../Transforms/OpenMP/lower-workshare6.mlir | 51 ++
flang/tools/bbc/bbc.cpp | 5 +-
flang/tools/tco/tco.cpp | 1 +
16 files changed, 915 insertions(+), 4 deletions(-)
create mode 100644 flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare2.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare3.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare4.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare5.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare6.mlir
diff --git a/flang/include/flang/Optimizer/OpenMP/Passes.h b/flang/include/flang/Optimizer/OpenMP/Passes.h
index 403d79667bf448..feb395f1a12dbd 100644
--- a/flang/include/flang/Optimizer/OpenMP/Passes.h
+++ b/flang/include/flang/Optimizer/OpenMP/Passes.h
@@ -25,6 +25,11 @@ namespace flangomp {
#define GEN_PASS_REGISTRATION
#include "flang/Optimizer/OpenMP/Passes.h.inc"
+/// Implements the logic specified in the 2.8.3 workshare Construct section of
+/// the OpenMP standard, which specifies what statements or constructs shall be
+/// divided into units of work.
+bool shouldUseWorkshareLowering(mlir::Operation *op);
+
} // namespace flangomp
#endif // FORTRAN_OPTIMIZER_OPENMP_PASSES_H
diff --git a/flang/include/flang/Optimizer/OpenMP/Passes.td b/flang/include/flang/Optimizer/OpenMP/Passes.td
index 1c0ce08f5b4838..dc1956bea9fb29 100644
--- a/flang/include/flang/Optimizer/OpenMP/Passes.td
+++ b/flang/include/flang/Optimizer/OpenMP/Passes.td
@@ -37,4 +37,9 @@ def FunctionFilteringPass : Pass<"omp-function-filtering"> {
];
}
+// Needs to be scheduled on Module as we create functions in it
+def LowerWorkshare : Pass<"lower-workshare", "::mlir::ModuleOp"> {
+ let summary = "Lower workshare construct";
+}
+
#endif //FORTRAN_OPTIMIZER_OPENMP_PASSES
diff --git a/flang/include/flang/Tools/CrossToolHelpers.h b/flang/include/flang/Tools/CrossToolHelpers.h
index df4b21ada058fe..d936b739e58157 100644
--- a/flang/include/flang/Tools/CrossToolHelpers.h
+++ b/flang/include/flang/Tools/CrossToolHelpers.h
@@ -123,6 +123,7 @@ struct MLIRToLLVMPassPipelineConfig : public FlangEPCallBacks {
false; ///< Set no-signed-zeros-fp-math attribute for functions.
bool UnsafeFPMath = false; ///< Set unsafe-fp-math attribute for functions.
bool NSWOnLoopVarInc = false; ///< Add nsw flag to loop variable increments.
+ bool EnableOpenMP = false; ///< Enable OpenMP lowering.
};
struct OffloadModuleOpts {
diff --git a/flang/lib/Frontend/FrontendActions.cpp b/flang/lib/Frontend/FrontendActions.cpp
index f2e460fc53a67f..8c21fe18e67b4d 100644
--- a/flang/lib/Frontend/FrontendActions.cpp
+++ b/flang/lib/Frontend/FrontendActions.cpp
@@ -715,7 +715,11 @@ void CodeGenAction::lowerHLFIRToFIR() {
pm.enableVerifier(/*verifyPasses=*/true);
// Create the pass pipeline
- fir::createHLFIRToFIRPassPipeline(pm, level);
+ fir::createHLFIRToFIRPassPipeline(
+ pm,
+ ci.getInvocation().getFrontendOpts().features.IsEnabled(
+ Fortran::common::LanguageFeature::OpenMP),
+ level);
(void)mlir::applyPassManagerCLOptions(pm);
if (!mlir::succeeded(pm.run(*mlirModule))) {
@@ -828,6 +832,10 @@ void CodeGenAction::generateLLVMIR() {
config.VScaleMax = vsr->second;
}
+ if (ci.getInvocation().getFrontendOpts().features.IsEnabled(
+ Fortran::common::LanguageFeature::OpenMP))
+ config.EnableOpenMP = true;
+
if (ci.getInvocation().getLoweringOpts().getNSWOnLoopVarInc())
config.NSWOnLoopVarInc = true;
diff --git a/flang/lib/Optimizer/OpenMP/CMakeLists.txt b/flang/lib/Optimizer/OpenMP/CMakeLists.txt
index 92051634f0378b..39e92d388288d4 100644
--- a/flang/lib/Optimizer/OpenMP/CMakeLists.txt
+++ b/flang/lib/Optimizer/OpenMP/CMakeLists.txt
@@ -4,6 +4,7 @@ add_flang_library(FlangOpenMPTransforms
FunctionFiltering.cpp
MapInfoFinalization.cpp
MarkDeclareTarget.cpp
+ LowerWorkshare.cpp
DEPENDS
FIRDialect
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
new file mode 100644
index 00000000000000..6e5538b54ba5e0
--- /dev/null
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -0,0 +1,446 @@
+//===- LowerWorkshare.cpp - Lower workshare construct --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the lowering of omp.workshare to other omp constructs.
+//
+// This pass is tasked with parallelizing the loops nested in
+// workshare.loop_wrapper, while both the Fortran-to-MLIR lowering and the
+// HLFIR-to-FIR lowering pipelines are responsible for emitting the
+// workshare.loop_wrapper ops where appropriate, according to the
+// `shouldUseWorkshareLowering` function.
+//
+//===----------------------------------------------------------------------===//
+
+#include <flang/Optimizer/Builder/FIRBuilder.h>
+#include <flang/Optimizer/Dialect/FIROps.h>
+#include <flang/Optimizer/Dialect/FIRType.h>
+#include <flang/Optimizer/HLFIR/HLFIROps.h>
+#include <flang/Optimizer/OpenMP/Passes.h>
+#include <llvm/ADT/BreadthFirstIterator.h>
+#include <llvm/ADT/STLExtras.h>
+#include <llvm/ADT/SmallVectorExtras.h>
+#include <llvm/ADT/iterator_range.h>
+#include <llvm/Support/ErrorHandling.h>
+#include <mlir/Dialect/Arith/IR/Arith.h>
+#include <mlir/Dialect/LLVMIR/LLVMTypes.h>
+#include <mlir/Dialect/OpenMP/OpenMPClauseOperands.h>
+#include <mlir/Dialect/OpenMP/OpenMPDialect.h>
+#include <mlir/Dialect/SCF/IR/SCF.h>
+#include <mlir/IR/BuiltinOps.h>
+#include <mlir/IR/IRMapping.h>
+#include <mlir/IR/OpDefinition.h>
+#include <mlir/IR/PatternMatch.h>
+#include <mlir/IR/Visitors.h>
+#include <mlir/Interfaces/SideEffectInterfaces.h>
+#include <mlir/Support/LLVM.h>
+#include <mlir/Transforms/GreedyPatternRewriteDriver.h>
+
+#include <variant>
+
+namespace flangomp {
+#define GEN_PASS_DEF_LOWERWORKSHARE
+#include "flang/Optimizer/OpenMP/Passes.h.inc"
+} // namespace flangomp
+
+#define DEBUG_TYPE "lower-workshare"
+
+using namespace mlir;
+
+namespace flangomp {
+
+// Checks for nesting pattern below as we need to avoid sharing the work of
+// statements which are nested in some constructs such as omp.critical or
+// another omp.parallel.
+//
+// omp.workshare { // `wsOp`
+// ...
+// omp.T { // `parent`
+// ...
+// `op`
+//
+template <typename T>
+static bool isNestedIn(omp::WorkshareOp wsOp, Operation *op) {
+ T parent = op->getParentOfType<T>();
+ if (!parent)
+ return false;
+ return wsOp->isProperAncestor(parent);
+}
+
+bool shouldUseWorkshareLowering(Operation *op) {
+ auto parentWorkshare = op->getParentOfType<omp::WorkshareOp>();
+
+ if (!parentWorkshare)
+ return false;
+
+ if (isNestedIn<omp::CriticalOp>(parentWorkshare, op))
+ return false;
+
+ // 2.8.3 workshare Construct
+ // For a parallel construct, the construct is a unit of work with respect to
+ // the workshare construct. The statements contained in the parallel construct
+ // are executed by a new thread team.
+ if (isNestedIn<omp::ParallelOp>(parentWorkshare, op))
+ return false;
+
+ // 2.8.2 single Construct
+ // Binding The binding thread set for a single region is the current team. A
+ // single region binds to the innermost enclosing parallel region.
+ // Description Only one of the encountering threads will execute the
+ // structured block associated with the single construct.
+ if (isNestedIn<omp::SingleOp>(parentWorkshare, op))
+ return false;
+
+ return true;
+}
+
+} // namespace flangomp
+
+namespace {
+
+struct SingleRegion {
+ Block::iterator begin, end;
+};
+
+static bool mustParallelizeOp(Operation *op) {
+ return op
+ ->walk([&](Operation *nested) {
+ // We need to be careful not to pick up workshare.loop_wrapper in nested
+ // omp.parallel{omp.workshare} regions, i.e. make sure that `nested`
+ // binds to the workshare region we are currently handling.
+ //
+ // For example:
+ //
+ // omp.parallel {
+ // omp.workshare { // currently handling this
+ // omp.parallel {
+ // omp.workshare { // nested workshare
+ // omp.workshare.loop_wrapper {}
+ //
+ // Therefore, we skip if we encounter a nested omp.workshare.
+ if (isa<omp::WorkshareOp>(op))
+ return WalkResult::skip();
+ if (isa<omp::WorkshareLoopWrapperOp>(op))
+ return WalkResult::interrupt();
+ return WalkResult::advance();
+ })
+ .wasInterrupted();
+}
+
+static bool isSafeToParallelize(Operation *op) {
+ return isa<hlfir::DeclareOp>(op) || isa<fir::DeclareOp>(op) ||
+ isMemoryEffectFree(op);
+}
+
+/// Simple shallow copies suffice for our purposes in this pass, so we implement
+/// this simpler alternative to the full-fledged `createCopyFunc` in the
+/// frontend.
+static mlir::func::FuncOp createCopyFunc(mlir::Location loc, mlir::Type varType,
+ fir::FirOpBuilder builder) {
+ mlir::ModuleOp module = builder.getModule();
+ auto rt = cast<fir::ReferenceType>(varType);
+ mlir::Type eleTy = rt.getEleTy();
+ std::string copyFuncName =
+ fir::getTypeAsString(eleTy, builder.getKindMap(), "_workshare_copy");
+
+ if (auto decl = module.lookupSymbol<mlir::func::FuncOp>(copyFuncName))
+ return decl;
+ // create function
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ mlir::OpBuilder modBuilder(module.getBodyRegion());
+ llvm::SmallVector<mlir::Type> argsTy = {varType, varType};
+ auto funcType = mlir::FunctionType::get(builder.getContext(), argsTy, {});
+ mlir::func::FuncOp funcOp =
+ modBuilder.create<mlir::func::FuncOp>(loc, copyFuncName, funcType);
+ funcOp.setVisibility(mlir::SymbolTable::Visibility::Private);
+ builder.createBlock(&funcOp.getRegion(), funcOp.getRegion().end(), argsTy,
+ {loc, loc});
+ builder.setInsertionPointToStart(&funcOp.getRegion().back());
+
+ Value loaded = builder.create<fir::LoadOp>(loc, funcOp.getArgument(0));
+ builder.create<fir::StoreOp>(loc, loaded, funcOp.getArgument(1));
+
+ builder.create<mlir::func::ReturnOp>(loc);
+ return funcOp;
+}
+
+static bool isUserOutsideSR(Operation *user, Operation *parentOp,
+ SingleRegion sr) {
+ while (user->getParentOp() != parentOp)
+ user = user->getParentOp();
+ return sr.begin->getBlock() != user->getBlock() ||
+ !(user->isBeforeInBlock(&*sr.end) && sr.begin->isBeforeInBlock(user));
+}
+
+static bool isTransitivelyUsedOutside(Value v, SingleRegion sr) {
+ Block *srBlock = sr.begin->getBlock();
+ Operation *parentOp = srBlock->getParentOp();
+
+ for (auto &use : v.getUses()) {
+ Operation *user = use.getOwner();
+ if (isUserOutsideSR(user, parentOp, sr))
+ return true;
+
+ // Results of nested users cannot be used outside of the SR
+ if (user->getBlock() != srBlock)
+ continue;
+
+ // An operation that is not safe to parallelize will be handled separately
+ if (!isSafeToParallelize(user))
+ continue;
+
+ for (auto res : user->getResults())
+ if (isTransitivelyUsedOutside(res, sr))
+ return true;
+ }
+ return false;
+}
+
+/// We clone pure operations in both the parallel and single blocks. This
+/// function cleans them up if they end up with no uses.
+static void cleanupBlock(Block *block) {
+ for (Operation &op : llvm::make_early_inc_range(
+ llvm::make_range(block->rbegin(), block->rend())))
+ if (isOpTriviallyDead(&op))
+ op.erase();
+}
+
+static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
+ IRMapping &rootMapping, Location loc,
+ mlir::DominanceInfo &di) {
+ OpBuilder rootBuilder(sourceRegion.getContext());
+ ModuleOp m = sourceRegion.getParentOfType<ModuleOp>();
+ OpBuilder copyFuncBuilder(m.getBodyRegion());
+ fir::FirOpBuilder firCopyFuncBuilder(copyFuncBuilder, m);
+
+ auto mapReloadedValue =
+ [&](Value v, OpBuilder allocaBuilder, OpBuilder singleBuilder,
+ OpBuilder parallelBuilder, IRMapping singleMapping) -> Value {
+ if (auto reloaded = rootMapping.lookupOrNull(v))
+ return nullptr;
+ Type ty = v.getType();
+ Value alloc = allocaBuilder.create<fir::AllocaOp>(loc, ty);
+ singleBuilder.create<fir::StoreOp>(loc, singleMapping.lookup(v), alloc);
+ Value reloaded = parallelBuilder.create<fir::LoadOp>(loc, ty, alloc);
+ rootMapping.map(v, reloaded);
+ return alloc;
+ };
+
+ auto moveToSingle = [&](SingleRegion sr, OpBuilder allocaBuilder,
+ OpBuilder singleBuilder,
+ OpBuilder parallelBuilder) -> SmallVector<Value> {
+ IRMapping singleMapping = rootMapping;
+ SmallVector<Value> copyPrivate;
+
+ for (Operation &op : llvm::make_range(sr.begin, sr.end)) {
+ if (isSafeToParallelize(&op)) {
+ singleBuilder.clone(op, singleMapping);
+ parallelBuilder.clone(op, rootMapping);
+ } else if (auto alloca = dyn_cast<fir::AllocaOp>(&op)) {
+ auto hoisted =
+ cast<fir::AllocaOp>(allocaBuilder.clone(*alloca, singleMapping));
+ rootMapping.map(&*alloca, &*hoisted);
+ rootMapping.map(alloca.getResult(), hoisted.getResult());
+ copyPrivate.push_back(hoisted);
+ } else {
+ singleBuilder.clone(op, singleMapping);
+ // Prepare reloaded values for results of operations that cannot be
+ // safely parallelized and which are used after the region `sr`
+ for (auto res : op.getResults()) {
+ if (isTransitivelyUsedOutside(res, sr)) {
+ auto alloc = mapReloadedValue(res, allocaBuilder, singleBuilder,
+ parallelBuilder, singleMapping);
+ if (alloc)
+ copyPrivate.push_back(alloc);
+ }
+ }
+ }
+ }
+ singleBuilder.create<omp::TerminatorOp>(loc);
+ return copyPrivate;
+ };
+
+ for (Block &block : sourceRegion) {
+ Block *targetBlock = rootBuilder.createBlock(
+ &targetRegion, {}, block.getArgumentTypes(),
+ llvm::map_to_vector(block.getArguments(),
+ [](BlockArgument arg) { return arg.getLoc(); }));
+ rootMapping.map(&block, targetBlock);
+ rootMapping.map(block.getArguments(), targetBlock->getArguments());
+ }
+
+ auto handleOneBlock = [&](Block &block) {
+ Block &targetBlock = *rootMapping.lookup(&block);
+ rootBuilder.setInsertionPointToStart(&targetBlock);
+ Operation *terminator = block.getTerminator();
+ SmallVector<std::variant<SingleRegion, Operation *>> regions;
+
+ auto it = block.begin();
+ auto getOneRegion = [&]() {
+ if (&*it == terminator)
+ return false;
+ if (mustParallelizeOp(&*it)) {
+ regions.push_back(&*it);
+ it++;
+ return true;
+ }
+ SingleRegion sr;
+ sr.begin = it;
+ while (&*it != terminator && !mustParallelizeOp(&*it))
+ it++;
+ sr.end = it;
+ assert(sr.begin != sr.end);
+ regions.push_back(sr);
+ return true;
+ };
+ while (getOneRegion())
+ ;
+
+ for (auto [i, opOrSingle] : llvm::enumerate(regions)) {
+ bool isLast = i + 1 == regions.size();
+ if (std::holds_alternative<SingleRegion>(opOrSingle)) {
+ OpBuilder singleBuilder(sourceRegion.getContext());
+ Block *singleBlock = new Block();
+ singleBuilder.setInsertionPointToStart(singleBlock);
+
+ OpBuilder allocaBuilder(sourceRegion.getContext());
+ Block *allocaBlock = new Block();
+ allocaBuilder.setInsertionPointToStart(allocaBlock);
+
+ OpBuilder parallelBuilder(sourceRegion.getContext());
+ Block *parallelBlock = new Block();
+ parallelBuilder.setInsertionPointToStart(parallelBlock);
+
+ omp::SingleOperands singleOperands;
+ if (isLast)
+ singleOperands.nowait = rootBuilder.getUnitAttr();
+ singleOperands.copyprivateVars =
+ moveToSingle(std::get<SingleRegion>(opOrSingle), allocaBuilder,
+ singleBuilder, parallelBuilder);
+ cleanupBlock(singleBlock);
+ for (auto var : singleOperands.copyprivateVars) {
+ mlir::func::FuncOp funcOp =
+ createCopyFunc(loc, var.getType(), firCopyFuncBuilder);
+ singleOperands.copyprivateSyms.push_back(SymbolRefAttr::get(funcOp));
+ }
+ omp::SingleOp singleOp =
+ rootBuilder.create<omp::SingleOp>(loc, singleOperands);
+ singleOp.getRegion().push_back(singleBlock);
+ rootBuilder.getInsertionBlock()->getOperations().splice(
+ rootBuilder.getInsertionPoint(), parallelBlock->getOperations());
+ targetRegion.front().getOperations().splice(
+ singleOp->getIterator(), allocaBlock->getOperations());
+ delete allocaBlock;
+ delete parallelBlock;
+ } else {
+ auto op = std::get<Operation *>(opOrSingle);
+ if (auto wslw = dyn_cast<omp::WorkshareLoopWrapperOp>(op)) {
+ omp::WsloopOperands wsloopOperands;
+ if (isLast)
+ wsloopOperands.nowait = rootBuilder.getUnitAttr();
+ auto wsloop =
+ rootBuilder.create<mlir::omp::WsloopOp>(loc, wsloopOperands);
+ auto clonedWslw = cast<omp::WorkshareLoopWrapperOp>(
+ rootBuilder.clone(*wslw, rootMapping));
+ wsloop.getRegion().takeBody(clonedWslw.getRegion());
+ clonedWslw->erase();
+ } else {
+ assert(mustParallelizeOp(op));
+ Operation *cloned = rootBuilder.cloneWithoutRegions(*op, rootMapping);
+ for (auto [region, clonedRegion] :
+ llvm::zip(op->getRegions(), cloned->getRegions()))
+ parallelizeRegion(region, clonedRegion, rootMapping, loc, di);
+ }
+ }
+ }
+
+ rootBuilder.clone(*block.getTerminator(), rootMapping);
+ };
+
+ if (sourceRegion.hasOneBlock()) {
+ handleOneBlock(sourceRegion.front());
+ } else {
+ auto &domTree = di.getDomTree(&sourceRegion);
+ for (auto node : llvm::breadth_first(domTree.getRootNode())) {
+ handleOneBlock(*node->getBlock());
+ }
+ }
+
+ for (Block &targetBlock : targetRegion)
+ cleanupBlock(&targetBlock);
+}
+
+/// Lowers workshare to a sequence of single-thread regions and parallel loops
+///
+/// For example:
+///
+/// omp.workshare {
+/// %a = fir.allocmem
+/// omp.workshare.loop_wrapper {}
+/// fir.call Assign %b %a
+/// fir.freemem %a
+/// }
+///
+/// becomes
+///
+/// %tmp = fir.alloca
+/// omp.single copyprivate(%tmp) {
+/// %a = fir.allocmem
+/// fir.store %a %tmp
+/// }
+/// %a_reloaded = fir.load %tmp
+/// omp.workshare.loop_wrapper {}
+/// omp.single {
+/// fir.call Assign %b %a_reloaded
+/// fir.freemem %a_reloaded
+/// }
+///
+/// Note that we allocate temporary memory for values in omp.single regions
+/// which need to be accessed by all threads and broadcast them using the
+/// single's copyprivate.
+LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) {
+ Location loc = wsOp->getLoc();
+ IRMapping rootMapping;
+
+ OpBuilder rootBuilder(wsOp);
+
+ // This operation is just a placeholder which will be erased later. We need it
+ // because our `parallelizeRegion` function works on regions and not blocks.
+ omp::WorkshareOp newOp =
+ rootBuilder.create<omp::WorkshareOp>(loc, omp::WorkshareOperands());
+ if (!wsOp.getNowait())
+ rootBuilder.create<omp::BarrierOp>(loc);
+
+ parallelizeRegion(wsOp.getRegion(), newOp.getRegion(), rootMapping, loc, di);
+
+ if (wsOp.getRegion().getBlocks().size() != 1)
+ return failure();
+
+ // Inline the contents of the placeholder workshare op into its parent block.
+ Block *theBlock = &newOp.getRegion().front();
+ Operation *term = theBlock->getTerminator();
+ Block *parentBlock = wsOp->getBlock();
+ parentBlock->getOperations().splice(newOp->getIterator(),
+ theBlock->getOperations());
+ assert(term->getNumOperands() == 0);
+ term->erase();
+ newOp->erase();
+ wsOp->erase();
+ return success();
+}
+
+class LowerWorksharePass
+ : public flangomp::impl::LowerWorkshareBase<LowerWorksharePass> {
+public:
+ void runOnOperation() override {
+ mlir::DominanceInfo &di = getAnalysis<mlir::DominanceInfo>();
+ getOperation()->walk([&](mlir::omp::WorkshareOp wsOp) {
+ if (failed(lowerWorkshare(wsOp, di)))
+ signalPassFailure();
+ });
+ }
+};
+} // namespace
diff --git a/flang/lib/Optimizer/Passes/Pipelines.cpp b/flang/lib/Optimizer/Passes/Pipelines.cpp
index 3fa5c54403bd8c..c1a5902b747887 100644
--- a/flang/lib/Optimizer/Passes/Pipelines.cpp
+++ b/flang/lib/Optimizer/Passes/Pipelines.cpp
@@ -212,7 +212,7 @@ void createDefaultFIROptimizerPassPipeline(mlir::PassManager &pm,
/// \param pm - MLIR pass manager that will hold the pipeline definition
/// \param optLevel - optimization level used for creating FIR optimization
/// passes pipeline
-void createHLFIRToFIRPassPipeline(mlir::PassManager &pm,
+void createHLFIRToFIRPassPipeline(mlir::PassManager &pm, bool enableOpenMP,
llvm::OptimizationLevel optLevel) {
if (optLevel.isOptimizingForSpeed()) {
addCanonicalizerPassWithoutRegionSimplification(pm);
@@ -230,6 +230,8 @@ void createHLFIRToFIRPassPipeline(mlir::PassManager &pm,
pm.addPass(hlfir::createLowerHLFIRIntrinsics());
pm.addPass(hlfir::createBufferizeHLFIR());
pm.addPass(hlfir::createConvertHLFIRtoFIR());
+ if (enableOpenMP)
+ pm.addPass(flangomp::createLowerWorkshare());
}
/// Create a pass pipeline for handling certain OpenMP transformations needed
@@ -302,7 +304,7 @@ void createDefaultFIRCodeGenPassPipeline(mlir::PassManager &pm,
void createMLIRToLLVMPassPipeline(mlir::PassManager &pm,
MLIRToLLVMPassPipelineConfig &config,
llvm::StringRef inputFilename) {
- fir::createHLFIRToFIRPassPipeline(pm, config.OptLevel);
+ fir::createHLFIRToFIRPassPipeline(pm, config.EnableOpenMP, config.OptLevel);
// Add default optimizer pass pipeline.
fir::createDefaultFIROptimizerPassPipeline(pm, config);
diff --git a/flang/test/Fir/basic-program.fir b/flang/test/Fir/basic-program.fir
index bca454c13ff9cc..4b18acb7c2b430 100644
--- a/flang/test/Fir/basic-program.fir
+++ b/flang/test/Fir/basic-program.fir
@@ -47,6 +47,7 @@ func.func @_QQmain() {
// PASSES-NEXT: LowerHLFIRIntrinsics
// PASSES-NEXT: BufferizeHLFIR
// PASSES-NEXT: ConvertHLFIRtoFIR
+// PASSES-NEXT: LowerWorkshare
// PASSES-NEXT: CSE
// PASSES-NEXT: (S) 0 num-cse'd - Number of operations CSE'd
// PASSES-NEXT: (S) 0 num-dce'd - Number of operations DCE'd
diff --git a/flang/test/Transforms/OpenMP/lower-workshare.mlir b/flang/test/Transforms/OpenMP/lower-workshare.mlir
new file mode 100644
index 00000000000000..a609ee5d3d6c2a
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare.mlir
@@ -0,0 +1,189 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// checks:
+// nowait on final omp.single
+func.func @wsfunc(%arg0: !fir.ref<!fir.array<42xi32>>) {
+ omp.parallel {
+ omp.workshare {
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %0 = fir.shape %c42 : (index) -> !fir.shape<1>
+ %1:2 = hlfir.declare %arg0(%0) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+ %2 = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
+ %3:2 = hlfir.declare %2(%0) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
+ %true = arith.constant true
+ %c1 = arith.constant 1 : index
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ %7 = hlfir.designate %1#0 (%arg1) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+ %8 = fir.load %7 : !fir.ref<i32>
+ %9 = arith.subi %8, %c1_i32 : i32
+ %10 = hlfir.designate %3#0 (%arg1) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+ hlfir.assign %9 to %10 temporary_lhs : i32, !fir.ref<i32>
+ omp.yield
+ }
+ omp.terminator
+ }
+ %4 = fir.undefined tuple<!fir.heap<!fir.array<42xi32>>, i1>
+ %5 = fir.insert_value %4, %true, [1 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, i1) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
+ %6 = fir.insert_value %5, %3#0, [0 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, !fir.heap<!fir.array<42xi32>>) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
+ hlfir.assign %3#0 to %1#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
+ fir.freemem %3#0 : !fir.heap<!fir.array<42xi32>>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+// -----
+
+// checks:
+// fir.alloca hoisted out and copyprivate'd
+func.func @wsfunc(%arg0: !fir.ref<!fir.array<42xi32>>) {
+ omp.workshare {
+ %c1_i32 = arith.constant 1 : i32
+ %alloc = fir.alloca i32
+ fir.store %c1_i32 to %alloc : !fir.ref<i32>
+ %c42 = arith.constant 42 : index
+ %0 = fir.shape %c42 : (index) -> !fir.shape<1>
+ %1:2 = hlfir.declare %arg0(%0) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+ %2 = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
+ %3:2 = hlfir.declare %2(%0) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
+ %true = arith.constant true
+ %c1 = arith.constant 1 : index
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ %7 = hlfir.designate %1#0 (%arg1) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+ %8 = fir.load %7 : !fir.ref<i32>
+ %ld = fir.load %alloc : !fir.ref<i32>
+ %n8 = arith.subi %8, %ld : i32
+ %9 = arith.subi %n8, %c1_i32 : i32
+ %10 = hlfir.designate %3#0 (%arg1) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+ hlfir.assign %9 to %10 temporary_lhs : i32, !fir.ref<i32>
+ omp.yield
+ }
+ omp.terminator
+ }
+ %4 = fir.undefined tuple<!fir.heap<!fir.array<42xi32>>, i1>
+ %5 = fir.insert_value %4, %true, [1 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, i1) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
+ %6 = fir.insert_value %5, %3#0, [0 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, !fir.heap<!fir.array<42xi32>>) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
+ "test.test1"(%alloc) : (!fir.ref<i32>) -> ()
+ hlfir.assign %3#0 to %1#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
+ fir.freemem %3#0 : !fir.heap<!fir.array<42xi32>>
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: func.func private @_workshare_copy_heap_42xi32(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>,
+// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
+// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func @wsfunc(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<42xi32>>) {
+// CHECK: omp.parallel {
+// CHECK: %[[VAL_1:.*]] = fir.alloca !fir.heap<!fir.array<42xi32>>
+// CHECK: omp.single copyprivate(%[[VAL_1]] -> @_workshare_copy_heap_42xi32 : !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
+// CHECK: %[[VAL_2:.*]] = arith.constant 42 : index
+// CHECK: %[[VAL_3:.*]] = fir.shape %[[VAL_2]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_3]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+// CHECK: %[[VAL_5:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
+// CHECK: fir.store %[[VAL_5]] to %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]](%[[VAL_3]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_7:.*]] = arith.constant 42 : index
+// CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32
+// CHECK: %[[VAL_9:.*]] = fir.shape %[[VAL_7]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_9]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+// CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]](%[[VAL_9]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
+// CHECK: %[[VAL_13:.*]] = arith.constant 1 : index
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_14:.*]]) : index = (%[[VAL_13]]) to (%[[VAL_7]]) inclusive step (%[[VAL_13]]) {
+// CHECK: %[[VAL_15:.*]] = hlfir.designate %[[VAL_10]]#0 (%[[VAL_14]]) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+// CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_15]] : !fir.ref<i32>
+// CHECK: %[[VAL_17:.*]] = arith.subi %[[VAL_16]], %[[VAL_8]] : i32
+// CHECK: %[[VAL_18:.*]] = hlfir.designate %[[VAL_12]]#0 (%[[VAL_14]]) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+// CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_18]] temporary_lhs : i32, !fir.ref<i32>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.single nowait {
+// CHECK: hlfir.assign %[[VAL_12]]#0 to %[[VAL_10]]#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
+// CHECK: fir.freemem %[[VAL_12]]#0 : !fir.heap<!fir.array<42xi32>>
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.barrier
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func private @_workshare_copy_heap_42xi32(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>,
+// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
+// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func private @_workshare_copy_i32(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32>,
+// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32>) {
+// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<i32>
+// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<i32>
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func @wsfunc(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<42xi32>>) {
+// CHECK: %[[VAL_1:.*]] = fir.alloca i32
+// CHECK: %[[VAL_2:.*]] = fir.alloca !fir.heap<!fir.array<42xi32>>
+// CHECK: omp.single copyprivate(%[[VAL_1]] -> @_workshare_copy_i32 : !fir.ref<i32>, %[[VAL_2]] -> @_workshare_copy_heap_42xi32 : !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
+// CHECK: %[[VAL_3:.*]] = arith.constant 1 : i32
+// CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref<i32>
+// CHECK: %[[VAL_4:.*]] = arith.constant 42 : index
+// CHECK: %[[VAL_5:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_5]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+// CHECK: %[[VAL_7:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
+// CHECK: fir.store %[[VAL_7]] to %[[VAL_2]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]](%[[VAL_5]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+// CHECK: %[[VAL_10:.*]] = arith.constant 42 : index
+// CHECK: %[[VAL_11:.*]] = fir.shape %[[VAL_10]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_11]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+// CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_2]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_13]](%[[VAL_11]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
+// CHECK: %[[VAL_15:.*]] = arith.constant 1 : index
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_16:.*]]) : index = (%[[VAL_15]]) to (%[[VAL_10]]) inclusive step (%[[VAL_15]]) {
+// CHECK: %[[VAL_17:.*]] = hlfir.designate %[[VAL_12]]#0 (%[[VAL_16]]) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+// CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]] : !fir.ref<i32>
+// CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+// CHECK: %[[VAL_20:.*]] = arith.subi %[[VAL_18]], %[[VAL_19]] : i32
+// CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_20]], %[[VAL_9]] : i32
+// CHECK: %[[VAL_22:.*]] = hlfir.designate %[[VAL_14]]#0 (%[[VAL_16]]) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+// CHECK: hlfir.assign %[[VAL_21]] to %[[VAL_22]] temporary_lhs : i32, !fir.ref<i32>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.single nowait {
+// CHECK: "test.test1"(%[[VAL_1]]) : (!fir.ref<i32>) -> ()
+// CHECK: hlfir.assign %[[VAL_14]]#0 to %[[VAL_12]]#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
+// CHECK: fir.freemem %[[VAL_14]]#0 : !fir.heap<!fir.array<42xi32>>
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.barrier
+// CHECK: return
+// CHECK: }
+
diff --git a/flang/test/Transforms/OpenMP/lower-workshare2.mlir b/flang/test/Transforms/OpenMP/lower-workshare2.mlir
new file mode 100644
index 00000000000000..940662e0bdccc2
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare2.mlir
@@ -0,0 +1,23 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Check that we correctly handle nowait
+
+// CHECK-LABEL: func.func @nonowait
+func.func @nonowait(%arg0: !fir.ref<!fir.array<42xi32>>) {
+ // CHECK: omp.barrier
+ omp.workshare {
+ omp.terminator
+ }
+ return
+}
+
+// -----
+
+// CHECK-LABEL: func.func @nowait
+func.func @nowait(%arg0: !fir.ref<!fir.array<42xi32>>) {
+ // CHECK-NOT: omp.barrier
+ omp.workshare nowait {
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workshare3.mlir b/flang/test/Transforms/OpenMP/lower-workshare3.mlir
new file mode 100644
index 00000000000000..5a3d583527fddb
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare3.mlir
@@ -0,0 +1,74 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+
+// Check that we store the correct values
+
+func.func @wsfunc() {
+ omp.parallel {
+ // CHECK: fir.alloca
+ // CHECK: fir.alloca
+ // CHECK: fir.alloca
+ // CHECK: fir.alloca
+ // CHECK: fir.alloca
+ // CHECK-NOT: fir.alloca
+ omp.workshare {
+
+ %t1 = "test.test1"() : () -> i32
+ // CHECK: %[[T1:.*]] = "test.test1"
+ // CHECK: fir.store %[[T1]]
+ %t2 = "test.test2"() : () -> i32
+ // CHECK: %[[T2:.*]] = "test.test2"
+ // CHECK: fir.store %[[T2]]
+ %t3 = "test.test3"() : () -> i32
+ // CHECK: %[[T3:.*]] = "test.test3"
+ // CHECK-NOT: fir.store %[[T3]]
+ %t4 = "test.test4"() : () -> i32
+ // CHECK: %[[T4:.*]] = "test.test4"
+ // CHECK: fir.store %[[T4]]
+ %t5 = "test.test5"() : () -> i32
+ // CHECK: %[[T5:.*]] = "test.test5"
+ // CHECK: fir.store %[[T5]]
+ %t6 = "test.test6"() : () -> i32
+ // CHECK: %[[T6:.*]] = "test.test6"
+ // CHECK-NOT: fir.store %[[T6]]
+
+
+ "test.test1"(%t1) : (i32) -> ()
+ "test.test1"(%t2) : (i32) -> ()
+ "test.test1"(%t3) : (i32) -> ()
+
+ %true = arith.constant true
+ fir.if %true {
+ "test.test2"(%t3) : (i32) -> ()
+ }
+
+ %c1_i32 = arith.constant 1 : i32
+
+ %t5_pure_use = arith.addi %t5, %c1_i32 : i32
+
+ %t6_mem_effect_use = "test.test8"(%t6) : (i32) -> i32
+ // CHECK: %[[T6_USE:.*]] = "test.test8"
+ // CHECK: fir.store %[[T6_USE]]
+
+ %c42 = arith.constant 42 : index
+ %c1 = arith.constant 1 : index
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ "test.test10"(%t1) : (i32) -> ()
+ "test.test10"(%t5_pure_use) : (i32) -> ()
+ "test.test10"(%t6_mem_effect_use) : (i32) -> ()
+ omp.yield
+ }
+ omp.terminator
+ }
+
+ "test.test10"(%t2) : (i32) -> ()
+ fir.if %true {
+ "test.test10"(%t4) : (i32) -> ()
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
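
The mechanism exercised above is the copyprivate plumbing: a value produced by a non-parallelizable op inside the omp.workshare that is still needed after the omp.single must be stored to a hoisted alloca, broadcast via copyprivate, and reloaded in the parallel region, while side-effect-free values are simply recomputed by every thread. A minimal sketch of that shape, with illustrative names (%tmp, %t1_reloaded) that are not part of what the test checks:

  %tmp = fir.alloca i32
  omp.single copyprivate(%tmp -> @_workshare_copy_i32 : !fir.ref<i32>) {
    %t1 = "test.test1"() : () -> i32
    // Store the value so the other threads can observe it after the single.
    fir.store %t1 to %tmp : !fir.ref<i32>
    omp.terminator
  }
  // Every thread reloads the broadcast value before using it in parallel code.
  %t1_reloaded = fir.load %tmp : !fir.ref<i32>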
diff --git a/flang/test/Transforms/OpenMP/lower-workshare4.mlir b/flang/test/Transforms/OpenMP/lower-workshare4.mlir
new file mode 100644
index 00000000000000..02fe90097008db
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare4.mlir
@@ -0,0 +1,59 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Check that we clean up unused pure operations from the parallel and single
+// regions
+
+// CHECK-LABEL: func.func @wsfunc() {
+// CHECK: %[[VAL_0:.*]] = fir.alloca i32
+// CHECK: omp.parallel {
+// CHECK: omp.single {
+// CHECK: %[[VAL_1:.*]] = "test.test1"() : () -> i32
+// CHECK: %[[VAL_2:.*]] = arith.constant 2 : index
+// CHECK: %[[VAL_3:.*]] = arith.constant 3 : index
+// CHECK: %[[VAL_4:.*]] = arith.addi %[[VAL_2]], %[[VAL_3]] : index
+// CHECK: "test.test3"(%[[VAL_4]]) : (index) -> ()
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_6:.*]] = arith.constant 42 : index
+// CHECK: omp.wsloop nowait {
+// CHECK: omp.loop_nest (%[[VAL_7:.*]]) : index = (%[[VAL_5]]) to (%[[VAL_6]]) inclusive step (%[[VAL_5]]) {
+// CHECK: "test.test2"() : () -> ()
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.barrier
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+func.func @wsfunc() {
+ %a = fir.alloca i32
+ omp.parallel {
+ omp.workshare {
+ %t1 = "test.test1"() : () -> i32
+
+ %c1 = arith.constant 1 : index
+ %c42 = arith.constant 42 : index
+
+ %c2 = arith.constant 2 : index
+ %c3 = arith.constant 3 : index
+ %add = arith.addi %c2, %c3 : index
+ "test.test3"(%add) : (index) -> ()
+
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ "test.test2"() : () -> ()
+ omp.yield
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+
diff --git a/flang/test/Transforms/OpenMP/lower-workshare5.mlir b/flang/test/Transforms/OpenMP/lower-workshare5.mlir
new file mode 100644
index 00000000000000..177f8aa8f86c7c
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare5.mlir
@@ -0,0 +1,42 @@
+// XFAIL: *
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// TODO we can lower these but we have no guarantee that the parent of
+// omp.workshare supports multi-block regions, thus we fail for now.
+
+func.func @wsfunc() {
+ %a = fir.alloca i32
+ omp.parallel {
+ omp.workshare {
+ ^bb1:
+ %c1 = arith.constant 1 : i32
+ cf.br ^bb3(%c1: i32)
+ ^bb3(%arg1: i32):
+ "test.test2"(%arg1) : (i32) -> ()
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+// -----
+
+func.func @wsfunc() {
+ %a = fir.alloca i32
+ omp.parallel {
+ omp.workshare {
+ ^bb1:
+ %c1 = arith.constant 1 : i32
+ cf.br ^bb3(%c1: i32)
+ ^bb2:
+ "test.test2"(%r) : (i32) -> ()
+ omp.terminator
+ ^bb3(%arg1: i32):
+ %r = "test.test2"(%arg1) : (i32) -> i32
+ cf.br ^bb2
+ }
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workshare6.mlir b/flang/test/Transforms/OpenMP/lower-workshare6.mlir
new file mode 100644
index 00000000000000..48379470e92562
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare6.mlir
@@ -0,0 +1,51 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Checks that the omp.workshare.loop_wrapper binds to the correct omp.workshare
+
+func.func @wsfunc() {
+ %c1 = arith.constant 1 : index
+ %c42 = arith.constant 42 : index
+ omp.parallel {
+ omp.workshare nowait {
+ omp.parallel {
+ omp.workshare nowait {
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ "test.test2"() : () -> ()
+ omp.yield
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: func.func @wsfunc() {
+// CHECK: %[[VAL_0:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_1:.*]] = arith.constant 42 : index
+// CHECK: omp.parallel {
+// CHECK: omp.single nowait {
+// CHECK: omp.parallel {
+// CHECK: omp.wsloop nowait {
+// CHECK: omp.loop_nest (%[[VAL_2:.*]]) : index = (%[[VAL_0]]) to (%[[VAL_1]]) inclusive step (%[[VAL_0]]) {
+// CHECK: "test.test2"() : () -> ()
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+
diff --git a/flang/tools/bbc/bbc.cpp b/flang/tools/bbc/bbc.cpp
index fe5e36f704c76c..1c24979bbcdafb 100644
--- a/flang/tools/bbc/bbc.cpp
+++ b/flang/tools/bbc/bbc.cpp
@@ -452,7 +452,8 @@ static llvm::LogicalResult convertFortranSourceToMLIR(
if (emitFIR && useHLFIR) {
// lower HLFIR to FIR
- fir::createHLFIRToFIRPassPipeline(pm, llvm::OptimizationLevel::O2);
+ fir::createHLFIRToFIRPassPipeline(pm, enableOpenMP,
+ llvm::OptimizationLevel::O2);
if (mlir::failed(pm.run(mlirModule))) {
llvm::errs() << "FATAL: lowering from HLFIR to FIR failed";
return mlir::failure();
@@ -467,6 +468,8 @@ static llvm::LogicalResult convertFortranSourceToMLIR(
// Add O2 optimizer pass pipeline.
MLIRToLLVMPassPipelineConfig config(llvm::OptimizationLevel::O2);
+ if (enableOpenMP)
+ config.EnableOpenMP = true;
config.NSWOnLoopVarInc = setNSW;
fir::registerDefaultInlinerPass(config);
fir::createDefaultFIROptimizerPassPipeline(pm, config);
diff --git a/flang/tools/tco/tco.cpp b/flang/tools/tco/tco.cpp
index 5c373c4e85258c..eaf4bae0884546 100644
--- a/flang/tools/tco/tco.cpp
+++ b/flang/tools/tco/tco.cpp
@@ -139,6 +139,7 @@ compileFIR(const mlir::PassPipelineCLParser &passPipeline) {
return mlir::failure();
} else {
MLIRToLLVMPassPipelineConfig config(llvm::OptimizationLevel::O2);
+ config.EnableOpenMP = true; // assume the input contains OpenMP
config.AliasAnalysis = true; // enabled when optimizing for speed
if (codeGenLLVM) {
// Run only CodeGen passes.
>From 1152749adc6f254d2bed42ee05d1e6d10d2df653 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Mon, 23 Sep 2024 15:07:48 +0900
Subject: [PATCH 07/27] Emit a proper error message for CFG in workshare
---
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 13 +++++-
.../OpenMP/lower-workshare-todo-cfg-dom.mlir | 23 ++++++++++
.../OpenMP/lower-workshare-todo-cfg.mlir | 20 +++++++++
.../Transforms/OpenMP/lower-workshare5.mlir | 42 -------------------
4 files changed, 55 insertions(+), 43 deletions(-)
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
delete mode 100644 flang/test/Transforms/OpenMP/lower-workshare5.mlir
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
index 6e5538b54ba5e0..cf1867311cc236 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -16,6 +16,7 @@
//
//===----------------------------------------------------------------------===//
+#include "flang/Optimizer/Builder/Todo.h"
#include <flang/Optimizer/Builder/FIRBuilder.h>
#include <flang/Optimizer/Dialect/FIROps.h>
#include <flang/Optimizer/Dialect/FIRType.h>
@@ -416,8 +417,18 @@ LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) {
parallelizeRegion(wsOp.getRegion(), newOp.getRegion(), rootMapping, loc, di);
+ // FIXME Currently, we only support workshare constructs with structured
+ // control flow. The transformation itself supports CFG, however, once we
+ // transform the MLIR region in the omp.workshare, we need to inline that
+ // region in the parent block. We have no guarantees at this point of the
+ // pipeline that the parent op supports CFG (e.g. fir.if), thus this is not
+ // generally possible. The alternative is to put the lowered region in an
+ // operation akin to scf.execute_region, which will get lowered at the same
+ // time when fir ops get lowered to CFG. However, SCF is not registered in
+ // flang so we cannot use it. Remove this requirement once we have
+ // scf.execute_region or an alternative operation available.
if (wsOp.getRegion().getBlocks().size() != 1)
- return failure();
+ TODO(wsOp->getLoc(), "omp workshare with unstructured control flow");
// Inline the contents of the placeholder workshare op into its parent block.
Block *theBlock = &newOp.getRegion().front();
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
new file mode 100644
index 00000000000000..1c47d448f597d9
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
@@ -0,0 +1,23 @@
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+
+// CHECK: not yet implemented: omp workshare with unstructured control flow
+
+// Check that the definition of %r dominates its use post-transform
+func.func @wsfunc() {
+ %a = fir.alloca i32
+ omp.parallel {
+ omp.workshare {
+ ^bb1:
+ %c1 = arith.constant 1 : i32
+ cf.br ^bb3(%c1: i32)
+ ^bb2:
+ "test.test2"(%r) : (i32) -> ()
+ omp.terminator
+ ^bb3(%arg1: i32):
+ %r = "test.test2"(%arg1) : (i32) -> i32
+ cf.br ^bb2
+ }
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
new file mode 100644
index 00000000000000..bf6c196a05b4a3
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
@@ -0,0 +1,20 @@
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+
+// CHECK: not yet implemented: omp workshare with unstructured control flow
+
+// Check transforming a simple CFG
+func.func @wsfunc() {
+ %a = fir.alloca i32
+ omp.parallel {
+ omp.workshare {
+ ^bb1:
+ %c1 = arith.constant 1 : i32
+ cf.br ^bb3(%c1: i32)
+ ^bb3(%arg1: i32):
+ "test.test2"(%arg1) : (i32) -> ()
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workshare5.mlir b/flang/test/Transforms/OpenMP/lower-workshare5.mlir
deleted file mode 100644
index 177f8aa8f86c7c..00000000000000
--- a/flang/test/Transforms/OpenMP/lower-workshare5.mlir
+++ /dev/null
@@ -1,42 +0,0 @@
-// XFAIL: *
-// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
-
-// TODO we can lower these but we have no guarantee that the parent of
-// omp.workshare supports multi-block regions, thus we fail for now.
-
-func.func @wsfunc() {
- %a = fir.alloca i32
- omp.parallel {
- omp.workshare {
- ^bb1:
- %c1 = arith.constant 1 : i32
- cf.br ^bb3(%c1: i32)
- ^bb3(%arg1: i32):
- "test.test2"(%arg1) : (i32) -> ()
- omp.terminator
- }
- omp.terminator
- }
- return
-}
-
-// -----
-
-func.func @wsfunc() {
- %a = fir.alloca i32
- omp.parallel {
- omp.workshare {
- ^bb1:
- %c1 = arith.constant 1 : i32
- cf.br ^bb3(%c1: i32)
- ^bb2:
- "test.test2"(%r) : (i32) -> ()
- omp.terminator
- ^bb3(%arg1: i32):
- %r = "test.test2"(%arg1) : (i32) -> i32
- cf.br ^bb2
- }
- omp.terminator
- }
- return
-}
>From e3130f1e82bc1dfe72b9e191553df59cfe86ca52 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Mon, 23 Sep 2024 15:44:23 +0900
Subject: [PATCH 08/27] Cleanup tests
---
.../OpenMP/lower-workshare-alloca.mlir | 55 +++++
...are6.mlir => lower-workshare-binding.mlir} | 0
...are4.mlir => lower-workshare-cleanup.mlir} | 0
....mlir => lower-workshare-copyprivate.mlir} | 0
...hare2.mlir => lower-workshare-nowait.mlir} | 0
.../Transforms/OpenMP/lower-workshare.mlir | 189 ------------------
6 files changed, 55 insertions(+), 189 deletions(-)
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
rename flang/test/Transforms/OpenMP/{lower-workshare6.mlir => lower-workshare-binding.mlir} (100%)
rename flang/test/Transforms/OpenMP/{lower-workshare4.mlir => lower-workshare-cleanup.mlir} (100%)
rename flang/test/Transforms/OpenMP/{lower-workshare3.mlir => lower-workshare-copyprivate.mlir} (100%)
rename flang/test/Transforms/OpenMP/{lower-workshare2.mlir => lower-workshare-nowait.mlir} (100%)
delete mode 100644 flang/test/Transforms/OpenMP/lower-workshare.mlir
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
new file mode 100644
index 00000000000000..d1bef3a359e487
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
@@ -0,0 +1,55 @@
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Checks that fir.alloca is hoisted out and copyprivate'd
+func.func @wsfunc() {
+ omp.workshare {
+ %c1 = arith.constant 1 : index
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %alloc = fir.alloca i32
+ fir.store %c1_i32 to %alloc : !fir.ref<i32>
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ "test.test1"(%alloc) : (!fir.ref<i32>) -> ()
+ omp.yield
+ }
+ omp.terminator
+ }
+ "test.test2"(%alloc) : (!fir.ref<i32>) -> ()
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: func.func private @_workshare_copy_i32(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32>,
+// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32>) {
+// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<i32>
+// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<i32>
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func @wsfunc() {
+// CHECK: %[[VAL_0:.*]] = fir.alloca i32
+// CHECK: omp.single copyprivate(%[[VAL_0]] -> @_workshare_copy_i32 : !fir.ref<i32>) {
+// CHECK: %[[VAL_1:.*]] = arith.constant 1 : i32
+// CHECK: fir.store %[[VAL_1]] to %[[VAL_0]] : !fir.ref<i32>
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_2:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_3:.*]] = arith.constant 42 : index
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_4:.*]]) : index = (%[[VAL_2]]) to (%[[VAL_3]]) inclusive step (%[[VAL_2]]) {
+// CHECK: "test.test1"(%[[VAL_0]]) : (!fir.ref<i32>) -> ()
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.single nowait {
+// CHECK: "test.test2"(%[[VAL_0]]) : (!fir.ref<i32>) -> ()
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.barrier
+// CHECK: return
+// CHECK: }
+
diff --git a/flang/test/Transforms/OpenMP/lower-workshare6.mlir b/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir
similarity index 100%
rename from flang/test/Transforms/OpenMP/lower-workshare6.mlir
rename to flang/test/Transforms/OpenMP/lower-workshare-binding.mlir
diff --git a/flang/test/Transforms/OpenMP/lower-workshare4.mlir b/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir
similarity index 100%
rename from flang/test/Transforms/OpenMP/lower-workshare4.mlir
rename to flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir
diff --git a/flang/test/Transforms/OpenMP/lower-workshare3.mlir b/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir
similarity index 100%
rename from flang/test/Transforms/OpenMP/lower-workshare3.mlir
rename to flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir
diff --git a/flang/test/Transforms/OpenMP/lower-workshare2.mlir b/flang/test/Transforms/OpenMP/lower-workshare-nowait.mlir
similarity index 100%
rename from flang/test/Transforms/OpenMP/lower-workshare2.mlir
rename to flang/test/Transforms/OpenMP/lower-workshare-nowait.mlir
diff --git a/flang/test/Transforms/OpenMP/lower-workshare.mlir b/flang/test/Transforms/OpenMP/lower-workshare.mlir
deleted file mode 100644
index a609ee5d3d6c2a..00000000000000
--- a/flang/test/Transforms/OpenMP/lower-workshare.mlir
+++ /dev/null
@@ -1,189 +0,0 @@
-// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
-
-// checks:
-// nowait on final omp.single
-func.func @wsfunc(%arg0: !fir.ref<!fir.array<42xi32>>) {
- omp.parallel {
- omp.workshare {
- %c42 = arith.constant 42 : index
- %c1_i32 = arith.constant 1 : i32
- %0 = fir.shape %c42 : (index) -> !fir.shape<1>
- %1:2 = hlfir.declare %arg0(%0) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
- %2 = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
- %3:2 = hlfir.declare %2(%0) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
- %true = arith.constant true
- %c1 = arith.constant 1 : index
- omp.workshare.loop_wrapper {
- omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
- %7 = hlfir.designate %1#0 (%arg1) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
- %8 = fir.load %7 : !fir.ref<i32>
- %9 = arith.subi %8, %c1_i32 : i32
- %10 = hlfir.designate %3#0 (%arg1) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
- hlfir.assign %9 to %10 temporary_lhs : i32, !fir.ref<i32>
- omp.yield
- }
- omp.terminator
- }
- %4 = fir.undefined tuple<!fir.heap<!fir.array<42xi32>>, i1>
- %5 = fir.insert_value %4, %true, [1 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, i1) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
- %6 = fir.insert_value %5, %3#0, [0 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, !fir.heap<!fir.array<42xi32>>) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
- hlfir.assign %3#0 to %1#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
- fir.freemem %3#0 : !fir.heap<!fir.array<42xi32>>
- omp.terminator
- }
- omp.terminator
- }
- return
-}
-
-// -----
-
-// checks:
-// fir.alloca hoisted out and copyprivate'd
-func.func @wsfunc(%arg0: !fir.ref<!fir.array<42xi32>>) {
- omp.workshare {
- %c1_i32 = arith.constant 1 : i32
- %alloc = fir.alloca i32
- fir.store %c1_i32 to %alloc : !fir.ref<i32>
- %c42 = arith.constant 42 : index
- %0 = fir.shape %c42 : (index) -> !fir.shape<1>
- %1:2 = hlfir.declare %arg0(%0) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
- %2 = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
- %3:2 = hlfir.declare %2(%0) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
- %true = arith.constant true
- %c1 = arith.constant 1 : index
- omp.workshare.loop_wrapper {
- omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
- %7 = hlfir.designate %1#0 (%arg1) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
- %8 = fir.load %7 : !fir.ref<i32>
- %ld = fir.load %alloc : !fir.ref<i32>
- %n8 = arith.subi %8, %ld : i32
- %9 = arith.subi %n8, %c1_i32 : i32
- %10 = hlfir.designate %3#0 (%arg1) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
- hlfir.assign %9 to %10 temporary_lhs : i32, !fir.ref<i32>
- omp.yield
- }
- omp.terminator
- }
- %4 = fir.undefined tuple<!fir.heap<!fir.array<42xi32>>, i1>
- %5 = fir.insert_value %4, %true, [1 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, i1) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
- %6 = fir.insert_value %5, %3#0, [0 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, !fir.heap<!fir.array<42xi32>>) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
- "test.test1"(%alloc) : (!fir.ref<i32>) -> ()
- hlfir.assign %3#0 to %1#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
- fir.freemem %3#0 : !fir.heap<!fir.array<42xi32>>
- omp.terminator
- }
- return
-}
-
-// CHECK-LABEL: func.func private @_workshare_copy_heap_42xi32(
-// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>,
-// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
-// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: return
-// CHECK: }
-
-// CHECK-LABEL: func.func @wsfunc(
-// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<42xi32>>) {
-// CHECK: omp.parallel {
-// CHECK: %[[VAL_1:.*]] = fir.alloca !fir.heap<!fir.array<42xi32>>
-// CHECK: omp.single copyprivate(%[[VAL_1]] -> @_workshare_copy_heap_42xi32 : !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
-// CHECK: %[[VAL_2:.*]] = arith.constant 42 : index
-// CHECK: %[[VAL_3:.*]] = fir.shape %[[VAL_2]] : (index) -> !fir.shape<1>
-// CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_3]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
-// CHECK: %[[VAL_5:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
-// CHECK: fir.store %[[VAL_5]] to %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]](%[[VAL_3]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: %[[VAL_7:.*]] = arith.constant 42 : index
-// CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32
-// CHECK: %[[VAL_9:.*]] = fir.shape %[[VAL_7]] : (index) -> !fir.shape<1>
-// CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_9]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
-// CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]](%[[VAL_9]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
-// CHECK: %[[VAL_13:.*]] = arith.constant 1 : index
-// CHECK: omp.wsloop {
-// CHECK: omp.loop_nest (%[[VAL_14:.*]]) : index = (%[[VAL_13]]) to (%[[VAL_7]]) inclusive step (%[[VAL_13]]) {
-// CHECK: %[[VAL_15:.*]] = hlfir.designate %[[VAL_10]]#0 (%[[VAL_14]]) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
-// CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_15]] : !fir.ref<i32>
-// CHECK: %[[VAL_17:.*]] = arith.subi %[[VAL_16]], %[[VAL_8]] : i32
-// CHECK: %[[VAL_18:.*]] = hlfir.designate %[[VAL_12]]#0 (%[[VAL_14]]) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
-// CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_18]] temporary_lhs : i32, !fir.ref<i32>
-// CHECK: omp.yield
-// CHECK: }
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: omp.single nowait {
-// CHECK: hlfir.assign %[[VAL_12]]#0 to %[[VAL_10]]#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
-// CHECK: fir.freemem %[[VAL_12]]#0 : !fir.heap<!fir.array<42xi32>>
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: omp.barrier
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: return
-// CHECK: }
-
-// CHECK-LABEL: func.func private @_workshare_copy_heap_42xi32(
-// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>,
-// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
-// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: return
-// CHECK: }
-
-// CHECK-LABEL: func.func private @_workshare_copy_i32(
-// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32>,
-// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32>) {
-// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<i32>
-// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<i32>
-// CHECK: return
-// CHECK: }
-
-// CHECK-LABEL: func.func @wsfunc(
-// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<42xi32>>) {
-// CHECK: %[[VAL_1:.*]] = fir.alloca i32
-// CHECK: %[[VAL_2:.*]] = fir.alloca !fir.heap<!fir.array<42xi32>>
-// CHECK: omp.single copyprivate(%[[VAL_1]] -> @_workshare_copy_i32 : !fir.ref<i32>, %[[VAL_2]] -> @_workshare_copy_heap_42xi32 : !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
-// CHECK: %[[VAL_3:.*]] = arith.constant 1 : i32
-// CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref<i32>
-// CHECK: %[[VAL_4:.*]] = arith.constant 42 : index
-// CHECK: %[[VAL_5:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1>
-// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_5]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
-// CHECK: %[[VAL_7:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
-// CHECK: fir.store %[[VAL_7]] to %[[VAL_2]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]](%[[VAL_5]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
-// CHECK: %[[VAL_10:.*]] = arith.constant 42 : index
-// CHECK: %[[VAL_11:.*]] = fir.shape %[[VAL_10]] : (index) -> !fir.shape<1>
-// CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_11]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
-// CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_2]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_13]](%[[VAL_11]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
-// CHECK: %[[VAL_15:.*]] = arith.constant 1 : index
-// CHECK: omp.wsloop {
-// CHECK: omp.loop_nest (%[[VAL_16:.*]]) : index = (%[[VAL_15]]) to (%[[VAL_10]]) inclusive step (%[[VAL_15]]) {
-// CHECK: %[[VAL_17:.*]] = hlfir.designate %[[VAL_12]]#0 (%[[VAL_16]]) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
-// CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]] : !fir.ref<i32>
-// CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
-// CHECK: %[[VAL_20:.*]] = arith.subi %[[VAL_18]], %[[VAL_19]] : i32
-// CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_20]], %[[VAL_9]] : i32
-// CHECK: %[[VAL_22:.*]] = hlfir.designate %[[VAL_14]]#0 (%[[VAL_16]]) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
-// CHECK: hlfir.assign %[[VAL_21]] to %[[VAL_22]] temporary_lhs : i32, !fir.ref<i32>
-// CHECK: omp.yield
-// CHECK: }
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: omp.single nowait {
-// CHECK: "test.test1"(%[[VAL_1]]) : (!fir.ref<i32>) -> ()
-// CHECK: hlfir.assign %[[VAL_14]]#0 to %[[VAL_12]]#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
-// CHECK: fir.freemem %[[VAL_14]]#0 : !fir.heap<!fir.array<42xi32>>
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: omp.barrier
-// CHECK: return
-// CHECK: }
-
>From 75b213f57e4012887a7f5036573636ddea88a83f Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Mon, 23 Sep 2024 16:25:55 +0900
Subject: [PATCH 09/27] Fix todo tests
---
flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir | 2 +-
flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
index 1c47d448f597d9..d10996167ae623 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
@@ -1,4 +1,4 @@
-// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+// RUN: %not_todo_cmd fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
// CHECK: not yet implemented: omp workshare with unstructured control flow
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
index bf6c196a05b4a3..46d2a8e8d48a8a 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
@@ -1,4 +1,4 @@
-// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+// RUN: %not_todo_cmd fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
// CHECK: not yet implemented: omp workshare with unstructured control flow
>From b227891307941a7376858b1d3a699108bead1fb2 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 14:21:14 +0900
Subject: [PATCH 10/27] Fix dst src in copy function
---
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
index cf1867311cc236..baf8346e7608a9 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -162,8 +162,8 @@ static mlir::func::FuncOp createCopyFunc(mlir::Location loc, mlir::Type varType,
{loc, loc});
builder.setInsertionPointToStart(&funcOp.getRegion().back());
- Value loaded = builder.create<fir::LoadOp>(loc, funcOp.getArgument(0));
- builder.create<fir::StoreOp>(loc, loaded, funcOp.getArgument(1));
+ Value loaded = builder.create<fir::LoadOp>(loc, funcOp.getArgument(1));
+ builder.create<fir::StoreOp>(loc, loaded, funcOp.getArgument(0));
builder.create<mlir::func::ReturnOp>(loc);
return funcOp;
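
In other words, after this fix the generated copyprivate helper reads from its second argument (the source on the thread that executed the single) and writes into its first argument (the destination on the receiving thread). Under that reading, the emitted helper has roughly the following shape; the argument names are illustrative, and the types mirror the i32 case used in the tests:

func.func private @_workshare_copy_i32(%dst: !fir.ref<i32>, %src: !fir.ref<i32>) {
  // Copy the value broadcast by the single thread into this thread's storage.
  %0 = fir.load %src : !fir.ref<i32>
  fir.store %0 to %dst : !fir.ref<i32>
  return
}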
>From 676bf68aa1ca532300070874622e49da1cbbc25a Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 14:38:48 +0900
Subject: [PATCH 11/27] Use omp.single to handle CFG cases
---
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 77 +++++++++++++------
1 file changed, 53 insertions(+), 24 deletions(-)
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
index baf8346e7608a9..34399abbcd20ea 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -16,7 +16,6 @@
//
//===----------------------------------------------------------------------===//
-#include "flang/Optimizer/Builder/Todo.h"
#include <flang/Optimizer/Builder/FIRBuilder.h>
#include <flang/Optimizer/Dialect/FIROps.h>
#include <flang/Optimizer/Dialect/FIRType.h>
@@ -39,7 +38,6 @@
#include <mlir/IR/Visitors.h>
#include <mlir/Interfaces/SideEffectInterfaces.h>
#include <mlir/Support/LLVM.h>
-#include <mlir/Transforms/GreedyPatternRewriteDriver.h>
#include <variant>
@@ -96,6 +94,12 @@ bool shouldUseWorkshareLowering(Operation *op) {
if (isNestedIn<omp::SingleOp>(parentWorkshare, op))
return false;
+ if (parentWorkshare.getRegion().getBlocks().size() != 1) {
+ parentWorkshare->emitWarning(
+ "omp workshare with unstructured control flow currently unsupported.");
+ return false;
+ }
+
return true;
}
@@ -408,15 +412,6 @@ LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) {
OpBuilder rootBuilder(wsOp);
- // This operation is just a placeholder which will be erased later. We need it
- // because our `parallelizeRegion` function works on regions and not blocks.
- omp::WorkshareOp newOp =
- rootBuilder.create<omp::WorkshareOp>(loc, omp::WorkshareOperands());
- if (!wsOp.getNowait())
- rootBuilder.create<omp::BarrierOp>(loc);
-
- parallelizeRegion(wsOp.getRegion(), newOp.getRegion(), rootMapping, loc, di);
-
// FIXME Currently, we only support workshare constructs with structured
// control flow. The transformation itself supports CFG, however, once we
// transform the MLIR region in the omp.workshare, we need to inline that
@@ -427,19 +422,53 @@ LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) {
// time when fir ops get lowered to CFG. However, SCF is not registered in
// flang so we cannot use it. Remove this requirement once we have
// scf.execute_region or an alternative operation available.
- if (wsOp.getRegion().getBlocks().size() != 1)
- TODO(wsOp->getLoc(), "omp workshare with unstructured control flow");
-
- // Inline the contents of the placeholder workshare op into its parent block.
- Block *theBlock = &newOp.getRegion().front();
- Operation *term = theBlock->getTerminator();
- Block *parentBlock = wsOp->getBlock();
- parentBlock->getOperations().splice(newOp->getIterator(),
- theBlock->getOperations());
- assert(term->getNumOperands() == 0);
- term->erase();
- newOp->erase();
- wsOp->erase();
+ if (wsOp.getRegion().getBlocks().size() == 1) {
+ // This operation is just a placeholder which will be erased later. We need
+ // it because our `parallelizeRegion` function works on regions and not
+ // blocks.
+ omp::WorkshareOp newOp =
+ rootBuilder.create<omp::WorkshareOp>(loc, omp::WorkshareOperands());
+ if (!wsOp.getNowait())
+ rootBuilder.create<omp::BarrierOp>(loc);
+
+ parallelizeRegion(wsOp.getRegion(), newOp.getRegion(), rootMapping, loc,
+ di);
+
+ // Inline the contents of the placeholder workshare op into its parent
+ // block.
+ Block *theBlock = &newOp.getRegion().front();
+ Operation *term = theBlock->getTerminator();
+ Block *parentBlock = wsOp->getBlock();
+ parentBlock->getOperations().splice(newOp->getIterator(),
+ theBlock->getOperations());
+ assert(term->getNumOperands() == 0);
+ term->erase();
+ newOp->erase();
+ wsOp->erase();
+ } else {
+ // Otherwise just change the operation to an omp.single.
+
+ // `shouldUseWorkshareLowering` should have guaranteed that there are no
+ // omp.workshare_loop_wrapper's that bind to this omp.workshare.
+ assert(!wsOp->walk([&](Operation *op) {
+ // Nested omp.workshare can have their own
+ // omp.workshare_loop_wrapper's.
+ if (isa<omp::WorkshareOp>(op))
+ return WalkResult::skip();
+ if (isa<omp::WorkshareLoopWrapperOp>(op))
+ return WalkResult::interrupt();
+ return WalkResult::advance();
+ })
+ .wasInterrupted());
+
+ omp::SingleOperands operands;
+ operands.nowait = wsOp.getNowaitAttr();
+ omp::SingleOp newOp = rootBuilder.create<omp::SingleOp>(loc, operands);
+
+ newOp.getRegion().getBlocks().splice(newOp.getRegion().getBlocks().begin(),
+ wsOp.getRegion().getBlocks());
+ wsOp->erase();
+ }
return success();
}
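
To make the fallback concrete: when the omp.workshare region has more than one block, the whole region is spliced unchanged into an omp.single (keeping the nowait clause, if any), so the unstructured body simply runs on one thread instead of hitting a hard error. A sketch of the rewrite on a small example built from the placeholder test ops; this is illustrative only, not checked output:

// Before --lower-workshare:
omp.workshare {
^bb1:
  %c1 = arith.constant 1 : i32
  cf.br ^bb2(%c1 : i32)
^bb2(%arg0: i32):
  "test.test2"(%arg0) : (i32) -> ()
  omp.terminator
}

// After: the same blocks, now owned by an omp.single.
omp.single {
^bb1:
  %c1 = arith.constant 1 : i32
  cf.br ^bb2(%c1 : i32)
^bb2(%arg0: i32):
  "test.test2"(%arg0) : (i32) -> ()
  omp.terminator
}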
>From 4d20893efca8c9c87d42a0d5324b945dac118155 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 15:12:14 +0900
Subject: [PATCH 12/27] Fix lower workshare tests
---
flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir | 4 ++--
.../Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir | 7 ++++---
flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir | 7 ++++---
3 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
index d1bef3a359e487..618b8d9c19b6b1 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
@@ -24,8 +24,8 @@ func.func @wsfunc() {
// CHECK-LABEL: func.func private @_workshare_copy_i32(
// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32>,
// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32>) {
-// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<i32>
-// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<i32>
+// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+// CHECK: fir.store %[[VAL_2]] to %[[VAL_0]] : !fir.ref<i32>
// CHECK: return
// CHECK: }
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
index d10996167ae623..62d9da6c520f85 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
@@ -1,8 +1,9 @@
-// RUN: %not_todo_cmd fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
-// CHECK: not yet implemented: omp workshare with unstructured control flow
+// CHECK: omp.parallel
+// CHECK-NEXT: omp.single
-// Check that the definition of %r dominates its use post-transform
+// TODO Check that the definition of %r dominates its use post-transform
func.func @wsfunc() {
%a = fir.alloca i32
omp.parallel {
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
index 46d2a8e8d48a8a..d9551eb99f0762 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
@@ -1,8 +1,9 @@
-// RUN: %not_todo_cmd fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
-// CHECK: not yet implemented: omp workshare with unstructured control flow
+// CHECK: omp.parallel
+// CHECK-NEXT: omp.single
-// Check transforming a simple CFG
+// TODO Check transforming a simple CFG
func.func @wsfunc() {
%a = fir.alloca i32
omp.parallel {
>From 5760383d7a4b491ea51dc4cfebdfd43d39bfba07 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 15:28:07 +0900
Subject: [PATCH 13/27] Different warning
---
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 9 +++++----
.../Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir | 2 ++
.../test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir | 2 ++
3 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
index 34399abbcd20ea..4d8e2a9a067141 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -94,11 +94,9 @@ bool shouldUseWorkshareLowering(Operation *op) {
if (isNestedIn<omp::SingleOp>(parentWorkshare, op))
return false;
- if (parentWorkshare.getRegion().getBlocks().size() != 1) {
- parentWorkshare->emitWarning(
- "omp workshare with unstructured control flow currently unsupported.");
+ // Do not use workshare lowering until we support CFG in omp.workshare
+ if (parentWorkshare.getRegion().getBlocks().size() != 1)
return false;
- }
return true;
}
@@ -448,6 +446,9 @@ LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) {
} else {
// Otherwise just change the operation to an omp.single.
+ wsOp->emitWarning("omp workshare with unstructured control flow currently "
+ "unsupported and will be serialized.");
+
// `shouldUseWorkshareLowering` should have guaranteed that there are no
// omp.workshare_loop_wrapper's that bind to this omp.workshare.
assert(!wsOp->walk([&](Operation *op) {
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
index 62d9da6c520f85..96dc878bed0c99 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
@@ -1,5 +1,7 @@
// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+// CHECK: warning: omp workshare with unstructured control flow currently unsupported and will be serialized.
+
// CHECK: omp.parallel
// CHECK-NEXT: omp.single
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
index d9551eb99f0762..ce8a4eb96982be 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
@@ -1,5 +1,7 @@
// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+// CHECK: warning: omp workshare with unstructured control flow currently unsupported and will be serialized.
+
// CHECK: omp.parallel
// CHECK-NEXT: omp.single
>From 71d13e248166908d98533b267fad16e4ff12caa2 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 22:45:09 +0900
Subject: [PATCH 14/27] Fix bug and add better clarification comments
---
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 28 ++++++++++++++++---
.../lower-workshare-correct-parallelize.mlir | 16 +++++++++++
2 files changed, 40 insertions(+), 4 deletions(-)
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
index 4d8e2a9a067141..84cf5e82167987 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -35,6 +35,7 @@
#include <mlir/IR/IRMapping.h>
#include <mlir/IR/OpDefinition.h>
#include <mlir/IR/PatternMatch.h>
+#include <mlir/IR/Value.h>
#include <mlir/IR/Visitors.h>
#include <mlir/Interfaces/SideEffectInterfaces.h>
#include <mlir/Support/LLVM.h>
@@ -188,14 +189,19 @@ static bool isTransitivelyUsedOutside(Value v, SingleRegion sr) {
if (isUserOutsideSR(user, parentOp, sr))
return true;
- // Results of nested users cannot be used outside of the SR
+ // Now we know user is inside `sr`.
+
+ // Results of nested users cannot be used outside of `sr`.
if (user->getBlock() != srBlock)
continue;
- // A non-safe to parallelize operation will be handled separately
+ // A non-safe to parallelize operation will be checked for uses outside
+ // separately.
if (!isSafeToParallelize(user))
continue;
+ // For safe to parallelize operations, we need to check if there is a
+ // transitive use of `v` through them.
for (auto res : user->getResults())
if (isTransitivelyUsedOutside(res, sr))
return true;
@@ -242,7 +248,21 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
for (Operation &op : llvm::make_range(sr.begin, sr.end)) {
if (isSafeToParallelize(&op)) {
singleBuilder.clone(op, singleMapping);
- parallelBuilder.clone(op, rootMapping);
+ if (llvm::all_of(op.getOperands(), [&](Value opr) {
+ return rootMapping.contains(opr);
+ })) {
+ // Safe to parallelize operations which have all operands available in
+ // the root parallel block can be executed there.
+ parallelBuilder.clone(op, rootMapping);
+ } else {
+ // If any operand was not available, it means that there was no
+ // transitive use of a non-safe-to-parallelize operation outside `sr`.
+ // This means that there should be no transitive uses outside `sr` of
+ // `op`.
+ assert(llvm::all_of(op.getResults(), [&](Value v) {
+ return !isTransitivelyUsedOutside(v, sr);
+ }));
+ }
} else if (auto alloca = dyn_cast<fir::AllocaOp>(&op)) {
auto hoisted =
cast<fir::AllocaOp>(allocaBuilder.clone(*alloca, singleMapping));
@@ -252,7 +272,7 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
} else {
singleBuilder.clone(op, singleMapping);
// Prepare reloaded values for results of operations that cannot be
- // safely parallelized and which are used after the region `sr`
+ // safely parallelized and which are used after the region `sr`.
for (auto res : op.getResults()) {
if (isTransitivelyUsedOutside(res, sr)) {
auto alloc = mapReloadedValue(res, allocaBuilder, singleBuilder,
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir b/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir
new file mode 100644
index 00000000000000..99ca4fe5a0e212
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir
@@ -0,0 +1,16 @@
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Check that the safe-to-parallelize `fir.declare` op will not be parallelized
+// because its operand %alloc is not reloaded outside the omp.single.
+
+func.func @foo() {
+ %c0 = arith.constant 0 : index
+ omp.workshare {
+ %alloc = fir.allocmem !fir.array<?xf32>, %c0 {bindc_name = ".tmp.forall", uniq_name = ""}
+ %shape = fir.shape %c0 : (index) -> !fir.shape<1>
+ %declare = fir.declare %alloc(%shape) {uniq_name = ".tmp.forall"} : (!fir.heap<!fir.array<?xf32>>, !fir.shape<1>) -> !fir.heap<!fir.array<?xf32>>
+ fir.freemem %alloc : !fir.heap<!fir.array<?xf32>>
+ omp.terminator
+ }
+ return
+}
>From 487305670f8fffc81c37a39635c08fdcad0734f9 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 22:48:42 +0900
Subject: [PATCH 15/27] Fix message
---
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
index 84cf5e82167987..a91f64f04a30aa 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -466,8 +466,9 @@ LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) {
} else {
// Otherwise just change the operation to an omp.single.
- wsOp->emitWarning("omp workshare with unstructured control flow currently "
- "unsupported and will be serialized.");
+ wsOp->emitWarning(
+ "omp workshare with unstructured control flow is currently "
+ "unsupported and will be serialized.");
// `shouldUseWorkshareLowering` should have guaranteed that there are no
// omp.workshare_loop_wrapper's that bind to this omp.workshare.
>From 15f8d3da3fa4b468049752ec57d6b02362a12786 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Sat, 5 Oct 2024 12:57:48 +0900
Subject: [PATCH 16/27] Fix tests
---
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 7 ++++++-
.../OpenMP/lower-workshare-correct-parallelize.mlir | 9 +++++++++
.../Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir | 2 +-
.../test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir | 2 +-
4 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
index a91f64f04a30aa..aa4371b3af6f7d 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -249,7 +249,12 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
if (isSafeToParallelize(&op)) {
singleBuilder.clone(op, singleMapping);
if (llvm::all_of(op.getOperands(), [&](Value opr) {
- return rootMapping.contains(opr);
+ // Either we have already remapped it
+ bool remapped = rootMapping.contains(opr);
+ // Or it is available because it dominates `sr`
+ bool dominates =
+ di.properlyDominates(opr.getDefiningOp(), &*sr.begin);
+ return remapped || dominates;
})) {
// Safe to parallelize operations which have all operands available in
// the root parallel block can be executed there.
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir b/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir
index 99ca4fe5a0e212..31db8213b5f001 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir
@@ -14,3 +14,12 @@ func.func @foo() {
}
return
}
+
+// CHECK: omp.single nowait
+// CHECK: fir.allocmem
+// CHECK: fir.shape
+// CHECK: fir.declare
+// CHECK: fir.freemem
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.barrier
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
index 96dc878bed0c99..83c49cd635d082 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
@@ -1,6 +1,6 @@
// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
-// CHECK: warning: omp workshare with unstructured control flow currently unsupported and will be serialized.
+// CHECK: warning: omp workshare with unstructured control flow is currently unsupported and will be serialized.
// CHECK: omp.parallel
// CHECK-NEXT: omp.single
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
index ce8a4eb96982be..a27cf880694014 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
@@ -1,6 +1,6 @@
// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
-// CHECK: warning: omp workshare with unstructured control flow currently unsupported and will be serialized.
+// CHECK: warning: omp workshare with unstructured control flow is currently unsupported and will be serialized.
// CHECK: omp.parallel
// CHECK-NEXT: omp.single
>From b52a6f9e3815c41a00afff1f987b6725a44f93cb Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Sat, 19 Oct 2024 23:32:27 +0900
Subject: [PATCH 17/27] Do not emit empty omp.single's
---
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 50 ++++++++++++-------
.../OpenMP/lower-workshare-no-single.mlir | 20 ++++++++
2 files changed, 52 insertions(+), 18 deletions(-)
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
index aa4371b3af6f7d..225c585a02d913 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -239,11 +239,12 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
return alloc;
};
- auto moveToSingle = [&](SingleRegion sr, OpBuilder allocaBuilder,
- OpBuilder singleBuilder,
- OpBuilder parallelBuilder) -> SmallVector<Value> {
+ auto moveToSingle =
+ [&](SingleRegion sr, OpBuilder allocaBuilder, OpBuilder singleBuilder,
+ OpBuilder parallelBuilder) -> std::pair<bool, SmallVector<Value>> {
IRMapping singleMapping = rootMapping;
SmallVector<Value> copyPrivate;
+ bool allParallelized = true;
for (Operation &op : llvm::make_range(sr.begin, sr.end)) {
if (isSafeToParallelize(&op)) {
@@ -267,6 +268,7 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
assert(llvm::all_of(op.getResults(), [&](Value v) {
return !isTransitivelyUsedOutside(v, sr);
}));
+ allParallelized = false;
}
} else if (auto alloca = dyn_cast<fir::AllocaOp>(&op)) {
auto hoisted =
@@ -274,6 +276,7 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
rootMapping.map(&*alloca, &*hoisted);
rootMapping.map(alloca.getResult(), hoisted.getResult());
copyPrivate.push_back(hoisted);
+ allParallelized = false;
} else {
singleBuilder.clone(op, singleMapping);
// Prepare reloaded values for results of operations that cannot be
@@ -286,10 +289,11 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
copyPrivate.push_back(alloc);
}
}
+ allParallelized = false;
}
}
singleBuilder.create<omp::TerminatorOp>(loc);
- return copyPrivate;
+ return {allParallelized, copyPrivate};
};
for (Block &block : sourceRegion) {
@@ -343,25 +347,35 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
Block *parallelBlock = new Block();
parallelBuilder.setInsertionPointToStart(parallelBlock);
- omp::SingleOperands singleOperands;
- if (isLast)
- singleOperands.nowait = rootBuilder.getUnitAttr();
- singleOperands.copyprivateVars =
+ auto [allParallelized, copyprivateVars] =
moveToSingle(std::get<SingleRegion>(opOrSingle), allocaBuilder,
singleBuilder, parallelBuilder);
- cleanupBlock(singleBlock);
- for (auto var : singleOperands.copyprivateVars) {
- mlir::func::FuncOp funcOp =
- createCopyFunc(loc, var.getType(), firCopyFuncBuilder);
- singleOperands.copyprivateSyms.push_back(SymbolRefAttr::get(funcOp));
+ if (allParallelized) {
+ // The single region was not required as all operations were safe to
+ // parallelize
+ assert(copyprivateVars.empty());
+ assert(allocaBlock->empty());
+ delete singleBlock;
+ } else {
+ omp::SingleOperands singleOperands;
+ if (isLast)
+ singleOperands.nowait = rootBuilder.getUnitAttr();
+ singleOperands.copyprivateVars = copyprivateVars;
+ cleanupBlock(singleBlock);
+ for (auto var : singleOperands.copyprivateVars) {
+ mlir::func::FuncOp funcOp =
+ createCopyFunc(loc, var.getType(), firCopyFuncBuilder);
+ singleOperands.copyprivateSyms.push_back(
+ SymbolRefAttr::get(funcOp));
+ }
+ omp::SingleOp singleOp =
+ rootBuilder.create<omp::SingleOp>(loc, singleOperands);
+ singleOp.getRegion().push_back(singleBlock);
+ targetRegion.front().getOperations().splice(
+ singleOp->getIterator(), allocaBlock->getOperations());
}
- omp::SingleOp singleOp =
- rootBuilder.create<omp::SingleOp>(loc, singleOperands);
- singleOp.getRegion().push_back(singleBlock);
rootBuilder.getInsertionBlock()->getOperations().splice(
rootBuilder.getInsertionPoint(), parallelBlock->getOperations());
- targetRegion.front().getOperations().splice(
- singleOp->getIterator(), allocaBlock->getOperations());
delete allocaBlock;
delete parallelBlock;
} else {
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir b/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir
new file mode 100644
index 00000000000000..3e73816e63ace3
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir
@@ -0,0 +1,20 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Check that we do not emit an omp.single for the constant operation
+
+func.func @foo() {
+ omp.workshare {
+ %c1 = arith.constant 1 : index
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c1) inclusive step (%c1) {
+ "test.test0"() : () -> ()
+ omp.yield
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-NOT: omp.single
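As a rough illustration of what this patch achieves (a hedged sketch based on the FIR integration checks later in this series, not verbatim pass output): when every operation in the workshare body is safe to parallelize, the lowered form carries no omp.single at all and reduces to a worksharing loop followed by a barrier, e.g. inside a parallel region:

// Hedged sketch of the expected lowered shape when no omp.single is needed
// (names and exact attributes are assumptions, not actual FileCheck output).
omp.parallel {
  %c1 = arith.constant 1 : index   // safe to parallelize: recomputed by every thread
  omp.wsloop nowait {
    omp.loop_nest (%iv) : index = (%c1) to (%c1) inclusive step (%c1) {
      "test.test0"() : () -> ()
      omp.yield
    }
  }
  omp.barrier                      // workshare without nowait still synchronizes at the end
  omp.terminator
}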
>From 21128e7ec08a12c85832e13bc5ad6ca63d5a975e Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Sun, 20 Oct 2024 01:34:12 +0900
Subject: [PATCH 18/27] LowerWorkshare tests
---
flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir | 2 --
flang/test/Transforms/OpenMP/lower-workshare-binding.mlir | 2 --
flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir | 2 --
flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir | 1 -
flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir | 1 -
5 files changed, 8 deletions(-)
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
index 618b8d9c19b6b1..12b0558d06ed58 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
@@ -13,7 +13,6 @@ func.func @wsfunc() {
"test.test1"(%alloc) : (!fir.ref<i32>) -> ()
omp.yield
}
- omp.terminator
}
"test.test2"(%alloc) : (!fir.ref<i32>) -> ()
omp.terminator
@@ -43,7 +42,6 @@ func.func @wsfunc() {
// CHECK: "test.test1"(%[[VAL_0]]) : (!fir.ref<i32>) -> ()
// CHECK: omp.yield
// CHECK: }
-// CHECK: omp.terminator
// CHECK: }
// CHECK: omp.single nowait {
// CHECK: "test.test2"(%[[VAL_0]]) : (!fir.ref<i32>) -> ()
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir b/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir
index 48379470e92562..f1d0e8e2296140 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir
@@ -14,7 +14,6 @@ func.func @wsfunc() {
"test.test2"() : () -> ()
omp.yield
}
- omp.terminator
}
omp.terminator
}
@@ -38,7 +37,6 @@ func.func @wsfunc() {
// CHECK: "test.test2"() : () -> ()
// CHECK: omp.yield
// CHECK: }
-// CHECK: omp.terminator
// CHECK: }
// CHECK: omp.terminator
// CHECK: }
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir b/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir
index 02fe90097008db..ca288917a3ac49 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir
@@ -21,7 +21,6 @@
// CHECK: "test.test2"() : () -> ()
// CHECK: omp.yield
// CHECK: }
-// CHECK: omp.terminator
// CHECK: }
// CHECK: omp.barrier
// CHECK: omp.terminator
@@ -47,7 +46,6 @@ func.func @wsfunc() {
"test.test2"() : () -> ()
omp.yield
}
- omp.terminator
}
omp.terminator
}
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir b/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir
index 5a3d583527fddb..d7a04e198ceed9 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir
@@ -59,7 +59,6 @@ func.func @wsfunc() {
"test.test10"(%t6_mem_effect_use) : (i32) -> ()
omp.yield
}
- omp.terminator
}
"test.test10"(%t2) : (i32) -> ()
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir b/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir
index 3e73816e63ace3..1fd379a6e5eb48 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir
@@ -10,7 +10,6 @@ func.func @foo() {
"test.test0"() : () -> ()
omp.yield
}
- omp.terminator
}
omp.terminator
}
>From e62341dffa15e15c2e307fe45ef8c33233e63cc7 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Sun, 20 Oct 2024 01:35:11 +0900
Subject: [PATCH 19/27] pipelines fix
---
flang/include/flang/Optimizer/Passes/Pipelines.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/flang/include/flang/Optimizer/Passes/Pipelines.h b/flang/include/flang/Optimizer/Passes/Pipelines.h
index 3b54ac38838587..55fafc2e6b36fe 100644
--- a/flang/include/flang/Optimizer/Passes/Pipelines.h
+++ b/flang/include/flang/Optimizer/Passes/Pipelines.h
@@ -123,7 +123,8 @@ void createDefaultFIROptimizerPassPipeline(mlir::PassManager &pm,
/// \param optLevel - optimization level used for creating FIR optimization
/// passes pipeline
void createHLFIRToFIRPassPipeline(
- mlir::PassManager &pm, llvm::OptimizationLevel optLevel = defaultOptLevel);
+ mlir::PassManager &pm, bool enableOpenMP,
+ llvm::OptimizationLevel optLevel = defaultOptLevel);
/// Create a pass pipeline for handling certain OpenMP transformations needed
/// prior to FIR lowering.
>From 688eead46bcec5a517050b1eb9aaa9bfe0f88285 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Sun, 4 Aug 2024 17:33:52 +0900
Subject: [PATCH 20/27] Add workshare loop wrapper lowerings
Bufferize test
Add test for should use workshare lowering
---
.../HLFIR/Transforms/BufferizeHLFIR.cpp | 4 +-
.../Transforms/OptimizedBufferization.cpp | 10 +-
flang/test/HLFIR/bufferize-workshare.fir | 58 ++++++++
.../OpenMP/should-use-workshare-lowering.mlir | 140 ++++++++++++++++++
4 files changed, 208 insertions(+), 4 deletions(-)
create mode 100644 flang/test/HLFIR/bufferize-workshare.fir
create mode 100644 flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp b/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp
index 07794828fce267..1848dbe2c7a2c2 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp
@@ -26,6 +26,7 @@
#include "flang/Optimizer/HLFIR/HLFIRDialect.h"
#include "flang/Optimizer/HLFIR/HLFIROps.h"
#include "flang/Optimizer/HLFIR/Passes.h"
+#include "flang/Optimizer/OpenMP/Passes.h"
#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
#include "mlir/IR/Dominance.h"
#include "mlir/IR/PatternMatch.h"
@@ -792,7 +793,8 @@ struct ElementalOpConversion
// Generate a loop nest looping around the fir.elemental shape and clone
// fir.elemental region inside the inner loop.
hlfir::LoopNest loopNest =
- hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered());
+ hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered(),
+ flangomp::shouldUseWorkshareLowering(elemental));
auto insPt = builder.saveInsertionPoint();
builder.setInsertionPointToStart(loopNest.body);
auto yield = hlfir::inlineElementalOp(loc, builder, elemental,
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
index 3a0a98dc594463..f014724861e333 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
@@ -20,6 +20,7 @@
#include "flang/Optimizer/HLFIR/HLFIRDialect.h"
#include "flang/Optimizer/HLFIR/HLFIROps.h"
#include "flang/Optimizer/HLFIR/Passes.h"
+#include "flang/Optimizer/OpenMP/Passes.h"
#include "flang/Optimizer/Transforms/Utils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Dominance.h"
@@ -482,7 +483,8 @@ llvm::LogicalResult ElementalAssignBufferization::matchAndRewrite(
// Generate a loop nest looping around the hlfir.elemental shape and clone
// hlfir.elemental region inside the inner loop
hlfir::LoopNest loopNest =
- hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered());
+ hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered(),
+ flangomp::shouldUseWorkshareLowering(elemental));
builder.setInsertionPointToStart(loopNest.body);
auto yield = hlfir::inlineElementalOp(loc, builder, elemental,
loopNest.oneBasedIndices);
@@ -553,7 +555,8 @@ llvm::LogicalResult BroadcastAssignBufferization::matchAndRewrite(
llvm::SmallVector<mlir::Value> extents =
hlfir::getIndexExtents(loc, builder, shape);
hlfir::LoopNest loopNest =
- hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true);
+ hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true,
+ flangomp::shouldUseWorkshareLowering(assign));
builder.setInsertionPointToStart(loopNest.body);
auto arrayElement =
hlfir::getElementAt(loc, builder, lhs, loopNest.oneBasedIndices);
@@ -648,7 +651,8 @@ llvm::LogicalResult VariableAssignBufferization::matchAndRewrite(
llvm::SmallVector<mlir::Value> extents =
hlfir::getIndexExtents(loc, builder, shape);
hlfir::LoopNest loopNest =
- hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true);
+ hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true,
+ flangomp::shouldUseWorkshareLowering(assign));
builder.setInsertionPointToStart(loopNest.body);
auto rhsArrayElement =
hlfir::getElementAt(loc, builder, rhs, loopNest.oneBasedIndices);
diff --git a/flang/test/HLFIR/bufferize-workshare.fir b/flang/test/HLFIR/bufferize-workshare.fir
new file mode 100644
index 00000000000000..9b7341ae43398a
--- /dev/null
+++ b/flang/test/HLFIR/bufferize-workshare.fir
@@ -0,0 +1,58 @@
+// RUN: fir-opt --bufferize-hlfir %s | FileCheck %s
+
+// CHECK-LABEL: func.func @simple(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<42xi32>>) {
+// CHECK: omp.parallel {
+// CHECK: omp.workshare {
+// CHECK: %[[VAL_1:.*]] = arith.constant 42 : index
+// CHECK: %[[VAL_2:.*]] = arith.constant 1 : i32
+// CHECK: %[[VAL_3:.*]] = fir.shape %[[VAL_1]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_3]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+// CHECK: %[[VAL_5:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
+// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]](%[[VAL_3]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
+// CHECK: %[[VAL_7:.*]] = arith.constant true
+// CHECK: %[[VAL_8:.*]] = arith.constant 1 : index
+// CHECK: omp.workshare.loop_wrapper {
+// CHECK: omp.loop_nest (%[[VAL_9:.*]]) : index = (%[[VAL_8]]) to (%[[VAL_1]]) inclusive step (%[[VAL_8]]) {
+// CHECK: %[[VAL_10:.*]] = hlfir.designate %[[VAL_4]]#0 (%[[VAL_9]]) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+// CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_10]] : !fir.ref<i32>
+// CHECK: %[[VAL_12:.*]] = arith.subi %[[VAL_11]], %[[VAL_2]] : i32
+// CHECK: %[[VAL_13:.*]] = hlfir.designate %[[VAL_6]]#0 (%[[VAL_9]]) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+// CHECK: hlfir.assign %[[VAL_12]] to %[[VAL_13]] temporary_lhs : i32, !fir.ref<i32>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_14:.*]] = fir.undefined tuple<!fir.heap<!fir.array<42xi32>>, i1>
+// CHECK: %[[VAL_15:.*]] = fir.insert_value %[[VAL_14]], %[[VAL_7]], [1 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, i1) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
+// CHECK: %[[VAL_16:.*]] = fir.insert_value %[[VAL_15]], %[[VAL_6]]#0, [0 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, !fir.heap<!fir.array<42xi32>>) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
+// CHECK: hlfir.assign %[[VAL_6]]#0 to %[[VAL_4]]#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
+// CHECK: fir.freemem %[[VAL_6]]#0 : !fir.heap<!fir.array<42xi32>>
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+func.func @simple(%arg: !fir.ref<!fir.array<42xi32>>) {
+ omp.parallel {
+ omp.workshare {
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %shape = fir.shape %c42 : (index) -> !fir.shape<1>
+ %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+ %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> {
+ ^bb0(%i: index):
+ %ref = hlfir.designate %array#0 (%i) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+ %val = fir.load %ref : !fir.ref<i32>
+ %sub = arith.subi %val, %c1_i32 : i32
+ hlfir.yield_element %sub : i32
+ }
+ hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref<!fir.array<42xi32>>
+ hlfir.destroy %elemental : !hlfir.expr<42xi32>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir b/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir
new file mode 100644
index 00000000000000..229fe592a02b9b
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir
@@ -0,0 +1,140 @@
+// RUN: fir-opt --bufferize-hlfir %s | FileCheck %s
+
+// Checks that we correctly identify when to use the lowering to
+// omp.workshare.loop_wrapper
+
+// CHECK-LABEL: @should_parallelize_0
+// CHECK: omp.workshare.loop_wrapper
+func.func @should_parallelize_0(%arg: !fir.ref<!fir.array<42xi32>>, %idx : index) {
+ omp.workshare {
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %shape = fir.shape %c42 : (index) -> !fir.shape<1>
+ %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+ %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> {
+ ^bb0(%i: index):
+ hlfir.yield_element %c1_i32 : i32
+ }
+ hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref<!fir.array<42xi32>>
+ hlfir.destroy %elemental : !hlfir.expr<42xi32>
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: @should_parallelize_1
+// CHECK: omp.workshare.loop_wrapper
+func.func @should_parallelize_1(%arg: !fir.ref<!fir.array<42xi32>>, %idx : index) {
+ omp.parallel {
+ omp.workshare {
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %shape = fir.shape %c42 : (index) -> !fir.shape<1>
+ %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+ %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> {
+ ^bb0(%i: index):
+ hlfir.yield_element %c1_i32 : i32
+ }
+ hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref<!fir.array<42xi32>>
+ hlfir.destroy %elemental : !hlfir.expr<42xi32>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+
+// CHECK-LABEL: @should_not_parallelize_0
+// CHECK-NOT: omp.workshare.loop_wrapper
+func.func @should_not_parallelize_0(%arg: !fir.ref<!fir.array<42xi32>>, %idx : index) {
+ omp.workshare {
+ omp.single {
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %shape = fir.shape %c42 : (index) -> !fir.shape<1>
+ %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+ %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> {
+ ^bb0(%i: index):
+ hlfir.yield_element %c1_i32 : i32
+ }
+ hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref<!fir.array<42xi32>>
+ hlfir.destroy %elemental : !hlfir.expr<42xi32>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: @should_not_parallelize_1
+// CHECK-NOT: omp.workshare.loop_wrapper
+func.func @should_not_parallelize_1(%arg: !fir.ref<!fir.array<42xi32>>, %idx : index) {
+ omp.workshare {
+ omp.critical {
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %shape = fir.shape %c42 : (index) -> !fir.shape<1>
+ %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+ %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> {
+ ^bb0(%i: index):
+ hlfir.yield_element %c1_i32 : i32
+ }
+ hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref<!fir.array<42xi32>>
+ hlfir.destroy %elemental : !hlfir.expr<42xi32>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: @should_not_parallelize_2
+// CHECK-NOT: omp.workshare.loop_wrapper
+func.func @should_not_parallelize_2(%arg: !fir.ref<!fir.array<42xi32>>, %idx : index) {
+ omp.workshare {
+ omp.parallel {
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %shape = fir.shape %c42 : (index) -> !fir.shape<1>
+ %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+ %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> {
+ ^bb0(%i: index):
+ hlfir.yield_element %c1_i32 : i32
+ }
+ hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref<!fir.array<42xi32>>
+ hlfir.destroy %elemental : !hlfir.expr<42xi32>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: @should_not_parallelize_3
+// CHECK-NOT: omp.workshare.loop_wrapper
+func.func @should_not_parallelize_3(%arg: !fir.ref<!fir.array<42xi32>>, %idx : index) {
+ omp.workshare {
+ omp.parallel {
+ omp.workshare {
+ omp.parallel {
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %shape = fir.shape %c42 : (index) -> !fir.shape<1>
+ %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+ %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> {
+ ^bb0(%i: index):
+ hlfir.yield_element %c1_i32 : i32
+ }
+ hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref<!fir.array<42xi32>>
+ hlfir.destroy %elemental : !hlfir.expr<42xi32>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
>From 304ec01b7cfd6f0b1044567f54fbf780756be1cd Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Mon, 23 Sep 2024 12:56:11 +0900
Subject: [PATCH 21/27] Add integration test for workshare
---
flang/test/Integration/OpenMP/workshare.f90 | 57 +++++++++++++++++++++
1 file changed, 57 insertions(+)
create mode 100644 flang/test/Integration/OpenMP/workshare.f90
diff --git a/flang/test/Integration/OpenMP/workshare.f90 b/flang/test/Integration/OpenMP/workshare.f90
new file mode 100644
index 00000000000000..0c4524f8552906
--- /dev/null
+++ b/flang/test/Integration/OpenMP/workshare.f90
@@ -0,0 +1,57 @@
+!===----------------------------------------------------------------------===!
+! This directory can be used to add Integration tests involving multiple
+! stages of the compiler (for eg. from Fortran to LLVM IR). It should not
+! contain executable tests. We should only add tests here sparingly and only
+! if there is no other way to test. Repeat this message in each test that is
+! added to this directory and sub-directories.
+!===----------------------------------------------------------------------===!
+
+!RUN: %flang_fc1 -emit-hlfir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix HLFIR
+!RUN: %flang_fc1 -emit-fir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix FIR
+
+subroutine sb1(a, x, y, z)
+ integer :: a
+ integer :: x(:)
+ integer :: y(:)
+ integer :: z(:)
+ !$omp parallel workshare
+ z = a * x + y
+ !$omp end parallel workshare
+end subroutine
+
+! HLFIR: func.func @_QPsb1
+! HLFIR: omp.parallel {
+! HLFIR: omp.workshare {
+! HLFIR: hlfir.elemental {{.*}} unordered : (!fir.shape<1>) -> !hlfir.expr<?xi32> {
+! HLFIR: hlfir.elemental {{.*}} unordered : (!fir.shape<1>) -> !hlfir.expr<?xi32> {
+! HLFIR: hlfir.assign
+! HLFIR: hlfir.destroy
+! HLFIR: hlfir.destroy
+! HLFIR-NOT: omp.barrier
+! HLFIR: omp.terminator
+! HLFIR: }
+! HLFIR-NOT: omp.barrier
+! HLFIR: omp.terminator
+! HLFIR: }
+! HLFIR: return
+! HLFIR: }
+! HLFIR:}
+
+
+! FIR: func.func private @_workshare_copy_heap_Uxi32(%{{[a-z0-9]+}}: !fir.ref<!fir.heap<!fir.array<?xi32>>>, %{{[a-z0-9]+}}: !fir.ref<!fir.heap<!fir.array<?xi32>>>
+! FIR: func.func private @_workshare_copy_i32(%{{[a-z0-9]+}}: !fir.ref<i32>, %{{[a-z0-9]+}}: !fir.ref<i32>
+
+! FIR: func.func @_QPsb1
+! FIR: omp.parallel {
+! FIR: omp.single copyprivate(%9 -> @_workshare_copy_i32 : !fir.ref<i32>, %10 -> @_workshare_copy_heap_Uxi32 : !fir.ref<!fir.heap<!fir.array<?xi32>>>) {
+! FIR: fir.allocmem
+! FIR: omp.wsloop {
+! FIR: omp.loop_nest
+! FIR: omp.single nowait {
+! FIR: fir.call @_FortranAAssign
+! FIR: fir.freemem
+! FIR: omp.terminator
+! FIR: }
+! FIR: omp.barrier
+! FIR: omp.terminator
+! FIR: }
>From 754d54b54822b2877329b4c9be3a5924f634189a Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 15:02:54 +0900
Subject: [PATCH 22/27] One more integration test
---
.../OpenMP/workshare-scalar-array-mul.f90 | 67 +++++++++++++++++++
1 file changed, 67 insertions(+)
create mode 100644 flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90
diff --git a/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90 b/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90
new file mode 100644
index 00000000000000..2fb9a029bf93a5
--- /dev/null
+++ b/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90
@@ -0,0 +1,67 @@
+!===----------------------------------------------------------------------===!
+! This directory can be used to add Integration tests involving multiple
+! stages of the compiler (for eg. from Fortran to LLVM IR). It should not
+! contain executable tests. We should only add tests here sparingly and only
+! if there is no other way to test. Repeat this message in each test that is
+! added to this directory and sub-directories.
+!===----------------------------------------------------------------------===!
+
+!RUN: %flang_fc1 -emit-hlfir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix HLFIR-O3
+!RUN: %flang_fc1 -emit-fir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix FIR-O3
+
+!RUN: %flang_fc1 -emit-hlfir -fopenmp -O0 %s -o - | FileCheck %s --check-prefix HLFIR-O0
+!RUN: %flang_fc1 -emit-fir -fopenmp -O0 %s -o - | FileCheck %s --check-prefix FIR-O0
+
+program test
+ real :: arr_01(10)
+ !$omp parallel workshare
+ arr_01 = arr_01*2
+ !$omp end parallel workshare
+end program
+
+! HLFIR-O3: omp.parallel {
+! HLFIR-O3: omp.workshare {
+! HLFIR-O3: hlfir.elemental
+! HLFIR-O3: hlfir.assign
+! HLFIR-O3: hlfir.destroy
+! HLFIR-O3: omp.terminator
+! HLFIR-O3: omp.terminator
+
+! FIR-O3: omp.parallel {
+! FIR-O3: omp.wsloop nowait {
+! FIR-O3: omp.loop_nest
+! FIR-O3: omp.terminator
+! FIR-O3: omp.barrier
+! FIR-O3: omp.terminator
+
+! HLFIR-O0: omp.parallel {
+! HLFIR-O0: omp.workshare {
+! HLFIR-O0: hlfir.elemental
+! HLFIR-O0: hlfir.assign
+! HLFIR-O0: hlfir.destroy
+! HLFIR-O0: omp.terminator
+! HLFIR-O0: omp.terminator
+
+! Check the copyprivate copy function
+! FIR-O0: func.func private @_workshare_copy_heap_{{.*}}(%[[DST:.*]]: {{.*}}, %[[SRC:.*]]: {{.*}})
+! FIR-O0: fir.load %[[SRC]]
+! FIR-O0: fir.store {{.*}} to %[[DST]]
+
+! Check that we properly handle the temporary array
+! FIR-O0: omp.parallel {
+! FIR-O0: %[[CP:.*]] = fir.alloca !fir.heap<!fir.array<10xf32>>
+! FIR-O0: omp.single copyprivate(%[[CP]] -> @_workshare_copy_heap_
+! FIR-O0: fir.allocmem
+! FIR-O0: fir.store
+! FIR-O0: omp.terminator
+! FIR-O0: fir.load %[[CP]]
+! FIR-O0: omp.wsloop {
+! FIR-O0: omp.loop_nest
+! FIR-O0: omp.yield
+! FIR-O0: omp.terminator
+! FIR-O0: omp.single nowait {
+! FIR-O0: fir.call @_FortranAAssign
+! FIR-O0: fir.freemem
+! FIR-O0: omp.terminator
+! FIR-O0: omp.barrier
+! FIR-O0: omp.terminator
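To make the copyprivate mechanism checked above more concrete, here is a hedged sketch of what the generated copy function could look like (the symbol suffix and exact types are assumptions based on the 10-element real array in this test, not verbatim compiler output):

// Hedged sketch of a copyprivate copy function for the temporary heap array.
func.func private @_workshare_copy_heap_10xf32(%dst : !fir.ref<!fir.heap<!fir.array<10xf32>>>,
                                               %src : !fir.ref<!fir.heap<!fir.array<10xf32>>>) {
  // Broadcast the heap pointer produced by the thread that executed the
  // omp.single into every other thread's private alloca.
  %0 = fir.load %src : !fir.ref<!fir.heap<!fir.array<10xf32>>>
  fir.store %0 to %dst : !fir.ref<!fir.heap<!fir.array<10xf32>>>
  return
}

After the single, each thread reloads its own alloca (the fir.load %[[CP]] in the checks above) so the worksharing loop can write into the shared temporary.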
>From fed4884e8e55b0eece7e79a84910f32ecc876e42 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 15:12:43 +0900
Subject: [PATCH 23/27] Add test for cfg workshare bufferization
---
.../should-use-workshare-lowering-cfg.mlir | 22 +++++++++++++++++++
1 file changed, 22 insertions(+)
create mode 100644 flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir
diff --git a/flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir b/flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir
new file mode 100644
index 00000000000000..8b6d8097caad87
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir
@@ -0,0 +1,22 @@
+// RUN: fir-opt --bufferize-hlfir %s 2>&1 | FileCheck %s
+
+// CHECK: warning: omp workshare with unstructured control flow currently unsupported.
+func.func @warn_cfg(%arg: !fir.ref<!fir.array<42xi32>>, %idx : index) {
+ omp.workshare {
+ ^bb1:
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %shape = fir.shape %c42 : (index) -> !fir.shape<1>
+ %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+ %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> {
+ ^bb0(%i: index):
+ hlfir.yield_element %c1_i32 : i32
+ }
+ hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref<!fir.array<42xi32>>
+ hlfir.destroy %elemental : !hlfir.expr<42xi32>
+ cf.br ^bb2
+ ^bb2:
+ omp.terminator
+ }
+ return
+}
>From 99d20f9736fc09eb4e2ce9546f7fcd99751ace2a Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 15:24:46 +0900
Subject: [PATCH 24/27] Fix tests
---
.../should-use-workshare-lowering-cfg.mlir | 22 -------------------
.../OpenMP/should-use-workshare-lowering.mlir | 22 +++++++++++++++++++
2 files changed, 22 insertions(+), 22 deletions(-)
delete mode 100644 flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir
diff --git a/flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir b/flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir
deleted file mode 100644
index 8b6d8097caad87..00000000000000
--- a/flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir
+++ /dev/null
@@ -1,22 +0,0 @@
-// RUN: fir-opt --bufferize-hlfir %s 2>&1 | FileCheck %s
-
-// CHECK: warning: omp workshare with unstructured control flow currently unsupported.
-func.func @warn_cfg(%arg: !fir.ref<!fir.array<42xi32>>, %idx : index) {
- omp.workshare {
- ^bb1:
- %c42 = arith.constant 42 : index
- %c1_i32 = arith.constant 1 : i32
- %shape = fir.shape %c42 : (index) -> !fir.shape<1>
- %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
- %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> {
- ^bb0(%i: index):
- hlfir.yield_element %c1_i32 : i32
- }
- hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref<!fir.array<42xi32>>
- hlfir.destroy %elemental : !hlfir.expr<42xi32>
- cf.br ^bb2
- ^bb2:
- omp.terminator
- }
- return
-}
diff --git a/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir b/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir
index 229fe592a02b9b..91b08123cce422 100644
--- a/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir
+++ b/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir
@@ -138,3 +138,25 @@ func.func @should_not_parallelize_3(%arg: !fir.ref<!fir.array<42xi32>>, %idx : i
}
return
}
+
+// CHECK-LABEL: @should_not_parallelize_4
+// CHECK-NOT: omp.workshare.loop_wrapper
+func.func @should_not_parallelize_4(%arg: !fir.ref<!fir.array<42xi32>>, %idx : index) {
+ omp.workshare {
+ ^bb1:
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %shape = fir.shape %c42 : (index) -> !fir.shape<1>
+ %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+ %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> {
+ ^bb0(%i: index):
+ hlfir.yield_element %c1_i32 : i32
+ }
+ hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref<!fir.array<42xi32>>
+ hlfir.destroy %elemental : !hlfir.expr<42xi32>
+ cf.br ^bb2
+ ^bb2:
+ omp.terminator
+ }
+ return
+}
>From d234efec26d9d7882975c4ef90e9eceff2b45a26 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Sat, 19 Oct 2024 23:30:42 +0900
Subject: [PATCH 25/27] Test coverage for all changes
---
.../OpenMP/workshare-array-array-assign.f90 | 35 ++++++++++++++
.../{workshare.f90 => workshare-axpy.f90} | 0
.../OpenMP/workshare-scalar-array-assign.f90 | 46 +++++++++++++++++++
3 files changed, 81 insertions(+)
create mode 100644 flang/test/Integration/OpenMP/workshare-array-array-assign.f90
rename flang/test/Integration/OpenMP/{workshare.f90 => workshare-axpy.f90} (100%)
create mode 100644 flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90
diff --git a/flang/test/Integration/OpenMP/workshare-array-array-assign.f90 b/flang/test/Integration/OpenMP/workshare-array-array-assign.f90
new file mode 100644
index 00000000000000..065f72d5d72d88
--- /dev/null
+++ b/flang/test/Integration/OpenMP/workshare-array-array-assign.f90
@@ -0,0 +1,35 @@
+!===----------------------------------------------------------------------===!
+! This directory can be used to add Integration tests involving multiple
+! stages of the compiler (for eg. from Fortran to LLVM IR). It should not
+! contain executable tests. We should only add tests here sparingly and only
+! if there is no other way to test. Repeat this message in each test that is
+! added to this directory and sub-directories.
+!===----------------------------------------------------------------------===!
+
+!RUN: %flang_fc1 -emit-hlfir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix HLFIR
+!RUN: %flang_fc1 -emit-fir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix FIR
+
+subroutine sb1(x, y)
+ integer :: x(:)
+ integer :: y(:)
+ !$omp parallel workshare
+ x = y
+ !$omp end parallel workshare
+end subroutine
+
+! HLFIR: omp.parallel {
+! HLFIR: omp.workshare {
+! HLFIR: hlfir.assign
+! HLFIR: omp.terminator
+! HLFIR: }
+! HLFIR: omp.terminator
+! HLFIR: }
+
+! FIR: omp.parallel {
+! FIR: omp.wsloop nowait {
+! FIR: omp.loop_nest
+! FIR: omp.terminator
+! FIR: }
+! FIR: omp.barrier
+! FIR: omp.terminator
+! FIR: }
diff --git a/flang/test/Integration/OpenMP/workshare.f90 b/flang/test/Integration/OpenMP/workshare-axpy.f90
similarity index 100%
rename from flang/test/Integration/OpenMP/workshare.f90
rename to flang/test/Integration/OpenMP/workshare-axpy.f90
diff --git a/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90 b/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90
new file mode 100644
index 00000000000000..fad1af110792bb
--- /dev/null
+++ b/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90
@@ -0,0 +1,46 @@
+!===----------------------------------------------------------------------===!
+! This directory can be used to add Integration tests involving multiple
+! stages of the compiler (for eg. from Fortran to LLVM IR). It should not
+! contain executable tests. We should only add tests here sparingly and only
+! if there is no other way to test. Repeat this message in each test that is
+! added to this directory and sub-directories.
+!===----------------------------------------------------------------------===!
+
+!RUN: %flang_fc1 -emit-hlfir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix HLFIR
+!RUN: %flang_fc1 -emit-fir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix FIR
+
+subroutine sb1(a, x)
+ integer :: a
+ integer :: x(:)
+ !$omp parallel workshare
+ x = a
+ !$omp end parallel workshare
+end subroutine
+
+! HLFIR: omp.parallel {
+! HLFIR: omp.workshare {
+! HLFIR: %[[SCALAR:.*]] = fir.load %1#0 : !fir.ref<i32>
+! HLFIR: hlfir.assign %[[SCALAR]] to
+! HLFIR: omp.terminator
+! HLFIR: }
+! HLFIR: omp.terminator
+! HLFIR: }
+
+! FIR: omp.parallel {
+! FIR: %[[SCALAR_ALLOCA:.*]] = fir.alloca i32
+! FIR: omp.single copyprivate(%[[SCALAR_ALLOCA]] -> @_workshare_copy_i32 : !fir.ref<i32>) {
+! FIR: %[[SCALAR_LOAD:.*]] = fir.load %{{.*}} : !fir.ref<i32>
+! FIR: fir.store %[[SCALAR_LOAD]] to %[[SCALAR_ALLOCA]] : !fir.ref<i32>
+! FIR: omp.terminator
+! FIR: }
+! FIR: %[[SCALAR_RELOAD:.*]] = fir.load %[[SCALAR_ALLOCA]] : !fir.ref<i32>
+! FIR: %6:3 = fir.box_dims %3, %c0 : (!fir.box<!fir.array<?xi32>>, index) -> (index, index, index)
+! FIR: omp.wsloop nowait {
+! FIR: omp.loop_nest (%arg2) : index = (%c1) to (%6#1) inclusive step (%c1) {
+! FIR: fir.store %[[SCALAR_RELOAD]]
+! FIR: omp.yield
+! FIR: }
+! FIR: omp.terminator
+! FIR: }
+! FIR: omp.barrier
+! FIR: omp.terminator
>From d35b0c83b96f659e54ad6de700eaed591e43ea90 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Sun, 20 Oct 2024 01:34:31 +0900
Subject: [PATCH 26/27] Integration tests
---
flang/test/Integration/OpenMP/workshare-array-array-assign.f90 | 1 -
flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90 | 1 -
flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90 | 2 --
3 files changed, 4 deletions(-)
diff --git a/flang/test/Integration/OpenMP/workshare-array-array-assign.f90 b/flang/test/Integration/OpenMP/workshare-array-array-assign.f90
index 065f72d5d72d88..e9ec5d9175beb5 100644
--- a/flang/test/Integration/OpenMP/workshare-array-array-assign.f90
+++ b/flang/test/Integration/OpenMP/workshare-array-array-assign.f90
@@ -28,7 +28,6 @@ subroutine sb1(x, y)
! FIR: omp.parallel {
! FIR: omp.wsloop nowait {
! FIR: omp.loop_nest
-! FIR: omp.terminator
! FIR: }
! FIR: omp.barrier
! FIR: omp.terminator
diff --git a/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90 b/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90
index fad1af110792bb..6c180cd639997c 100644
--- a/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90
+++ b/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90
@@ -40,7 +40,6 @@ subroutine sb1(a, x)
! FIR: fir.store %[[SCALAR_RELOAD]]
! FIR: omp.yield
! FIR: }
-! FIR: omp.terminator
! FIR: }
! FIR: omp.barrier
! FIR: omp.terminator
diff --git a/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90 b/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90
index 2fb9a029bf93a5..9b8ef66b48f47d 100644
--- a/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90
+++ b/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90
@@ -30,7 +30,6 @@ program test
! FIR-O3: omp.parallel {
! FIR-O3: omp.wsloop nowait {
! FIR-O3: omp.loop_nest
-! FIR-O3: omp.terminator
! FIR-O3: omp.barrier
! FIR-O3: omp.terminator
@@ -58,7 +57,6 @@ program test
! FIR-O0: omp.wsloop {
! FIR-O0: omp.loop_nest
! FIR-O0: omp.yield
-! FIR-O0: omp.terminator
! FIR-O0: omp.single nowait {
! FIR-O0: fir.call @_FortranAAssign
! FIR-O0: fir.freemem
>From b76551be57566e934936606ee512ce21fb06f0f7 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Sun, 20 Oct 2024 01:35:36 +0900
Subject: [PATCH 27/27] bufferize fix
---
flang/test/HLFIR/bufferize-workshare.fir | 1 -
1 file changed, 1 deletion(-)
diff --git a/flang/test/HLFIR/bufferize-workshare.fir b/flang/test/HLFIR/bufferize-workshare.fir
index 9b7341ae43398a..af5abb381937ec 100644
--- a/flang/test/HLFIR/bufferize-workshare.fir
+++ b/flang/test/HLFIR/bufferize-workshare.fir
@@ -21,7 +21,6 @@
// CHECK: hlfir.assign %[[VAL_12]] to %[[VAL_13]] temporary_lhs : i32, !fir.ref<i32>
// CHECK: omp.yield
// CHECK: }
-// CHECK: omp.terminator
// CHECK: }
// CHECK: %[[VAL_14:.*]] = fir.undefined tuple<!fir.heap<!fir.array<42xi32>>, i1>
// CHECK: %[[VAL_15:.*]] = fir.insert_value %[[VAL_14]], %[[VAL_7]], [1 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, i1) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>