[llvm-branch-commits] [flang] [flang] Lower omp.workshare to other omp constructs (PR #101446)
Ivan R. Ivanov via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Oct 4 06:45:49 PDT 2024
https://github.com/ivanradanov updated https://github.com/llvm/llvm-project/pull/101446
>From e56dbd6a0625890fd9a3d6a62675e864ca94a8f5 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Sun, 4 Aug 2024 22:06:55 +0900
Subject: [PATCH 01/10] [flang] Lower omp.workshare to other omp constructs
Change to workshare loop wrapper op
Move single op declaration
Schedule pass properly
Correctly handle nested loop nests to be parallelized by workshare
Leave comments for shouldUseWorkshareLowering
Use copyprivate to scatter val from omp.single
TODO still need to implement copy function
TODO transitive check for usage outside of omp.single not implemented yet
Transitively check for users outside of single op
TODO need to implement copy func
TODO need to hoist allocas outside of single regions
Add tests
Hoist allocas
More tests
Emit body for copy func
Test the tmp storing logic
Clean up trivially dead ops
Only handle single-block regions for now
Fix tests for custom assembly for loop wrapper
Only run the lower workshare pass if openmp is enabled
Implement some missing functionality
Fix tests
Fix test
Iterate backwards to find all trivially dead ops
Add explanation comment for createCopyFunc
Update test
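
For reference, the lowering implemented by this patch roughly performs the
following transformation (a minimal sketch mirroring the example in the
LowerWorkshare.cpp doc comment; types and operands are elided, and the
workshare.loop_wrapper is rewritten to an omp.wsloop):

  omp.workshare {
    %a = fir.allocmem ...
    omp.workshare.loop_wrapper {...}
    fir.call Assign %b %a
    fir.freemem %a
  }

becomes

  %tmp = fir.alloca ...
  omp.single copyprivate(%tmp) {
    %a = fir.allocmem ...
    fir.store %a to %tmp
  }
  %a_reloaded = fir.load %tmp
  omp.wsloop {...}
  omp.single nowait {
    fir.call Assign %b %a_reloaded
    fir.freemem %a_reloaded
  }
  omp.barrier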
---
flang/include/flang/Optimizer/OpenMP/Passes.h | 5 +
.../include/flang/Optimizer/OpenMP/Passes.td | 5 +
flang/include/flang/Tools/CLOptions.inc | 6 +-
flang/include/flang/Tools/CrossToolHelpers.h | 1 +
flang/lib/Frontend/FrontendActions.cpp | 10 +-
flang/lib/Optimizer/OpenMP/CMakeLists.txt | 1 +
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 446 ++++++++++++++++++
flang/test/Fir/basic-program.fir | 1 +
.../Transforms/OpenMP/lower-workshare.mlir | 189 ++++++++
.../Transforms/OpenMP/lower-workshare2.mlir | 23 +
.../Transforms/OpenMP/lower-workshare3.mlir | 74 +++
.../Transforms/OpenMP/lower-workshare4.mlir | 59 +++
.../Transforms/OpenMP/lower-workshare5.mlir | 42 ++
.../Transforms/OpenMP/lower-workshare6.mlir | 51 ++
flang/tools/bbc/bbc.cpp | 5 +-
flang/tools/tco/tco.cpp | 1 +
16 files changed, 915 insertions(+), 4 deletions(-)
create mode 100644 flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare2.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare3.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare4.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare5.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare6.mlir
diff --git a/flang/include/flang/Optimizer/OpenMP/Passes.h b/flang/include/flang/Optimizer/OpenMP/Passes.h
index 403d79667bf448..feb395f1a12dbd 100644
--- a/flang/include/flang/Optimizer/OpenMP/Passes.h
+++ b/flang/include/flang/Optimizer/OpenMP/Passes.h
@@ -25,6 +25,11 @@ namespace flangomp {
#define GEN_PASS_REGISTRATION
#include "flang/Optimizer/OpenMP/Passes.h.inc"
+/// Implements the logic specified in the 2.8.3 workshare Construct section of
+/// the OpenMP standard which specifies what statements or constructs shall be
+/// divided into units of work.
+bool shouldUseWorkshareLowering(mlir::Operation *op);
+
} // namespace flangomp
#endif // FORTRAN_OPTIMIZER_OPENMP_PASSES_H
diff --git a/flang/include/flang/Optimizer/OpenMP/Passes.td b/flang/include/flang/Optimizer/OpenMP/Passes.td
index 395178e26a5762..041240cad12eb3 100644
--- a/flang/include/flang/Optimizer/OpenMP/Passes.td
+++ b/flang/include/flang/Optimizer/OpenMP/Passes.td
@@ -37,4 +37,9 @@ def FunctionFiltering : Pass<"omp-function-filtering"> {
];
}
+// Needs to be scheduled on the module as we create functions in it
+def LowerWorkshare : Pass<"lower-workshare", "::mlir::ModuleOp"> {
+ let summary = "Lower workshare construct";
+}
+
#endif //FORTRAN_OPTIMIZER_OPENMP_PASSES
diff --git a/flang/include/flang/Tools/CLOptions.inc b/flang/include/flang/Tools/CLOptions.inc
index 1881e23b00045a..bb00e079008a0b 100644
--- a/flang/include/flang/Tools/CLOptions.inc
+++ b/flang/include/flang/Tools/CLOptions.inc
@@ -337,7 +337,7 @@ inline void createDefaultFIROptimizerPassPipeline(
/// \param optLevel - optimization level used for creating FIR optimization
/// passes pipeline
inline void createHLFIRToFIRPassPipeline(
- mlir::PassManager &pm, llvm::OptimizationLevel optLevel = defaultOptLevel) {
+ mlir::PassManager &pm, bool enableOpenMP, llvm::OptimizationLevel optLevel = defaultOptLevel) {
if (optLevel.isOptimizingForSpeed()) {
addCanonicalizerPassWithoutRegionSimplification(pm);
addNestedPassToAllTopLevelOperations(
@@ -354,6 +354,8 @@ inline void createHLFIRToFIRPassPipeline(
pm.addPass(hlfir::createLowerHLFIRIntrinsics());
pm.addPass(hlfir::createBufferizeHLFIR());
pm.addPass(hlfir::createConvertHLFIRtoFIR());
+ if (enableOpenMP)
+ pm.addPass(flangomp::createLowerWorkshare());
}
/// Create a pass pipeline for handling certain OpenMP transformations needed
@@ -425,7 +427,7 @@ inline void createDefaultFIRCodeGenPassPipeline(mlir::PassManager &pm,
/// passes pipeline
inline void createMLIRToLLVMPassPipeline(mlir::PassManager &pm,
MLIRToLLVMPassPipelineConfig &config, llvm::StringRef inputFilename = {}) {
- fir::createHLFIRToFIRPassPipeline(pm, config.OptLevel);
+ fir::createHLFIRToFIRPassPipeline(pm, config.EnableOpenMP, config.OptLevel);
// Add default optimizer pass pipeline.
fir::createDefaultFIROptimizerPassPipeline(pm, config);
diff --git a/flang/include/flang/Tools/CrossToolHelpers.h b/flang/include/flang/Tools/CrossToolHelpers.h
index 75fd783af237d0..0911b9bca67332 100644
--- a/flang/include/flang/Tools/CrossToolHelpers.h
+++ b/flang/include/flang/Tools/CrossToolHelpers.h
@@ -123,6 +123,7 @@ struct MLIRToLLVMPassPipelineConfig : public FlangEPCallBacks {
false; ///< Set no-signed-zeros-fp-math attribute for functions.
bool UnsafeFPMath = false; ///< Set unsafe-fp-math attribute for functions.
bool NSWOnLoopVarInc = false; ///< Add nsw flag to loop variable increments.
+ bool EnableOpenMP = false; ///< Enable OpenMP lowering.
};
struct OffloadModuleOpts {
diff --git a/flang/lib/Frontend/FrontendActions.cpp b/flang/lib/Frontend/FrontendActions.cpp
index 5c86bd947ce73f..db5c5649337528 100644
--- a/flang/lib/Frontend/FrontendActions.cpp
+++ b/flang/lib/Frontend/FrontendActions.cpp
@@ -711,7 +711,11 @@ void CodeGenAction::lowerHLFIRToFIR() {
pm.enableVerifier(/*verifyPasses=*/true);
// Create the pass pipeline
- fir::createHLFIRToFIRPassPipeline(pm, level);
+ fir::createHLFIRToFIRPassPipeline(
+ pm,
+ ci.getInvocation().getFrontendOpts().features.IsEnabled(
+ Fortran::common::LanguageFeature::OpenMP),
+ level);
(void)mlir::applyPassManagerCLOptions(pm);
if (!mlir::succeeded(pm.run(*mlirModule))) {
@@ -824,6 +828,10 @@ void CodeGenAction::generateLLVMIR() {
config.VScaleMax = vsr->second;
}
+ if (ci.getInvocation().getFrontendOpts().features.IsEnabled(
+ Fortran::common::LanguageFeature::OpenMP))
+ config.EnableOpenMP = true;
+
if (ci.getInvocation().getLoweringOpts().getNSWOnLoopVarInc())
config.NSWOnLoopVarInc = true;
diff --git a/flang/lib/Optimizer/OpenMP/CMakeLists.txt b/flang/lib/Optimizer/OpenMP/CMakeLists.txt
index 92051634f0378b..39e92d388288d4 100644
--- a/flang/lib/Optimizer/OpenMP/CMakeLists.txt
+++ b/flang/lib/Optimizer/OpenMP/CMakeLists.txt
@@ -4,6 +4,7 @@ add_flang_library(FlangOpenMPTransforms
FunctionFiltering.cpp
MapInfoFinalization.cpp
MarkDeclareTarget.cpp
+ LowerWorkshare.cpp
DEPENDS
FIRDialect
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
new file mode 100644
index 00000000000000..6e5538b54ba5e0
--- /dev/null
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -0,0 +1,446 @@
+//===- LowerWorkshare.cpp - Lower omp.workshare to other omp constructs --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the lowering of omp.workshare to other omp constructs.
+//
+// This pass is tasked with parallelizing the loops nested in
+// workshare.loop_wrapper, while the Fortran-to-MLIR lowering and the
+// HLFIR-to-FIR lowering pipelines are responsible for emitting the
+// workshare.loop_wrapper ops where appropriate, according to the
+// `shouldUseWorkshareLowering` function.
+//
+//===----------------------------------------------------------------------===//
+
+#include <flang/Optimizer/Builder/FIRBuilder.h>
+#include <flang/Optimizer/Dialect/FIROps.h>
+#include <flang/Optimizer/Dialect/FIRType.h>
+#include <flang/Optimizer/HLFIR/HLFIROps.h>
+#include <flang/Optimizer/OpenMP/Passes.h>
+#include <llvm/ADT/BreadthFirstIterator.h>
+#include <llvm/ADT/STLExtras.h>
+#include <llvm/ADT/SmallVectorExtras.h>
+#include <llvm/ADT/iterator_range.h>
+#include <llvm/Support/ErrorHandling.h>
+#include <mlir/Dialect/Arith/IR/Arith.h>
+#include <mlir/Dialect/LLVMIR/LLVMTypes.h>
+#include <mlir/Dialect/OpenMP/OpenMPClauseOperands.h>
+#include <mlir/Dialect/OpenMP/OpenMPDialect.h>
+#include <mlir/Dialect/SCF/IR/SCF.h>
+#include <mlir/IR/BuiltinOps.h>
+#include <mlir/IR/IRMapping.h>
+#include <mlir/IR/OpDefinition.h>
+#include <mlir/IR/PatternMatch.h>
+#include <mlir/IR/Visitors.h>
+#include <mlir/Interfaces/SideEffectInterfaces.h>
+#include <mlir/Support/LLVM.h>
+#include <mlir/Transforms/GreedyPatternRewriteDriver.h>
+
+#include <variant>
+
+namespace flangomp {
+#define GEN_PASS_DEF_LOWERWORKSHARE
+#include "flang/Optimizer/OpenMP/Passes.h.inc"
+} // namespace flangomp
+
+#define DEBUG_TYPE "lower-workshare"
+
+using namespace mlir;
+
+namespace flangomp {
+
+// Checks for the nesting pattern below, as we need to avoid sharing the work
+// of statements which are nested in certain constructs such as omp.critical
+// or another omp.parallel.
+//
+// omp.workshare { // `wsOp`
+// ...
+// omp.T { // `parent`
+// ...
+// `op`
+//
+template <typename T>
+static bool isNestedIn(omp::WorkshareOp wsOp, Operation *op) {
+ T parent = op->getParentOfType<T>();
+ if (!parent)
+ return false;
+ return wsOp->isProperAncestor(parent);
+}
+
+bool shouldUseWorkshareLowering(Operation *op) {
+ auto parentWorkshare = op->getParentOfType<omp::WorkshareOp>();
+
+ if (!parentWorkshare)
+ return false;
+
+ if (isNestedIn<omp::CriticalOp>(parentWorkshare, op))
+ return false;
+
+ // 2.8.3 workshare Construct
+ // For a parallel construct, the construct is a unit of work with respect to
+ // the workshare construct. The statements contained in the parallel construct
+ // are executed by a new thread team.
+ if (isNestedIn<omp::ParallelOp>(parentWorkshare, op))
+ return false;
+
+ // 2.8.2 single Construct
+ // Binding The binding thread set for a single region is the current team. A
+ // single region binds to the innermost enclosing parallel region.
+ // Description Only one of the encountering threads will execute the
+ // structured block associated with the single construct.
+ if (isNestedIn<omp::SingleOp>(parentWorkshare, op))
+ return false;
+
+ return true;
+}
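+
+// For illustration (a hypothetical nesting, not taken from the tests), given
+//
+//   omp.workshare {
+//     hlfir.assign ...   // (1)
+//     omp.parallel {
+//       hlfir.assign ... // (2)
+//     }
+//   }
+//
+// only (1) is a candidate for workshare lowering (i.e. for being wrapped in
+// omp.workshare.loop_wrapper by the lowering pipelines), whereas (2) is not,
+// because the nested parallel construct is a single unit of work executed by
+// a new thread team.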
+
+} // namespace flangomp
+
+namespace {
+
+struct SingleRegion {
+ Block::iterator begin, end;
+};
+
+static bool mustParallelizeOp(Operation *op) {
+ return op
+ ->walk([&](Operation *nested) {
+ // We need to be careful not to pick up workshare.loop_wrapper in nested
+ // omp.parallel{omp.workshare} regions, i.e. make sure that `nested`
+ // binds to the workshare region we are currently handling.
+ //
+ // For example:
+ //
+ // omp.parallel {
+ // omp.workshare { // currently handling this
+ // omp.parallel {
+ // omp.workshare { // nested workshare
+ // omp.workshare.loop_wrapper {}
+ //
+ // Therefore, we skip if we encounter a nested omp.workshare.
+        if (isa<omp::WorkshareOp>(nested))
+          return WalkResult::skip();
+        if (isa<omp::WorkshareLoopWrapperOp>(nested))
+          return WalkResult::interrupt();
+ return WalkResult::advance();
+ })
+ .wasInterrupted();
+}
+
+static bool isSafeToParallelize(Operation *op) {
+ return isa<hlfir::DeclareOp>(op) || isa<fir::DeclareOp>(op) ||
+ isMemoryEffectFree(op);
+}
+
+/// Simple shallow copies suffice for our purposes in this pass, so we
+/// implement this simpler alternative to the full-fledged `createCopyFunc` in
+/// the frontend.
+static mlir::func::FuncOp createCopyFunc(mlir::Location loc, mlir::Type varType,
+ fir::FirOpBuilder builder) {
+ mlir::ModuleOp module = builder.getModule();
+ auto rt = cast<fir::ReferenceType>(varType);
+ mlir::Type eleTy = rt.getEleTy();
+ std::string copyFuncName =
+ fir::getTypeAsString(eleTy, builder.getKindMap(), "_workshare_copy");
+
+ if (auto decl = module.lookupSymbol<mlir::func::FuncOp>(copyFuncName))
+ return decl;
+ // create function
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ mlir::OpBuilder modBuilder(module.getBodyRegion());
+ llvm::SmallVector<mlir::Type> argsTy = {varType, varType};
+ auto funcType = mlir::FunctionType::get(builder.getContext(), argsTy, {});
+ mlir::func::FuncOp funcOp =
+ modBuilder.create<mlir::func::FuncOp>(loc, copyFuncName, funcType);
+ funcOp.setVisibility(mlir::SymbolTable::Visibility::Private);
+ builder.createBlock(&funcOp.getRegion(), funcOp.getRegion().end(), argsTy,
+ {loc, loc});
+ builder.setInsertionPointToStart(&funcOp.getRegion().back());
+
+ Value loaded = builder.create<fir::LoadOp>(loc, funcOp.getArgument(0));
+ builder.create<fir::StoreOp>(loc, loaded, funcOp.getArgument(1));
+
+ builder.create<mlir::func::ReturnOp>(loc);
+ return funcOp;
+}
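+
+// For example, for a varType of !fir.ref<i32> the generated copy function
+// looks like the one checked in the tests added by this patch:
+//
+//   func.func private @_workshare_copy_i32(%arg0: !fir.ref<i32>,
+//                                          %arg1: !fir.ref<i32>) {
+//     %0 = fir.load %arg0 : !fir.ref<i32>
+//     fir.store %0 to %arg1 : !fir.ref<i32>
+//     return
+//   }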
+
+static bool isUserOutsideSR(Operation *user, Operation *parentOp,
+ SingleRegion sr) {
+ while (user->getParentOp() != parentOp)
+ user = user->getParentOp();
+ return sr.begin->getBlock() != user->getBlock() ||
+ !(user->isBeforeInBlock(&*sr.end) && sr.begin->isBeforeInBlock(user));
+}
+
+static bool isTransitivelyUsedOutside(Value v, SingleRegion sr) {
+ Block *srBlock = sr.begin->getBlock();
+ Operation *parentOp = srBlock->getParentOp();
+
+ for (auto &use : v.getUses()) {
+ Operation *user = use.getOwner();
+ if (isUserOutsideSR(user, parentOp, sr))
+ return true;
+
+ // Results of nested users cannot be used outside of the SR
+ if (user->getBlock() != srBlock)
+ continue;
+
+ // A non-safe to parallelize operation will be handled separately
+ if (!isSafeToParallelize(user))
+ continue;
+
+ for (auto res : user->getResults())
+ if (isTransitivelyUsedOutside(res, sr))
+ return true;
+ }
+ return false;
+}
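+
+// For example (illustrative ops, not from the tests), given a single region
+// `sr` containing
+//
+//   %a = "test.alloc"() : () -> i32   // not safe to parallelize
+//   %b = arith.addi %a, %a : i32      // pure
+//
+// followed by a use of %b after `sr`, %a is transitively used outside `sr`
+// through the pure op defining %b, so its value must be stored to a temporary
+// in the omp.single and reloaded by the other threads.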
+
+/// We clone pure operations in both the parallel and single blocks. This
+/// function cleans them up if they end up with no uses.
+static void cleanupBlock(Block *block) {
+ for (Operation &op : llvm::make_early_inc_range(
+ llvm::make_range(block->rbegin(), block->rend())))
+ if (isOpTriviallyDead(&op))
+ op.erase();
+}
+
+static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
+ IRMapping &rootMapping, Location loc,
+ mlir::DominanceInfo &di) {
+ OpBuilder rootBuilder(sourceRegion.getContext());
+ ModuleOp m = sourceRegion.getParentOfType<ModuleOp>();
+ OpBuilder copyFuncBuilder(m.getBodyRegion());
+ fir::FirOpBuilder firCopyFuncBuilder(copyFuncBuilder, m);
+
+ auto mapReloadedValue =
+ [&](Value v, OpBuilder allocaBuilder, OpBuilder singleBuilder,
+ OpBuilder parallelBuilder, IRMapping singleMapping) -> Value {
+ if (auto reloaded = rootMapping.lookupOrNull(v))
+ return nullptr;
+ Type ty = v.getType();
+ Value alloc = allocaBuilder.create<fir::AllocaOp>(loc, ty);
+ singleBuilder.create<fir::StoreOp>(loc, singleMapping.lookup(v), alloc);
+ Value reloaded = parallelBuilder.create<fir::LoadOp>(loc, ty, alloc);
+ rootMapping.map(v, reloaded);
+ return alloc;
+ };
+
+ auto moveToSingle = [&](SingleRegion sr, OpBuilder allocaBuilder,
+ OpBuilder singleBuilder,
+ OpBuilder parallelBuilder) -> SmallVector<Value> {
+ IRMapping singleMapping = rootMapping;
+ SmallVector<Value> copyPrivate;
+
+ for (Operation &op : llvm::make_range(sr.begin, sr.end)) {
+ if (isSafeToParallelize(&op)) {
+ singleBuilder.clone(op, singleMapping);
+ parallelBuilder.clone(op, rootMapping);
+ } else if (auto alloca = dyn_cast<fir::AllocaOp>(&op)) {
+ auto hoisted =
+ cast<fir::AllocaOp>(allocaBuilder.clone(*alloca, singleMapping));
+ rootMapping.map(&*alloca, &*hoisted);
+ rootMapping.map(alloca.getResult(), hoisted.getResult());
+ copyPrivate.push_back(hoisted);
+ } else {
+ singleBuilder.clone(op, singleMapping);
+ // Prepare reloaded values for results of operations that cannot be
+ // safely parallelized and which are used after the region `sr`
+ for (auto res : op.getResults()) {
+ if (isTransitivelyUsedOutside(res, sr)) {
+ auto alloc = mapReloadedValue(res, allocaBuilder, singleBuilder,
+ parallelBuilder, singleMapping);
+ if (alloc)
+ copyPrivate.push_back(alloc);
+ }
+ }
+ }
+ }
+ singleBuilder.create<omp::TerminatorOp>(loc);
+ return copyPrivate;
+ };
+
+ for (Block &block : sourceRegion) {
+ Block *targetBlock = rootBuilder.createBlock(
+ &targetRegion, {}, block.getArgumentTypes(),
+ llvm::map_to_vector(block.getArguments(),
+ [](BlockArgument arg) { return arg.getLoc(); }));
+ rootMapping.map(&block, targetBlock);
+ rootMapping.map(block.getArguments(), targetBlock->getArguments());
+ }
+
+ auto handleOneBlock = [&](Block &block) {
+ Block &targetBlock = *rootMapping.lookup(&block);
+ rootBuilder.setInsertionPointToStart(&targetBlock);
+ Operation *terminator = block.getTerminator();
+ SmallVector<std::variant<SingleRegion, Operation *>> regions;
+
+ auto it = block.begin();
+ auto getOneRegion = [&]() {
+ if (&*it == terminator)
+ return false;
+ if (mustParallelizeOp(&*it)) {
+ regions.push_back(&*it);
+ it++;
+ return true;
+ }
+ SingleRegion sr;
+ sr.begin = it;
+ while (&*it != terminator && !mustParallelizeOp(&*it))
+ it++;
+ sr.end = it;
+ assert(sr.begin != sr.end);
+ regions.push_back(sr);
+ return true;
+ };
+ while (getOneRegion())
+ ;
+
+ for (auto [i, opOrSingle] : llvm::enumerate(regions)) {
+ bool isLast = i + 1 == regions.size();
+ if (std::holds_alternative<SingleRegion>(opOrSingle)) {
+ OpBuilder singleBuilder(sourceRegion.getContext());
+ Block *singleBlock = new Block();
+ singleBuilder.setInsertionPointToStart(singleBlock);
+
+ OpBuilder allocaBuilder(sourceRegion.getContext());
+ Block *allocaBlock = new Block();
+ allocaBuilder.setInsertionPointToStart(allocaBlock);
+
+ OpBuilder parallelBuilder(sourceRegion.getContext());
+ Block *parallelBlock = new Block();
+ parallelBuilder.setInsertionPointToStart(parallelBlock);
+
+ omp::SingleOperands singleOperands;
+ if (isLast)
+ singleOperands.nowait = rootBuilder.getUnitAttr();
+ singleOperands.copyprivateVars =
+ moveToSingle(std::get<SingleRegion>(opOrSingle), allocaBuilder,
+ singleBuilder, parallelBuilder);
+ cleanupBlock(singleBlock);
+ for (auto var : singleOperands.copyprivateVars) {
+ mlir::func::FuncOp funcOp =
+ createCopyFunc(loc, var.getType(), firCopyFuncBuilder);
+ singleOperands.copyprivateSyms.push_back(SymbolRefAttr::get(funcOp));
+ }
+ omp::SingleOp singleOp =
+ rootBuilder.create<omp::SingleOp>(loc, singleOperands);
+ singleOp.getRegion().push_back(singleBlock);
+ rootBuilder.getInsertionBlock()->getOperations().splice(
+ rootBuilder.getInsertionPoint(), parallelBlock->getOperations());
+ targetRegion.front().getOperations().splice(
+ singleOp->getIterator(), allocaBlock->getOperations());
+ delete allocaBlock;
+ delete parallelBlock;
+ } else {
+ auto op = std::get<Operation *>(opOrSingle);
+ if (auto wslw = dyn_cast<omp::WorkshareLoopWrapperOp>(op)) {
+ omp::WsloopOperands wsloopOperands;
+ if (isLast)
+ wsloopOperands.nowait = rootBuilder.getUnitAttr();
+ auto wsloop =
+ rootBuilder.create<mlir::omp::WsloopOp>(loc, wsloopOperands);
+ auto clonedWslw = cast<omp::WorkshareLoopWrapperOp>(
+ rootBuilder.clone(*wslw, rootMapping));
+ wsloop.getRegion().takeBody(clonedWslw.getRegion());
+ clonedWslw->erase();
+ } else {
+ assert(mustParallelizeOp(op));
+ Operation *cloned = rootBuilder.cloneWithoutRegions(*op, rootMapping);
+ for (auto [region, clonedRegion] :
+ llvm::zip(op->getRegions(), cloned->getRegions()))
+ parallelizeRegion(region, clonedRegion, rootMapping, loc, di);
+ }
+ }
+ }
+
+ rootBuilder.clone(*block.getTerminator(), rootMapping);
+ };
+
+ if (sourceRegion.hasOneBlock()) {
+ handleOneBlock(sourceRegion.front());
+ } else {
+ auto &domTree = di.getDomTree(&sourceRegion);
+ for (auto node : llvm::breadth_first(domTree.getRootNode())) {
+ handleOneBlock(*node->getBlock());
+ }
+ }
+
+ for (Block &targetBlock : targetRegion)
+ cleanupBlock(&targetBlock);
+}
+
+/// Lowers workshare to a sequence of single-thread regions and parallel loops
+///
+/// For example:
+///
+/// omp.workshare {
+/// %a = fir.allocmem
+/// omp.workshare.loop_wrapper {}
+/// fir.call Assign %b %a
+/// fir.freemem %a
+/// }
+///
+/// becomes
+///
+/// %tmp = fir.alloca
+/// omp.single copyprivate(%tmp) {
+/// %a = fir.allocmem
+/// fir.store %a %tmp
+/// }
+/// %a_reloaded = fir.load %tmp
+/// omp.workshare.loop_wrapper {}
+/// omp.single {
+/// fir.call Assign %b %a_reloaded
+/// fir.freemem %a_reloaded
+/// }
+///
+/// Note that we allocate temporary memory for values in omp.single regions that
+/// need to be accessed by all threads, and broadcast them using single's copyprivate.
+LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) {
+ Location loc = wsOp->getLoc();
+ IRMapping rootMapping;
+
+ OpBuilder rootBuilder(wsOp);
+
+ // This operation is just a placeholder which will be erased later. We need it
+ // because our `parallelizeRegion` function works on regions and not blocks.
+ omp::WorkshareOp newOp =
+ rootBuilder.create<omp::WorkshareOp>(loc, omp::WorkshareOperands());
+ if (!wsOp.getNowait())
+ rootBuilder.create<omp::BarrierOp>(loc);
+
+ parallelizeRegion(wsOp.getRegion(), newOp.getRegion(), rootMapping, loc, di);
+
+ if (wsOp.getRegion().getBlocks().size() != 1)
+ return failure();
+
+ // Inline the contents of the placeholder workshare op into its parent block.
+ Block *theBlock = &newOp.getRegion().front();
+ Operation *term = theBlock->getTerminator();
+ Block *parentBlock = wsOp->getBlock();
+ parentBlock->getOperations().splice(newOp->getIterator(),
+ theBlock->getOperations());
+ assert(term->getNumOperands() == 0);
+ term->erase();
+ newOp->erase();
+ wsOp->erase();
+ return success();
+}
+
+class LowerWorksharePass
+ : public flangomp::impl::LowerWorkshareBase<LowerWorksharePass> {
+public:
+ void runOnOperation() override {
+ mlir::DominanceInfo &di = getAnalysis<mlir::DominanceInfo>();
+ getOperation()->walk([&](mlir::omp::WorkshareOp wsOp) {
+ if (failed(lowerWorkshare(wsOp, di)))
+ signalPassFailure();
+ });
+ }
+};
+} // namespace
diff --git a/flang/test/Fir/basic-program.fir b/flang/test/Fir/basic-program.fir
index bca454c13ff9cc..4b18acb7c2b430 100644
--- a/flang/test/Fir/basic-program.fir
+++ b/flang/test/Fir/basic-program.fir
@@ -47,6 +47,7 @@ func.func @_QQmain() {
// PASSES-NEXT: LowerHLFIRIntrinsics
// PASSES-NEXT: BufferizeHLFIR
// PASSES-NEXT: ConvertHLFIRtoFIR
+// PASSES-NEXT: LowerWorkshare
// PASSES-NEXT: CSE
// PASSES-NEXT: (S) 0 num-cse'd - Number of operations CSE'd
// PASSES-NEXT: (S) 0 num-dce'd - Number of operations DCE'd
diff --git a/flang/test/Transforms/OpenMP/lower-workshare.mlir b/flang/test/Transforms/OpenMP/lower-workshare.mlir
new file mode 100644
index 00000000000000..a609ee5d3d6c2a
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare.mlir
@@ -0,0 +1,189 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// checks:
+// nowait on final omp.single
+func.func @wsfunc(%arg0: !fir.ref<!fir.array<42xi32>>) {
+ omp.parallel {
+ omp.workshare {
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %0 = fir.shape %c42 : (index) -> !fir.shape<1>
+ %1:2 = hlfir.declare %arg0(%0) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+ %2 = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
+ %3:2 = hlfir.declare %2(%0) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
+ %true = arith.constant true
+ %c1 = arith.constant 1 : index
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ %7 = hlfir.designate %1#0 (%arg1) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+ %8 = fir.load %7 : !fir.ref<i32>
+ %9 = arith.subi %8, %c1_i32 : i32
+ %10 = hlfir.designate %3#0 (%arg1) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+ hlfir.assign %9 to %10 temporary_lhs : i32, !fir.ref<i32>
+ omp.yield
+ }
+ omp.terminator
+ }
+ %4 = fir.undefined tuple<!fir.heap<!fir.array<42xi32>>, i1>
+ %5 = fir.insert_value %4, %true, [1 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, i1) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
+ %6 = fir.insert_value %5, %3#0, [0 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, !fir.heap<!fir.array<42xi32>>) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
+ hlfir.assign %3#0 to %1#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
+ fir.freemem %3#0 : !fir.heap<!fir.array<42xi32>>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+// -----
+
+// checks:
+// fir.alloca hoisted out and copyprivate'd
+func.func @wsfunc(%arg0: !fir.ref<!fir.array<42xi32>>) {
+ omp.workshare {
+ %c1_i32 = arith.constant 1 : i32
+ %alloc = fir.alloca i32
+ fir.store %c1_i32 to %alloc : !fir.ref<i32>
+ %c42 = arith.constant 42 : index
+ %0 = fir.shape %c42 : (index) -> !fir.shape<1>
+ %1:2 = hlfir.declare %arg0(%0) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+ %2 = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
+ %3:2 = hlfir.declare %2(%0) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
+ %true = arith.constant true
+ %c1 = arith.constant 1 : index
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ %7 = hlfir.designate %1#0 (%arg1) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+ %8 = fir.load %7 : !fir.ref<i32>
+ %ld = fir.load %alloc : !fir.ref<i32>
+ %n8 = arith.subi %8, %ld : i32
+ %9 = arith.subi %n8, %c1_i32 : i32
+ %10 = hlfir.designate %3#0 (%arg1) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+ hlfir.assign %9 to %10 temporary_lhs : i32, !fir.ref<i32>
+ omp.yield
+ }
+ omp.terminator
+ }
+ %4 = fir.undefined tuple<!fir.heap<!fir.array<42xi32>>, i1>
+ %5 = fir.insert_value %4, %true, [1 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, i1) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
+ %6 = fir.insert_value %5, %3#0, [0 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, !fir.heap<!fir.array<42xi32>>) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
+ "test.test1"(%alloc) : (!fir.ref<i32>) -> ()
+ hlfir.assign %3#0 to %1#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
+ fir.freemem %3#0 : !fir.heap<!fir.array<42xi32>>
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: func.func private @_workshare_copy_heap_42xi32(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>,
+// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
+// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func @wsfunc(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<42xi32>>) {
+// CHECK: omp.parallel {
+// CHECK: %[[VAL_1:.*]] = fir.alloca !fir.heap<!fir.array<42xi32>>
+// CHECK: omp.single copyprivate(%[[VAL_1]] -> @_workshare_copy_heap_42xi32 : !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
+// CHECK: %[[VAL_2:.*]] = arith.constant 42 : index
+// CHECK: %[[VAL_3:.*]] = fir.shape %[[VAL_2]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_3]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+// CHECK: %[[VAL_5:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
+// CHECK: fir.store %[[VAL_5]] to %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]](%[[VAL_3]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_7:.*]] = arith.constant 42 : index
+// CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32
+// CHECK: %[[VAL_9:.*]] = fir.shape %[[VAL_7]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_9]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+// CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]](%[[VAL_9]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
+// CHECK: %[[VAL_13:.*]] = arith.constant 1 : index
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_14:.*]]) : index = (%[[VAL_13]]) to (%[[VAL_7]]) inclusive step (%[[VAL_13]]) {
+// CHECK: %[[VAL_15:.*]] = hlfir.designate %[[VAL_10]]#0 (%[[VAL_14]]) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+// CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_15]] : !fir.ref<i32>
+// CHECK: %[[VAL_17:.*]] = arith.subi %[[VAL_16]], %[[VAL_8]] : i32
+// CHECK: %[[VAL_18:.*]] = hlfir.designate %[[VAL_12]]#0 (%[[VAL_14]]) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+// CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_18]] temporary_lhs : i32, !fir.ref<i32>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.single nowait {
+// CHECK: hlfir.assign %[[VAL_12]]#0 to %[[VAL_10]]#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
+// CHECK: fir.freemem %[[VAL_12]]#0 : !fir.heap<!fir.array<42xi32>>
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.barrier
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func private @_workshare_copy_heap_42xi32(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>,
+// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
+// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func private @_workshare_copy_i32(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32>,
+// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32>) {
+// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<i32>
+// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<i32>
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func @wsfunc(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<42xi32>>) {
+// CHECK: %[[VAL_1:.*]] = fir.alloca i32
+// CHECK: %[[VAL_2:.*]] = fir.alloca !fir.heap<!fir.array<42xi32>>
+// CHECK: omp.single copyprivate(%[[VAL_1]] -> @_workshare_copy_i32 : !fir.ref<i32>, %[[VAL_2]] -> @_workshare_copy_heap_42xi32 : !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
+// CHECK: %[[VAL_3:.*]] = arith.constant 1 : i32
+// CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref<i32>
+// CHECK: %[[VAL_4:.*]] = arith.constant 42 : index
+// CHECK: %[[VAL_5:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_5]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+// CHECK: %[[VAL_7:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
+// CHECK: fir.store %[[VAL_7]] to %[[VAL_2]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]](%[[VAL_5]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
+// CHECK: %[[VAL_10:.*]] = arith.constant 42 : index
+// CHECK: %[[VAL_11:.*]] = fir.shape %[[VAL_10]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_11]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
+// CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_2]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
+// CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_13]](%[[VAL_11]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
+// CHECK: %[[VAL_15:.*]] = arith.constant 1 : index
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_16:.*]]) : index = (%[[VAL_15]]) to (%[[VAL_10]]) inclusive step (%[[VAL_15]]) {
+// CHECK: %[[VAL_17:.*]] = hlfir.designate %[[VAL_12]]#0 (%[[VAL_16]]) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+// CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]] : !fir.ref<i32>
+// CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+// CHECK: %[[VAL_20:.*]] = arith.subi %[[VAL_18]], %[[VAL_19]] : i32
+// CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_20]], %[[VAL_9]] : i32
+// CHECK: %[[VAL_22:.*]] = hlfir.designate %[[VAL_14]]#0 (%[[VAL_16]]) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
+// CHECK: hlfir.assign %[[VAL_21]] to %[[VAL_22]] temporary_lhs : i32, !fir.ref<i32>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.single nowait {
+// CHECK: "test.test1"(%[[VAL_1]]) : (!fir.ref<i32>) -> ()
+// CHECK: hlfir.assign %[[VAL_14]]#0 to %[[VAL_12]]#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
+// CHECK: fir.freemem %[[VAL_14]]#0 : !fir.heap<!fir.array<42xi32>>
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.barrier
+// CHECK: return
+// CHECK: }
+
diff --git a/flang/test/Transforms/OpenMP/lower-workshare2.mlir b/flang/test/Transforms/OpenMP/lower-workshare2.mlir
new file mode 100644
index 00000000000000..940662e0bdccc2
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare2.mlir
@@ -0,0 +1,23 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Check that we correctly handle nowait
+
+// CHECK-LABEL: func.func @nonowait
+func.func @nonowait(%arg0: !fir.ref<!fir.array<42xi32>>) {
+ // CHECK: omp.barrier
+ omp.workshare {
+ omp.terminator
+ }
+ return
+}
+
+// -----
+
+// CHECK-LABEL: func.func @nowait
+func.func @nowait(%arg0: !fir.ref<!fir.array<42xi32>>) {
+ // CHECK-NOT: omp.barrier
+ omp.workshare nowait {
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workshare3.mlir b/flang/test/Transforms/OpenMP/lower-workshare3.mlir
new file mode 100644
index 00000000000000..5a3d583527fddb
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare3.mlir
@@ -0,0 +1,74 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+
+// Check if we store the correct values
+
+func.func @wsfunc() {
+ omp.parallel {
+ // CHECK: fir.alloca
+ // CHECK: fir.alloca
+ // CHECK: fir.alloca
+ // CHECK: fir.alloca
+ // CHECK: fir.alloca
+ // CHECK-NOT: fir.alloca
+ omp.workshare {
+
+ %t1 = "test.test1"() : () -> i32
+ // CHECK: %[[T1:.*]] = "test.test1"
+ // CHECK: fir.store %[[T1]]
+ %t2 = "test.test2"() : () -> i32
+ // CHECK: %[[T2:.*]] = "test.test2"
+ // CHECK: fir.store %[[T2]]
+ %t3 = "test.test3"() : () -> i32
+ // CHECK: %[[T3:.*]] = "test.test3"
+ // CHECK-NOT: fir.store %[[T3]]
+ %t4 = "test.test4"() : () -> i32
+ // CHECK: %[[T4:.*]] = "test.test4"
+ // CHECK: fir.store %[[T4]]
+ %t5 = "test.test5"() : () -> i32
+ // CHECK: %[[T5:.*]] = "test.test5"
+ // CHECK: fir.store %[[T5]]
+ %t6 = "test.test6"() : () -> i32
+ // CHECK: %[[T6:.*]] = "test.test6"
+ // CHECK-NOT: fir.store %[[T6]]
+
+
+ "test.test1"(%t1) : (i32) -> ()
+ "test.test1"(%t2) : (i32) -> ()
+ "test.test1"(%t3) : (i32) -> ()
+
+ %true = arith.constant true
+ fir.if %true {
+ "test.test2"(%t3) : (i32) -> ()
+ }
+
+ %c1_i32 = arith.constant 1 : i32
+
+ %t5_pure_use = arith.addi %t5, %c1_i32 : i32
+
+ %t6_mem_effect_use = "test.test8"(%t6) : (i32) -> i32
+ // CHECK: %[[T6_USE:.*]] = "test.test8"
+ // CHECK: fir.store %[[T6_USE]]
+
+ %c42 = arith.constant 42 : index
+ %c1 = arith.constant 1 : index
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ "test.test10"(%t1) : (i32) -> ()
+ "test.test10"(%t5_pure_use) : (i32) -> ()
+ "test.test10"(%t6_mem_effect_use) : (i32) -> ()
+ omp.yield
+ }
+ omp.terminator
+ }
+
+ "test.test10"(%t2) : (i32) -> ()
+ fir.if %true {
+ "test.test10"(%t4) : (i32) -> ()
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workshare4.mlir b/flang/test/Transforms/OpenMP/lower-workshare4.mlir
new file mode 100644
index 00000000000000..02fe90097008db
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare4.mlir
@@ -0,0 +1,59 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Check that we clean up unused pure operations from the parallel and single
+// regions
+
+// CHECK-LABEL: func.func @wsfunc() {
+// CHECK: %[[VAL_0:.*]] = fir.alloca i32
+// CHECK: omp.parallel {
+// CHECK: omp.single {
+// CHECK: %[[VAL_1:.*]] = "test.test1"() : () -> i32
+// CHECK: %[[VAL_2:.*]] = arith.constant 2 : index
+// CHECK: %[[VAL_3:.*]] = arith.constant 3 : index
+// CHECK: %[[VAL_4:.*]] = arith.addi %[[VAL_2]], %[[VAL_3]] : index
+// CHECK: "test.test3"(%[[VAL_4]]) : (index) -> ()
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_6:.*]] = arith.constant 42 : index
+// CHECK: omp.wsloop nowait {
+// CHECK: omp.loop_nest (%[[VAL_7:.*]]) : index = (%[[VAL_5]]) to (%[[VAL_6]]) inclusive step (%[[VAL_5]]) {
+// CHECK: "test.test2"() : () -> ()
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.barrier
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+func.func @wsfunc() {
+ %a = fir.alloca i32
+ omp.parallel {
+ omp.workshare {
+ %t1 = "test.test1"() : () -> i32
+
+ %c1 = arith.constant 1 : index
+ %c42 = arith.constant 42 : index
+
+ %c2 = arith.constant 2 : index
+ %c3 = arith.constant 3 : index
+ %add = arith.addi %c2, %c3 : index
+ "test.test3"(%add) : (index) -> ()
+
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ "test.test2"() : () -> ()
+ omp.yield
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+
diff --git a/flang/test/Transforms/OpenMP/lower-workshare5.mlir b/flang/test/Transforms/OpenMP/lower-workshare5.mlir
new file mode 100644
index 00000000000000..177f8aa8f86c7c
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare5.mlir
@@ -0,0 +1,42 @@
+// XFAIL: *
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// TODO we can lower these but we have no guarantee that the parent of
+// omp.workshare supports multi-block regions, thus we fail for now.
+
+func.func @wsfunc() {
+ %a = fir.alloca i32
+ omp.parallel {
+ omp.workshare {
+ ^bb1:
+ %c1 = arith.constant 1 : i32
+ cf.br ^bb3(%c1: i32)
+ ^bb3(%arg1: i32):
+ "test.test2"(%arg1) : (i32) -> ()
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+// -----
+
+func.func @wsfunc() {
+ %a = fir.alloca i32
+ omp.parallel {
+ omp.workshare {
+ ^bb1:
+ %c1 = arith.constant 1 : i32
+ cf.br ^bb3(%c1: i32)
+ ^bb2:
+ "test.test2"(%r) : (i32) -> ()
+ omp.terminator
+ ^bb3(%arg1: i32):
+ %r = "test.test2"(%arg1) : (i32) -> i32
+ cf.br ^bb2
+ }
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workshare6.mlir b/flang/test/Transforms/OpenMP/lower-workshare6.mlir
new file mode 100644
index 00000000000000..48379470e92562
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare6.mlir
@@ -0,0 +1,51 @@
+// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Checks that the omp.workshare.loop_wrapper binds to the correct omp.workshare
+
+func.func @wsfunc() {
+ %c1 = arith.constant 1 : index
+ %c42 = arith.constant 42 : index
+ omp.parallel {
+ omp.workshare nowait {
+ omp.parallel {
+ omp.workshare nowait {
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ "test.test2"() : () -> ()
+ omp.yield
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: func.func @wsfunc() {
+// CHECK: %[[VAL_0:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_1:.*]] = arith.constant 42 : index
+// CHECK: omp.parallel {
+// CHECK: omp.single nowait {
+// CHECK: omp.parallel {
+// CHECK: omp.wsloop nowait {
+// CHECK: omp.loop_nest (%[[VAL_2:.*]]) : index = (%[[VAL_0]]) to (%[[VAL_1]]) inclusive step (%[[VAL_0]]) {
+// CHECK: "test.test2"() : () -> ()
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+
diff --git a/flang/tools/bbc/bbc.cpp b/flang/tools/bbc/bbc.cpp
index 736d68219581dd..1a7dac1b76bc20 100644
--- a/flang/tools/bbc/bbc.cpp
+++ b/flang/tools/bbc/bbc.cpp
@@ -440,7 +440,8 @@ static llvm::LogicalResult convertFortranSourceToMLIR(
if (emitFIR && useHLFIR) {
// lower HLFIR to FIR
- fir::createHLFIRToFIRPassPipeline(pm, llvm::OptimizationLevel::O2);
+ fir::createHLFIRToFIRPassPipeline(pm, enableOpenMP,
+ llvm::OptimizationLevel::O2);
if (mlir::failed(pm.run(mlirModule))) {
llvm::errs() << "FATAL: lowering from HLFIR to FIR failed";
return mlir::failure();
@@ -455,6 +456,8 @@ static llvm::LogicalResult convertFortranSourceToMLIR(
// Add O2 optimizer pass pipeline.
MLIRToLLVMPassPipelineConfig config(llvm::OptimizationLevel::O2);
+ if (enableOpenMP)
+ config.EnableOpenMP = true;
config.NSWOnLoopVarInc = setNSW;
fir::registerDefaultInlinerPass(config);
fir::createDefaultFIROptimizerPassPipeline(pm, config);
diff --git a/flang/tools/tco/tco.cpp b/flang/tools/tco/tco.cpp
index a8c64333109aeb..06892cdc3f6a80 100644
--- a/flang/tools/tco/tco.cpp
+++ b/flang/tools/tco/tco.cpp
@@ -138,6 +138,7 @@ compileFIR(const mlir::PassPipelineCLParser &passPipeline) {
return mlir::failure();
} else {
MLIRToLLVMPassPipelineConfig config(llvm::OptimizationLevel::O2);
+ config.EnableOpenMP = true; // assume the input contains OpenMP
config.AliasAnalysis = true; // enabled when optimizing for speed
if (codeGenLLVM) {
// Run only CodeGen passes.
>From 975a0d74c5ae81c69844b8bd089832ed53278477 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Mon, 23 Sep 2024 15:07:48 +0900
Subject: [PATCH 02/10] Emit a proper error message for CFG in workshare
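
With this change, instead of returning a bare pass failure, the lowering emits
a TODO diagnostic when the workshare region contains unstructured control
flow. As checked by the tests added here, the message reads:

  not yet implemented: omp workshare with unstructured control flow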
---
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 13 +++++-
.../OpenMP/lower-workshare-todo-cfg-dom.mlir | 23 ++++++++++
.../OpenMP/lower-workshare-todo-cfg.mlir | 20 +++++++++
.../Transforms/OpenMP/lower-workshare5.mlir | 42 -------------------
4 files changed, 55 insertions(+), 43 deletions(-)
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
delete mode 100644 flang/test/Transforms/OpenMP/lower-workshare5.mlir
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
index 6e5538b54ba5e0..cf1867311cc236 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -16,6 +16,7 @@
//
//===----------------------------------------------------------------------===//
+#include "flang/Optimizer/Builder/Todo.h"
#include <flang/Optimizer/Builder/FIRBuilder.h>
#include <flang/Optimizer/Dialect/FIROps.h>
#include <flang/Optimizer/Dialect/FIRType.h>
@@ -416,8 +417,18 @@ LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) {
parallelizeRegion(wsOp.getRegion(), newOp.getRegion(), rootMapping, loc, di);
+  // FIXME Currently, we only support workshare constructs with structured
+  // control flow. The transformation itself supports CFG; however, once we
+  // transform the MLIR region in the omp.workshare, we need to inline that
+  // region into the parent block. We have no guarantee at this point of the
+  // pipeline that the parent op supports CFG (e.g. fir.if), thus this is not
+  // generally possible. The alternative is to put the lowered region in an
+  // operation akin to scf.execute_region, which would get lowered at the same
+  // time the fir ops get lowered to CFG. However, SCF is not registered in
+  // flang, so we cannot use it. Remove this requirement once we have
+  // scf.execute_region or an alternative operation available.
if (wsOp.getRegion().getBlocks().size() != 1)
- return failure();
+ TODO(wsOp->getLoc(), "omp workshare with unstructured control flow");
// Inline the contents of the placeholder workshare op into its parent block.
Block *theBlock = &newOp.getRegion().front();
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
new file mode 100644
index 00000000000000..1c47d448f597d9
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
@@ -0,0 +1,23 @@
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+
+// CHECK: not yet implemented: omp workshare with unstructured control flow
+
+// Check that the definition of %r dominates its use post-transform
+func.func @wsfunc() {
+ %a = fir.alloca i32
+ omp.parallel {
+ omp.workshare {
+ ^bb1:
+ %c1 = arith.constant 1 : i32
+ cf.br ^bb3(%c1: i32)
+ ^bb2:
+ "test.test2"(%r) : (i32) -> ()
+ omp.terminator
+ ^bb3(%arg1: i32):
+ %r = "test.test2"(%arg1) : (i32) -> i32
+ cf.br ^bb2
+ }
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
new file mode 100644
index 00000000000000..bf6c196a05b4a3
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
@@ -0,0 +1,20 @@
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+
+// CHECK: not yet implemented: omp workshare with unstructured control flow
+
+// Check transforming a simple CFG
+func.func @wsfunc() {
+ %a = fir.alloca i32
+ omp.parallel {
+ omp.workshare {
+ ^bb1:
+ %c1 = arith.constant 1 : i32
+ cf.br ^bb3(%c1: i32)
+ ^bb3(%arg1: i32):
+ "test.test2"(%arg1) : (i32) -> ()
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workshare5.mlir b/flang/test/Transforms/OpenMP/lower-workshare5.mlir
deleted file mode 100644
index 177f8aa8f86c7c..00000000000000
--- a/flang/test/Transforms/OpenMP/lower-workshare5.mlir
+++ /dev/null
@@ -1,42 +0,0 @@
-// XFAIL: *
-// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
-
-// TODO we can lower these but we have no guarantee that the parent of
-// omp.workshare supports multi-block regions, thus we fail for now.
-
-func.func @wsfunc() {
- %a = fir.alloca i32
- omp.parallel {
- omp.workshare {
- ^bb1:
- %c1 = arith.constant 1 : i32
- cf.br ^bb3(%c1: i32)
- ^bb3(%arg1: i32):
- "test.test2"(%arg1) : (i32) -> ()
- omp.terminator
- }
- omp.terminator
- }
- return
-}
-
-// -----
-
-func.func @wsfunc() {
- %a = fir.alloca i32
- omp.parallel {
- omp.workshare {
- ^bb1:
- %c1 = arith.constant 1 : i32
- cf.br ^bb3(%c1: i32)
- ^bb2:
- "test.test2"(%r) : (i32) -> ()
- omp.terminator
- ^bb3(%arg1: i32):
- %r = "test.test2"(%arg1) : (i32) -> i32
- cf.br ^bb2
- }
- omp.terminator
- }
- return
-}
>From 79ac7998609480d18be4ea3bc61b6c1c77089f70 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Mon, 23 Sep 2024 15:44:23 +0900
Subject: [PATCH 03/10] Cleanup tests
---
.../OpenMP/lower-workshare-alloca.mlir | 55 +++++
...are6.mlir => lower-workshare-binding.mlir} | 0
...are4.mlir => lower-workshare-cleanup.mlir} | 0
....mlir => lower-workshare-copyprivate.mlir} | 0
...hare2.mlir => lower-workshare-nowait.mlir} | 0
.../Transforms/OpenMP/lower-workshare.mlir | 189 ------------------
6 files changed, 55 insertions(+), 189 deletions(-)
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
rename flang/test/Transforms/OpenMP/{lower-workshare6.mlir => lower-workshare-binding.mlir} (100%)
rename flang/test/Transforms/OpenMP/{lower-workshare4.mlir => lower-workshare-cleanup.mlir} (100%)
rename flang/test/Transforms/OpenMP/{lower-workshare3.mlir => lower-workshare-copyprivate.mlir} (100%)
rename flang/test/Transforms/OpenMP/{lower-workshare2.mlir => lower-workshare-nowait.mlir} (100%)
delete mode 100644 flang/test/Transforms/OpenMP/lower-workshare.mlir
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
new file mode 100644
index 00000000000000..d1bef3a359e487
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
@@ -0,0 +1,55 @@
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Checks that fir.alloca is hoisted out and copyprivate'd
+func.func @wsfunc() {
+ omp.workshare {
+ %c1 = arith.constant 1 : index
+ %c42 = arith.constant 42 : index
+ %c1_i32 = arith.constant 1 : i32
+ %alloc = fir.alloca i32
+ fir.store %c1_i32 to %alloc : !fir.ref<i32>
+ omp.workshare.loop_wrapper {
+ omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+ "test.test1"(%alloc) : (!fir.ref<i32>) -> ()
+ omp.yield
+ }
+ omp.terminator
+ }
+ "test.test2"(%alloc) : (!fir.ref<i32>) -> ()
+ omp.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: func.func private @_workshare_copy_i32(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32>,
+// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32>) {
+// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<i32>
+// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<i32>
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func @wsfunc() {
+// CHECK: %[[VAL_0:.*]] = fir.alloca i32
+// CHECK: omp.single copyprivate(%[[VAL_0]] -> @_workshare_copy_i32 : !fir.ref<i32>) {
+// CHECK: %[[VAL_1:.*]] = arith.constant 1 : i32
+// CHECK: fir.store %[[VAL_1]] to %[[VAL_0]] : !fir.ref<i32>
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_2:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_3:.*]] = arith.constant 42 : index
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_4:.*]]) : index = (%[[VAL_2]]) to (%[[VAL_3]]) inclusive step (%[[VAL_2]]) {
+// CHECK: "test.test1"(%[[VAL_0]]) : (!fir.ref<i32>) -> ()
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.single nowait {
+// CHECK: "test.test2"(%[[VAL_0]]) : (!fir.ref<i32>) -> ()
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.barrier
+// CHECK: return
+// CHECK: }
+
diff --git a/flang/test/Transforms/OpenMP/lower-workshare6.mlir b/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir
similarity index 100%
rename from flang/test/Transforms/OpenMP/lower-workshare6.mlir
rename to flang/test/Transforms/OpenMP/lower-workshare-binding.mlir
diff --git a/flang/test/Transforms/OpenMP/lower-workshare4.mlir b/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir
similarity index 100%
rename from flang/test/Transforms/OpenMP/lower-workshare4.mlir
rename to flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir
diff --git a/flang/test/Transforms/OpenMP/lower-workshare3.mlir b/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir
similarity index 100%
rename from flang/test/Transforms/OpenMP/lower-workshare3.mlir
rename to flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir
diff --git a/flang/test/Transforms/OpenMP/lower-workshare2.mlir b/flang/test/Transforms/OpenMP/lower-workshare-nowait.mlir
similarity index 100%
rename from flang/test/Transforms/OpenMP/lower-workshare2.mlir
rename to flang/test/Transforms/OpenMP/lower-workshare-nowait.mlir
diff --git a/flang/test/Transforms/OpenMP/lower-workshare.mlir b/flang/test/Transforms/OpenMP/lower-workshare.mlir
deleted file mode 100644
index a609ee5d3d6c2a..00000000000000
--- a/flang/test/Transforms/OpenMP/lower-workshare.mlir
+++ /dev/null
@@ -1,189 +0,0 @@
-// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
-
-// checks:
-// nowait on final omp.single
-func.func @wsfunc(%arg0: !fir.ref<!fir.array<42xi32>>) {
- omp.parallel {
- omp.workshare {
- %c42 = arith.constant 42 : index
- %c1_i32 = arith.constant 1 : i32
- %0 = fir.shape %c42 : (index) -> !fir.shape<1>
- %1:2 = hlfir.declare %arg0(%0) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
- %2 = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
- %3:2 = hlfir.declare %2(%0) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
- %true = arith.constant true
- %c1 = arith.constant 1 : index
- omp.workshare.loop_wrapper {
- omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
- %7 = hlfir.designate %1#0 (%arg1) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
- %8 = fir.load %7 : !fir.ref<i32>
- %9 = arith.subi %8, %c1_i32 : i32
- %10 = hlfir.designate %3#0 (%arg1) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
- hlfir.assign %9 to %10 temporary_lhs : i32, !fir.ref<i32>
- omp.yield
- }
- omp.terminator
- }
- %4 = fir.undefined tuple<!fir.heap<!fir.array<42xi32>>, i1>
- %5 = fir.insert_value %4, %true, [1 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, i1) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
- %6 = fir.insert_value %5, %3#0, [0 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, !fir.heap<!fir.array<42xi32>>) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
- hlfir.assign %3#0 to %1#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
- fir.freemem %3#0 : !fir.heap<!fir.array<42xi32>>
- omp.terminator
- }
- omp.terminator
- }
- return
-}
-
-// -----
-
-// checks:
-// fir.alloca hoisted out and copyprivate'd
-func.func @wsfunc(%arg0: !fir.ref<!fir.array<42xi32>>) {
- omp.workshare {
- %c1_i32 = arith.constant 1 : i32
- %alloc = fir.alloca i32
- fir.store %c1_i32 to %alloc : !fir.ref<i32>
- %c42 = arith.constant 42 : index
- %0 = fir.shape %c42 : (index) -> !fir.shape<1>
- %1:2 = hlfir.declare %arg0(%0) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
- %2 = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
- %3:2 = hlfir.declare %2(%0) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
- %true = arith.constant true
- %c1 = arith.constant 1 : index
- omp.workshare.loop_wrapper {
- omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
- %7 = hlfir.designate %1#0 (%arg1) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
- %8 = fir.load %7 : !fir.ref<i32>
- %ld = fir.load %alloc : !fir.ref<i32>
- %n8 = arith.subi %8, %ld : i32
- %9 = arith.subi %n8, %c1_i32 : i32
- %10 = hlfir.designate %3#0 (%arg1) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
- hlfir.assign %9 to %10 temporary_lhs : i32, !fir.ref<i32>
- omp.yield
- }
- omp.terminator
- }
- %4 = fir.undefined tuple<!fir.heap<!fir.array<42xi32>>, i1>
- %5 = fir.insert_value %4, %true, [1 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, i1) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
- %6 = fir.insert_value %5, %3#0, [0 : index] : (tuple<!fir.heap<!fir.array<42xi32>>, i1>, !fir.heap<!fir.array<42xi32>>) -> tuple<!fir.heap<!fir.array<42xi32>>, i1>
- "test.test1"(%alloc) : (!fir.ref<i32>) -> ()
- hlfir.assign %3#0 to %1#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
- fir.freemem %3#0 : !fir.heap<!fir.array<42xi32>>
- omp.terminator
- }
- return
-}
-
-// CHECK-LABEL: func.func private @_workshare_copy_heap_42xi32(
-// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>,
-// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
-// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: return
-// CHECK: }
-
-// CHECK-LABEL: func.func @wsfunc(
-// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<42xi32>>) {
-// CHECK: omp.parallel {
-// CHECK: %[[VAL_1:.*]] = fir.alloca !fir.heap<!fir.array<42xi32>>
-// CHECK: omp.single copyprivate(%[[VAL_1]] -> @_workshare_copy_heap_42xi32 : !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
-// CHECK: %[[VAL_2:.*]] = arith.constant 42 : index
-// CHECK: %[[VAL_3:.*]] = fir.shape %[[VAL_2]] : (index) -> !fir.shape<1>
-// CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_3]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
-// CHECK: %[[VAL_5:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
-// CHECK: fir.store %[[VAL_5]] to %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]](%[[VAL_3]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: %[[VAL_7:.*]] = arith.constant 42 : index
-// CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32
-// CHECK: %[[VAL_9:.*]] = fir.shape %[[VAL_7]] : (index) -> !fir.shape<1>
-// CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_9]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
-// CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]](%[[VAL_9]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
-// CHECK: %[[VAL_13:.*]] = arith.constant 1 : index
-// CHECK: omp.wsloop {
-// CHECK: omp.loop_nest (%[[VAL_14:.*]]) : index = (%[[VAL_13]]) to (%[[VAL_7]]) inclusive step (%[[VAL_13]]) {
-// CHECK: %[[VAL_15:.*]] = hlfir.designate %[[VAL_10]]#0 (%[[VAL_14]]) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
-// CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_15]] : !fir.ref<i32>
-// CHECK: %[[VAL_17:.*]] = arith.subi %[[VAL_16]], %[[VAL_8]] : i32
-// CHECK: %[[VAL_18:.*]] = hlfir.designate %[[VAL_12]]#0 (%[[VAL_14]]) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
-// CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_18]] temporary_lhs : i32, !fir.ref<i32>
-// CHECK: omp.yield
-// CHECK: }
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: omp.single nowait {
-// CHECK: hlfir.assign %[[VAL_12]]#0 to %[[VAL_10]]#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
-// CHECK: fir.freemem %[[VAL_12]]#0 : !fir.heap<!fir.array<42xi32>>
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: omp.barrier
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: return
-// CHECK: }
-
-// CHECK-LABEL: func.func private @_workshare_copy_heap_42xi32(
-// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>,
-// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
-// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: return
-// CHECK: }
-
-// CHECK-LABEL: func.func private @_workshare_copy_i32(
-// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32>,
-// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32>) {
-// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<i32>
-// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<i32>
-// CHECK: return
-// CHECK: }
-
-// CHECK-LABEL: func.func @wsfunc(
-// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<42xi32>>) {
-// CHECK: %[[VAL_1:.*]] = fir.alloca i32
-// CHECK: %[[VAL_2:.*]] = fir.alloca !fir.heap<!fir.array<42xi32>>
-// CHECK: omp.single copyprivate(%[[VAL_1]] -> @_workshare_copy_i32 : !fir.ref<i32>, %[[VAL_2]] -> @_workshare_copy_heap_42xi32 : !fir.ref<!fir.heap<!fir.array<42xi32>>>) {
-// CHECK: %[[VAL_3:.*]] = arith.constant 1 : i32
-// CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref<i32>
-// CHECK: %[[VAL_4:.*]] = arith.constant 42 : index
-// CHECK: %[[VAL_5:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1>
-// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_5]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
-// CHECK: %[[VAL_7:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""}
-// CHECK: fir.store %[[VAL_7]] to %[[VAL_2]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]](%[[VAL_5]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
-// CHECK: %[[VAL_10:.*]] = arith.constant 42 : index
-// CHECK: %[[VAL_11:.*]] = fir.shape %[[VAL_10]] : (index) -> !fir.shape<1>
-// CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_11]]) {uniq_name = "array"} : (!fir.ref<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>)
-// CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_2]] : !fir.ref<!fir.heap<!fir.array<42xi32>>>
-// CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_13]](%[[VAL_11]]) {uniq_name = ".tmp.array"} : (!fir.heap<!fir.array<42xi32>>, !fir.shape<1>) -> (!fir.heap<!fir.array<42xi32>>, !fir.heap<!fir.array<42xi32>>)
-// CHECK: %[[VAL_15:.*]] = arith.constant 1 : index
-// CHECK: omp.wsloop {
-// CHECK: omp.loop_nest (%[[VAL_16:.*]]) : index = (%[[VAL_15]]) to (%[[VAL_10]]) inclusive step (%[[VAL_15]]) {
-// CHECK: %[[VAL_17:.*]] = hlfir.designate %[[VAL_12]]#0 (%[[VAL_16]]) : (!fir.ref<!fir.array<42xi32>>, index) -> !fir.ref<i32>
-// CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]] : !fir.ref<i32>
-// CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
-// CHECK: %[[VAL_20:.*]] = arith.subi %[[VAL_18]], %[[VAL_19]] : i32
-// CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_20]], %[[VAL_9]] : i32
-// CHECK: %[[VAL_22:.*]] = hlfir.designate %[[VAL_14]]#0 (%[[VAL_16]]) : (!fir.heap<!fir.array<42xi32>>, index) -> !fir.ref<i32>
-// CHECK: hlfir.assign %[[VAL_21]] to %[[VAL_22]] temporary_lhs : i32, !fir.ref<i32>
-// CHECK: omp.yield
-// CHECK: }
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: omp.single nowait {
-// CHECK: "test.test1"(%[[VAL_1]]) : (!fir.ref<i32>) -> ()
-// CHECK: hlfir.assign %[[VAL_14]]#0 to %[[VAL_12]]#0 : !fir.heap<!fir.array<42xi32>>, !fir.ref<!fir.array<42xi32>>
-// CHECK: fir.freemem %[[VAL_14]]#0 : !fir.heap<!fir.array<42xi32>>
-// CHECK: omp.terminator
-// CHECK: }
-// CHECK: omp.barrier
-// CHECK: return
-// CHECK: }
-
>From b710a580ab1732b3b41f0e3fb0684b45108d2c09 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Mon, 23 Sep 2024 16:04:07 +0900
Subject: [PATCH 04/10] clang-format
---
flang/include/flang/Tools/CLOptions.inc | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/flang/include/flang/Tools/CLOptions.inc b/flang/include/flang/Tools/CLOptions.inc
index bb00e079008a0b..81ce69b4ec7e1b 100644
--- a/flang/include/flang/Tools/CLOptions.inc
+++ b/flang/include/flang/Tools/CLOptions.inc
@@ -336,8 +336,8 @@ inline void createDefaultFIROptimizerPassPipeline(
/// \param pm - MLIR pass manager that will hold the pipeline definition
/// \param optLevel - optimization level used for creating FIR optimization
/// passes pipeline
-inline void createHLFIRToFIRPassPipeline(
- mlir::PassManager &pm, bool enableOpenMP, llvm::OptimizationLevel optLevel = defaultOptLevel) {
+inline void createHLFIRToFIRPassPipeline(mlir::PassManager &pm,
+ bool enableOpenMP, llvm::OptimizationLevel optLevel = defaultOptLevel) {
if (optLevel.isOptimizingForSpeed()) {
addCanonicalizerPassWithoutRegionSimplification(pm);
addNestedPassToAllTopLevelOperations(
>From df6bd4282f21590b9d086608cd2cc136b18d54df Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Mon, 23 Sep 2024 16:25:55 +0900
Subject: [PATCH 05/10] Fix todo tests
---
flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir | 2 +-
flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
index 1c47d448f597d9..d10996167ae623 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
@@ -1,4 +1,4 @@
-// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+// RUN: %not_todo_cmd fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
// CHECK: not yet implemented: omp workshare with unstructured control flow
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
index bf6c196a05b4a3..46d2a8e8d48a8a 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
@@ -1,4 +1,4 @@
-// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+// RUN: %not_todo_cmd fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
// CHECK: not yet implemented: omp workshare with unstructured control flow
>From 07a9eb3581f480c47ce4de3de00c7cef15df3cdc Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 14:21:14 +0900
Subject: [PATCH 06/10] Fix dst src in copy function
---
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
index cf1867311cc236..baf8346e7608a9 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -162,8 +162,8 @@ static mlir::func::FuncOp createCopyFunc(mlir::Location loc, mlir::Type varType,
{loc, loc});
builder.setInsertionPointToStart(&funcOp.getRegion().back());
- Value loaded = builder.create<fir::LoadOp>(loc, funcOp.getArgument(0));
- builder.create<fir::StoreOp>(loc, loaded, funcOp.getArgument(1));
+ Value loaded = builder.create<fir::LoadOp>(loc, funcOp.getArgument(1));
+ builder.create<fir::StoreOp>(loc, loaded, funcOp.getArgument(0));
builder.create<mlir::func::ReturnOp>(loc);
return funcOp;
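
For reference, a minimal sketch of the copyprivate helper after this swap (reconstructed from the updated CHECK lines in lower-workshare-alloca.mlir later in this series; the %dst/%src names are illustrative only, the generated function uses positional block arguments):

  func.func private @_workshare_copy_i32(%dst: !fir.ref<i32>, %src: !fir.ref<i32>) {
    // The value is now loaded from the source (second argument) and stored
    // into the destination (first argument).
    %0 = fir.load %src : !fir.ref<i32>
    fir.store %0 to %dst : !fir.ref<i32>
    return
  }
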
>From c3ff901b31806c73228e4f47a47f420c2d2465ed Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 14:38:48 +0900
Subject: [PATCH 07/10] Use omp.single to handle CFG cases
---
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 77 +++++++++++++------
1 file changed, 53 insertions(+), 24 deletions(-)
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
index baf8346e7608a9..34399abbcd20ea 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -16,7 +16,6 @@
//
//===----------------------------------------------------------------------===//
-#include "flang/Optimizer/Builder/Todo.h"
#include <flang/Optimizer/Builder/FIRBuilder.h>
#include <flang/Optimizer/Dialect/FIROps.h>
#include <flang/Optimizer/Dialect/FIRType.h>
@@ -39,7 +38,6 @@
#include <mlir/IR/Visitors.h>
#include <mlir/Interfaces/SideEffectInterfaces.h>
#include <mlir/Support/LLVM.h>
-#include <mlir/Transforms/GreedyPatternRewriteDriver.h>
#include <variant>
@@ -96,6 +94,12 @@ bool shouldUseWorkshareLowering(Operation *op) {
if (isNestedIn<omp::SingleOp>(parentWorkshare, op))
return false;
+ if (parentWorkshare.getRegion().getBlocks().size() != 1) {
+ parentWorkshare->emitWarning(
+ "omp workshare with unstructured control flow currently unsupported.");
+ return false;
+ }
+
return true;
}
@@ -408,15 +412,6 @@ LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) {
OpBuilder rootBuilder(wsOp);
- // This operation is just a placeholder which will be erased later. We need it
- // because our `parallelizeRegion` function works on regions and not blocks.
- omp::WorkshareOp newOp =
- rootBuilder.create<omp::WorkshareOp>(loc, omp::WorkshareOperands());
- if (!wsOp.getNowait())
- rootBuilder.create<omp::BarrierOp>(loc);
-
- parallelizeRegion(wsOp.getRegion(), newOp.getRegion(), rootMapping, loc, di);
-
// FIXME Currently, we only support workshare constructs with structured
// control flow. The transformation itself supports CFG, however, once we
// transform the MLIR region in the omp.workshare, we need to inline that
@@ -427,19 +422,53 @@ LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) {
// time when fir ops get lowered to CFG. However, SCF is not registered in
// flang so we cannot use it. Remove this requirement once we have
// scf.execute_region or an alternative operation available.
- if (wsOp.getRegion().getBlocks().size() != 1)
- TODO(wsOp->getLoc(), "omp workshare with unstructured control flow");
-
- // Inline the contents of the placeholder workshare op into its parent block.
- Block *theBlock = &newOp.getRegion().front();
- Operation *term = theBlock->getTerminator();
- Block *parentBlock = wsOp->getBlock();
- parentBlock->getOperations().splice(newOp->getIterator(),
- theBlock->getOperations());
- assert(term->getNumOperands() == 0);
- term->erase();
- newOp->erase();
- wsOp->erase();
+ if (wsOp.getRegion().getBlocks().size() == 1) {
+ // This operation is just a placeholder which will be erased later. We need
+ // it because our `parallelizeRegion` function works on regions and not
+ // blocks.
+ omp::WorkshareOp newOp =
+ rootBuilder.create<omp::WorkshareOp>(loc, omp::WorkshareOperands());
+ if (!wsOp.getNowait())
+ rootBuilder.create<omp::BarrierOp>(loc);
+
+ parallelizeRegion(wsOp.getRegion(), newOp.getRegion(), rootMapping, loc,
+ di);
+
+ // Inline the contents of the placeholder workshare op into its parent
+ // block.
+ Block *theBlock = &newOp.getRegion().front();
+ Operation *term = theBlock->getTerminator();
+ Block *parentBlock = wsOp->getBlock();
+ parentBlock->getOperations().splice(newOp->getIterator(),
+ theBlock->getOperations());
+ assert(term->getNumOperands() == 0);
+ term->erase();
+ newOp->erase();
+ wsOp->erase();
+ } else {
+ // Otherwise just change the operation to an omp.single.
+
+ // `shouldUseWorkshareLowering` should have guaranteed that there are no
+ // omp.workshare_loop_wrapper's that bind to this omp.workshare.
+ assert(!wsOp->walk([&](Operation *op) {
+ // Nested omp.workshare can have their own
+ // omp.workshare_loop_wrapper's.
+ if (isa<omp::WorkshareOp>(op))
+ return WalkResult::skip();
+ if (isa<omp::WorkshareLoopWrapperOp>(op))
+ return WalkResult::interrupt();
+ return WalkResult::advance();
+ })
+ .wasInterrupted());
+
+ omp::SingleOperands operands;
+ operands.nowait = wsOp.getNowaitAttr();
+ omp::SingleOp newOp = rootBuilder.create<omp::SingleOp>(loc, operands);
+
+ newOp.getRegion().getBlocks().splice(newOp.getRegion().getBlocks().begin(),
+ wsOp.getRegion().getBlocks());
+ wsOp->erase();
+ }
return success();
}
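
As a rough before/after sketch of the fallback added in this patch (hand-written under the assumption of a two-block workshare region, not taken from the in-tree tests): an omp.workshare containing unstructured control flow keeps its blocks but is rewritten into an omp.single, with the nowait clause carried over when present, instead of hitting the previous TODO.

  // Input with CFG inside the workshare region.
  func.func @wsfunc() {
    omp.parallel {
      omp.workshare {
        %c1_i32 = arith.constant 1 : i32
        cf.br ^bb1
      ^bb1:
        omp.terminator
      }
      omp.terminator
    }
    return
  }

  // Expected shape after --lower-workshare: the region is serialized into an
  // omp.single op that inherits the same blocks.
  func.func @wsfunc() {
    omp.parallel {
      omp.single {
        %c1_i32 = arith.constant 1 : i32
        cf.br ^bb1
      ^bb1:
        omp.terminator
      }
      omp.terminator
    }
    return
  }
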
>From 76b6a9f6fbcfdaded965134bb5ca9c775c840562 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 15:12:14 +0900
Subject: [PATCH 08/10] Fix lower workshare tests
---
flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir | 4 ++--
.../Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir | 7 ++++---
flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir | 7 ++++---
3 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
index d1bef3a359e487..618b8d9c19b6b1 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir
@@ -24,8 +24,8 @@ func.func @wsfunc() {
// CHECK-LABEL: func.func private @_workshare_copy_i32(
// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32>,
// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32>) {
-// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<i32>
-// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<i32>
+// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+// CHECK: fir.store %[[VAL_2]] to %[[VAL_0]] : !fir.ref<i32>
// CHECK: return
// CHECK: }
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
index d10996167ae623..62d9da6c520f85 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
@@ -1,8 +1,9 @@
-// RUN: %not_todo_cmd fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
-// CHECK: not yet implemented: omp workshare with unstructured control flow
+// CHECK: omp.parallel
+// CHECK-NEXT: omp.single
-// Check that the definition of %r dominates its use post-transform
+// TODO Check that the definition of %r dominates its use post-transform
func.func @wsfunc() {
%a = fir.alloca i32
omp.parallel {
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
index 46d2a8e8d48a8a..d9551eb99f0762 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
@@ -1,8 +1,9 @@
-// RUN: %not_todo_cmd fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
-// CHECK: not yet implemented: omp workshare with unstructured control flow
+// CHECK: omp.parallel
+// CHECK-NEXT: omp.single
-// Check transforming a simple CFG
+// TODO Check transforming a simple CFG
func.func @wsfunc() {
%a = fir.alloca i32
omp.parallel {
>From 4c207b5c8e44d83eea08d283b8e3811585137744 Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 15:28:07 +0900
Subject: [PATCH 09/10] Different warning
---
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 9 +++++----
.../Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir | 2 ++
.../test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir | 2 ++
3 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
index 34399abbcd20ea..4d8e2a9a067141 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -94,11 +94,9 @@ bool shouldUseWorkshareLowering(Operation *op) {
if (isNestedIn<omp::SingleOp>(parentWorkshare, op))
return false;
- if (parentWorkshare.getRegion().getBlocks().size() != 1) {
- parentWorkshare->emitWarning(
- "omp workshare with unstructured control flow currently unsupported.");
+ // Do not use workshare lowering until we support CFG in omp.workshare
+ if (parentWorkshare.getRegion().getBlocks().size() != 1)
return false;
- }
return true;
}
@@ -448,6 +446,9 @@ LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) {
} else {
// Otherwise just change the operation to an omp.single.
+ wsOp->emitWarning("omp workshare with unstructured control flow currently "
+ "unsupported and will be serialized.");
+
// `shouldUseWorkshareLowering` should have guaranteed that there are no
// omp.workshare_loop_wrapper's that bind to this omp.workshare.
assert(!wsOp->walk([&](Operation *op) {
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
index 62d9da6c520f85..96dc878bed0c99 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir
@@ -1,5 +1,7 @@
// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+// CHECK: warning: omp workshare with unstructured control flow currently unsupported and will be serialized.
+
// CHECK: omp.parallel
// CHECK-NEXT: omp.single
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
index d9551eb99f0762..ce8a4eb96982be 100644
--- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
+++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir
@@ -1,5 +1,7 @@
// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s
+// CHECK: warning: omp workshare with unstructured control flow currently unsupported and will be serialized.
+
// CHECK: omp.parallel
// CHECK-NEXT: omp.single
>From c5b5369be3d0db31d9ded0eeeb8e28e03d25bd9e Mon Sep 17 00:00:00 2001
From: Ivan Radanov Ivanov <ivanov.i.aa at m.titech.ac.jp>
Date: Fri, 4 Oct 2024 22:45:09 +0900
Subject: [PATCH 10/10] Fix bug and add better clarification comments
---
flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 28 ++++++++++++++++---
.../lower-workshare-correct-parallelize.mlir | 16 +++++++++++
2 files changed, 40 insertions(+), 4 deletions(-)
create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
index 4d8e2a9a067141..84cf5e82167987 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp
@@ -35,6 +35,7 @@
#include <mlir/IR/IRMapping.h>
#include <mlir/IR/OpDefinition.h>
#include <mlir/IR/PatternMatch.h>
+#include <mlir/IR/Value.h>
#include <mlir/IR/Visitors.h>
#include <mlir/Interfaces/SideEffectInterfaces.h>
#include <mlir/Support/LLVM.h>
@@ -188,14 +189,19 @@ static bool isTransitivelyUsedOutside(Value v, SingleRegion sr) {
if (isUserOutsideSR(user, parentOp, sr))
return true;
- // Results of nested users cannot be used outside of the SR
+ // Now we know user is inside `sr`.
+
+ // Results of nested users cannot be used outside of `sr`.
if (user->getBlock() != srBlock)
continue;
- // A non-safe to parallelize operation will be handled separately
+ // A non-safe to parallelize operation will be checked for uses outside
+ // separately.
if (!isSafeToParallelize(user))
continue;
+ // For safe to parallelize operations, we need to check if there is a
+ // transitive use of `v` through them.
for (auto res : user->getResults())
if (isTransitivelyUsedOutside(res, sr))
return true;
@@ -242,7 +248,21 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
for (Operation &op : llvm::make_range(sr.begin, sr.end)) {
if (isSafeToParallelize(&op)) {
singleBuilder.clone(op, singleMapping);
- parallelBuilder.clone(op, rootMapping);
+ if (llvm::all_of(op.getOperands(), [&](Value opr) {
+ return rootMapping.contains(opr);
+ })) {
+ // Safe to parallelize operations which have all operands available in
+ // the root parallel block can be executed there.
+ parallelBuilder.clone(op, rootMapping);
+ } else {
+ // If any operand was not available, it means that there was no
+ // transitive use of a non-safe-to-parallelize operation outside `sr`.
+ // This means that there should be no transitive uses outside `sr` of
+ // `op`.
+ assert(llvm::all_of(op.getResults(), [&](Value v) {
+ return !isTransitivelyUsedOutside(v, sr);
+ }));
+ }
} else if (auto alloca = dyn_cast<fir::AllocaOp>(&op)) {
auto hoisted =
cast<fir::AllocaOp>(allocaBuilder.clone(*alloca, singleMapping));
@@ -252,7 +272,7 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
} else {
singleBuilder.clone(op, singleMapping);
// Prepare reloaded values for results of operations that cannot be
- // safely parallelized and which are used after the region `sr`
+ // safely parallelized and which are used after the region `sr`.
for (auto res : op.getResults()) {
if (isTransitivelyUsedOutside(res, sr)) {
auto alloc = mapReloadedValue(res, allocaBuilder, singleBuilder,
diff --git a/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir b/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir
new file mode 100644
index 00000000000000..99ca4fe5a0e212
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir
@@ -0,0 +1,16 @@
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Check that the safe to parallelize `fir.declare` op will not be parallelized
+// due to its operand %alloc not being reloaded outside the omp.single.
+
+func.func @foo() {
+ %c0 = arith.constant 0 : index
+ omp.workshare {
+ %alloc = fir.allocmem !fir.array<?xf32>, %c0 {bindc_name = ".tmp.forall", uniq_name = ""}
+ %shape = fir.shape %c0 : (index) -> !fir.shape<1>
+ %declare = fir.declare %alloc(%shape) {uniq_name = ".tmp.forall"} : (!fir.heap<!fir.array<?xf32>>, !fir.shape<1>) -> !fir.heap<!fir.array<?xf32>>
+ fir.freemem %alloc : !fir.heap<!fir.array<?xf32>>
+ omp.terminator
+ }
+ return
+}
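
The expected result for this test, per the comment above, is roughly the following (a hand-written sketch rather than CHECK output; nowait/barrier details around the single are omitted):

  func.func @foo() {
    %c0 = arith.constant 0 : index
    // All four ops stay inside the generated omp.single. In particular the
    // safe-to-parallelize fir.declare is not cloned into the parallel code,
    // because its %alloc operand is never reloaded outside the single.
    omp.single {
      %alloc = fir.allocmem !fir.array<?xf32>, %c0 {bindc_name = ".tmp.forall", uniq_name = ""}
      %shape = fir.shape %c0 : (index) -> !fir.shape<1>
      %declare = fir.declare %alloc(%shape) {uniq_name = ".tmp.forall"} : (!fir.heap<!fir.array<?xf32>>, !fir.shape<1>) -> !fir.heap<!fir.array<?xf32>>
      fir.freemem %alloc : !fir.heap<!fir.array<?xf32>>
      omp.terminator
    }
    return
  }
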