[clang] [OpenMP][CodeGen] Improved codegen for combined loop directives (PR #87278)
via cfe-commits
cfe-commits at lists.llvm.org
Mon Apr 1 12:56:39 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-clang-modules
Author: David Pagan (ddpagan)
Changes:
IR for 'target teams loop' is now dependent on the suitability of the associated loop-nest.
If the loop-nest:
- contains no function calls (or any calls are to OpenMP API routines, or -fopenmp-assume-no-nested-parallelism has been specified), and
- contains no nested 'loop bind(parallel)' directives,
then it can be emitted as 'target teams distribute parallel for', which is the current default. Otherwise, it is emitted as 'target teams distribute'. The sketch below illustrates the call-related cases.
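As an illustrative sketch (hypothetical code, not taken from the patch; `compute`, `n`, and the array names are made up), the cases look like this:

```c++
#include <omp.h>

extern double compute(double x); // hypothetical opaque function

void cases(int n, double *a, const double *b, const double *c) {
  // Case 1: no function calls and no nested 'loop bind(parallel)'
  // directives, so this can be emitted as
  // 'target teams distribute parallel for'.
  #pragma omp target teams loop
  for (int i = 0; i < n; ++i)
    a[i] = b[i] + c[i];

  // Case 2: a call to an arbitrary function inhibits the parallel-for
  // translation (unless -fopenmp-assume-no-nested-parallelism is given),
  // so this is emitted as 'target teams distribute'.
  #pragma omp target teams loop
  for (int i = 0; i < n; ++i)
    a[i] = compute(b[i]);

  // Case 3: calls to OpenMP API routines (names beginning with 'omp_')
  // do not inhibit the parallel-for translation.
  #pragma omp target teams loop
  for (int i = 0; i < n; ++i)
    a[i] = b[i] + omp_get_team_num();
}
```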
Added debug output indicating how each 'target teams loop' was emitted. Enable it with -mllvm -debug-only=target-teams-loop-codegen (available in assertions-enabled builds).
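For instance (illustrative invocation and output; the message format follows the patch's emitTargetTeamsLoopCodegenStatus, but the target triple, file name, and line numbers here are made up):

```
$ clang -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda \
    -mllvm -debug-only=target-teams-loop-codegen foo.c
target-teams-loop-codegen as parallel for: HOST: foo.c: 12
target-teams-loop-codegen as distribute: DEVICE: foo.c: 20
```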
Added LIT tests explicitly verifying that 'target teams loop' is emitted as a parallel loop in one case and as a distribute loop in the other.
Updated other 'loop'-related tests as needed to reflect the change in IR.
- These updates account for most of the changed files and additions/deletions.
---
Patch is 1.04 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/87278.diff
21 Files Affected:
- (modified) clang/include/clang/AST/StmtOpenMP.h (+10-1)
- (modified) clang/lib/AST/StmtOpenMP.cpp (+2-1)
- (modified) clang/lib/CodeGen/CGOpenMPRuntime.cpp (+9-6)
- (modified) clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp (+9-3)
- (modified) clang/lib/CodeGen/CGStmtOpenMP.cpp (+72-15)
- (modified) clang/lib/Sema/SemaOpenMP.cpp (+83-3)
- (modified) clang/lib/Serialization/ASTReaderStmt.cpp (+1)
- (modified) clang/lib/Serialization/ASTWriterStmt.cpp (+1)
- (modified) clang/test/OpenMP/nvptx_target_teams_generic_loop_codegen.cpp (+39-9)
- (modified) clang/test/OpenMP/nvptx_target_teams_generic_loop_generic_mode_codegen.cpp (+82-315)
- (modified) clang/test/OpenMP/target_teams_generic_loop_codegen.cpp (+8-1124)
- (added) clang/test/OpenMP/target_teams_generic_loop_codegen_as_distribute.cpp (+587)
- (added) clang/test/OpenMP/target_teams_generic_loop_codegen_as_parallel_for.cpp (+3998)
- (modified) clang/test/OpenMP/target_teams_generic_loop_if_codegen.cpp (+114-596)
- (modified) clang/test/OpenMP/target_teams_generic_loop_order_codegen.cpp (+1-1)
- (modified) clang/test/OpenMP/target_teams_generic_loop_private_codegen.cpp (+240-1198)
- (modified) clang/test/OpenMP/teams_generic_loop_codegen-1.cpp (+148-1212)
- (modified) clang/test/OpenMP/teams_generic_loop_codegen.cpp (+138-492)
- (modified) clang/test/OpenMP/teams_generic_loop_collapse_codegen.cpp (+136-672)
- (modified) clang/test/OpenMP/teams_generic_loop_private_codegen.cpp (+103-582)
- (modified) clang/test/OpenMP/teams_generic_loop_reduction_codegen.cpp (+109-676)
``````````diff
diff --git a/clang/include/clang/AST/StmtOpenMP.h b/clang/include/clang/AST/StmtOpenMP.h
index 3cb3c1014d73b7..f735fa5643aecf 100644
--- a/clang/include/clang/AST/StmtOpenMP.h
+++ b/clang/include/clang/AST/StmtOpenMP.h
@@ -6109,6 +6109,8 @@ class OMPTeamsGenericLoopDirective final : public OMPLoopDirective {
class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
+ /// true if loop directive's associated loop can be a parallel for.
+ bool CanBeParallelFor = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
@@ -6131,6 +6133,9 @@ class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective {
llvm::omp::OMPD_target_teams_loop, SourceLocation(),
SourceLocation(), CollapsedNum) {}
+ /// Set whether associated loop can be a parallel for.
+ void setCanBeParallelFor(bool ParFor) { CanBeParallelFor = ParFor; }
+
public:
/// Creates directive with a list of \p Clauses.
///
@@ -6145,7 +6150,7 @@ class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective {
static OMPTargetTeamsGenericLoopDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt, const HelperExprs &Exprs);
+ Stmt *AssociatedStmt, const HelperExprs &Exprs, bool CanBeParallelFor);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
@@ -6159,6 +6164,10 @@ class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective {
unsigned CollapsedNum,
EmptyShell);
+ /// Return true if current loop directive's associated loop can be a
+ /// parallel for.
+ bool canBeParallelFor() const { return CanBeParallelFor; }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass;
}
diff --git a/clang/lib/AST/StmtOpenMP.cpp b/clang/lib/AST/StmtOpenMP.cpp
index 426b35848cb5c8..d8519b2071e6da 100644
--- a/clang/lib/AST/StmtOpenMP.cpp
+++ b/clang/lib/AST/StmtOpenMP.cpp
@@ -2431,7 +2431,7 @@ OMPTeamsGenericLoopDirective::CreateEmpty(const ASTContext &C,
OMPTargetTeamsGenericLoopDirective *OMPTargetTeamsGenericLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
+ const HelperExprs &Exprs, bool CanBeParallelFor) {
auto *Dir = createDirective<OMPTargetTeamsGenericLoopDirective>(
C, Clauses, AssociatedStmt,
numLoopChildren(CollapsedNum, OMPD_target_teams_loop), StartLoc, EndLoc,
@@ -2473,6 +2473,7 @@ OMPTargetTeamsGenericLoopDirective *OMPTargetTeamsGenericLoopDirective::Create(
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
+ Dir->setCanBeParallelFor(CanBeParallelFor);
return Dir;
}
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index bc363313dec6f8..dba5312ff8d058 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -2656,11 +2656,12 @@ void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
// Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc,
- isOpenMPDistributeDirective(DKind)
+ isOpenMPDistributeDirective(DKind) ||
+ (DKind == OMPD_target_teams_loop)
? OMP_IDENT_WORK_DISTRIBUTE
- : isOpenMPLoopDirective(DKind)
- ? OMP_IDENT_WORK_LOOP
- : OMP_IDENT_WORK_SECTIONS),
+ : isOpenMPLoopDirective(DKind)
+ ? OMP_IDENT_WORK_LOOP
+ : OMP_IDENT_WORK_SECTIONS),
getThreadID(CGF, Loc)};
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
if (isOpenMPDistributeDirective(DKind) &&
@@ -8885,7 +8886,8 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
switch (D.getDirectiveKind()) {
case OMPD_target:
- // For now, just treat 'target teams loop' as if it's distributed.
+ // For now, treat 'target' with nested 'teams loop' as if it's
+ // distributed (target teams distribute).
if (isOpenMPDistributeDirective(DKind) || DKind == OMPD_teams_loop)
return NestedDir;
if (DKind == OMPD_teams) {
@@ -9369,7 +9371,8 @@ llvm::Value *CGOpenMPRuntime::emitTargetNumIterationsCall(
SizeEmitter) {
OpenMPDirectiveKind Kind = D.getDirectiveKind();
const OMPExecutableDirective *TD = &D;
- // Get nested teams distribute kind directive, if any.
+ // Get nested teams distribute kind directive, if any. For now, treat
+ // 'target_teams_loop' as if it's really a target_teams_distribute.
if ((!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind)) &&
Kind != OMPD_target_teams_loop)
TD = getNestedDistributeDirective(CGM.getContext(), D);
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 5baac8f0e3e268..cf4e93112b10e8 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -639,14 +639,14 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
return false;
}
-static bool supportsSPMDExecutionMode(ASTContext &Ctx,
+static bool supportsSPMDExecutionMode(CodeGenModule &CGM,
const OMPExecutableDirective &D) {
+ ASTContext &Ctx = CGM.getContext();
OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
switch (DirectiveKind) {
case OMPD_target:
case OMPD_target_teams:
return hasNestedSPMDDirective(Ctx, D);
- case OMPD_target_teams_loop:
case OMPD_target_parallel_loop:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
@@ -658,6 +658,12 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
return true;
case OMPD_target_teams_distribute:
return false;
+ case OMPD_target_teams_loop:
+ // Whether this is true or not depends on how the directive will
+ // eventually be emitted.
+ if (auto *TTLD = dyn_cast<OMPTargetTeamsGenericLoopDirective>(&D))
+ return TTLD->canBeParallelFor();
+ return false;
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
@@ -872,7 +878,7 @@ void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
assert(!ParentName.empty() && "Invalid target region parent name!");
- bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
+ bool Mode = supportsSPMDExecutionMode(CGM, D);
bool IsBareKernel = D.getSingleClause<OMPXBareClause>();
if (Mode || IsBareKernel)
emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index e6d504bcdeca5b..3bf99366b69ced 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
@@ -34,11 +35,14 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Debug.h"
#include <optional>
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;
+#define TTL_CODEGEN_TYPE "target-teams-loop-codegen"
+
static const VarDecl *getBaseDecl(const Expr *Ref);
namespace {
@@ -1432,9 +1436,12 @@ void CodeGenFunction::EmitOMPReductionClauseFinal(
*this, D.getBeginLoc(),
isOpenMPWorksharingDirective(D.getDirectiveKind()));
}
+ bool TeamsLoopCanBeParallel = false;
+ if (auto *TTLD = dyn_cast<OMPTargetTeamsGenericLoopDirective>(&D))
+ TeamsLoopCanBeParallel = TTLD->canBeParallelFor();
bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
isOpenMPParallelDirective(D.getDirectiveKind()) ||
- ReductionKind == OMPD_simd;
+ TeamsLoopCanBeParallel || ReductionKind == OMPD_simd;
bool SimpleReduction = ReductionKind == OMPD_simd;
// Emit nowait reduction if nowait clause is present or directive is a
// parallel directive (it always has implicit barrier).
@@ -7928,11 +7935,9 @@ void CodeGenFunction::EmitOMPParallelGenericLoopDirective(
void CodeGenFunction::EmitOMPTeamsGenericLoopDirective(
const OMPTeamsGenericLoopDirective &S) {
// To be consistent with current behavior of 'target teams loop', emit
- // 'teams loop' as if its constituent constructs are 'distribute,
- // 'parallel, and 'for'.
+ // 'teams loop' as if its constituent constructs are 'teams' and 'distribute'.
auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
- CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
- S.getDistInc());
+ CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
};
// Emit teams region as a standalone region.
@@ -7946,15 +7951,33 @@ void CodeGenFunction::EmitOMPTeamsGenericLoopDirective(
CodeGenDistribute);
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
};
- emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
+ emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
emitPostUpdateForReductionClause(*this, S,
[](CodeGenFunction &) { return nullptr; });
}
-static void
-emitTargetTeamsGenericLoopRegion(CodeGenFunction &CGF,
- const OMPTargetTeamsGenericLoopDirective &S,
- PrePostActionTy &Action) {
+static void emitTargetTeamsLoopCodegenStatus(CodeGenFunction &CGF,
+ std::string StatusMsg,
+ const OMPExecutableDirective &D) {
+#ifndef NDEBUG
+ bool IsDevice = CGF.CGM.getLangOpts().OpenMPIsTargetDevice;
+ if (IsDevice)
+ StatusMsg += ": DEVICE";
+ else
+ StatusMsg += ": HOST";
+ SourceLocation L = D.getBeginLoc();
+ auto &SM = CGF.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(L);
+ const char *FileName = PLoc.isValid() ? PLoc.getFilename() : nullptr;
+ unsigned LineNo =
+ PLoc.isValid() ? PLoc.getLine() : SM.getExpansionLineNumber(L);
+ llvm::dbgs() << StatusMsg << ": " << FileName << ": " << LineNo << "\n";
+#endif
+}
+
+static void emitTargetTeamsGenericLoopRegionAsParallel(
+ CodeGenFunction &CGF, PrePostActionTy &Action,
+ const OMPTargetTeamsGenericLoopDirective &S) {
Action.Enter(CGF);
// Emit 'teams loop' as if its constituent constructs are 'distribute,
// 'parallel, and 'for'.
@@ -7974,19 +7997,50 @@ emitTargetTeamsGenericLoopRegion(CodeGenFunction &CGF,
CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
};
-
+ DEBUG_WITH_TYPE(TTL_CODEGEN_TYPE,
+ emitTargetTeamsLoopCodegenStatus(
+ CGF, TTL_CODEGEN_TYPE " as parallel for", S));
emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for,
CodeGenTeams);
emitPostUpdateForReductionClause(CGF, S,
[](CodeGenFunction &) { return nullptr; });
}
-/// Emit combined directive 'target teams loop' as if its constituent
-/// constructs are 'target', 'teams', 'distribute', 'parallel', and 'for'.
+static void emitTargetTeamsGenericLoopRegionAsDistribute(
+ CodeGenFunction &CGF, PrePostActionTy &Action,
+ const OMPTargetTeamsGenericLoopDirective &S) {
+ Action.Enter(CGF);
+ // Emit 'teams loop' as if its constituent construct is 'distribute'.
+ auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
+ };
+
+ // Emit teams region as a standalone region.
+ auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
+ CGF.EmitOMPReductionClauseInit(S, PrivateScope);
+ (void)PrivateScope.Privatize();
+ CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
+ CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
+ CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
+ };
+ DEBUG_WITH_TYPE(TTL_CODEGEN_TYPE,
+ emitTargetTeamsLoopCodegenStatus(
+ CGF, TTL_CODEGEN_TYPE " as distribute", S));
+ emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen);
+ emitPostUpdateForReductionClause(CGF, S,
+ [](CodeGenFunction &) { return nullptr; });
+}
+
void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDirective(
const OMPTargetTeamsGenericLoopDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
- emitTargetTeamsGenericLoopRegion(CGF, S, Action);
+ if (S.canBeParallelFor())
+ emitTargetTeamsGenericLoopRegionAsParallel(CGF, Action, S);
+ else
+ emitTargetTeamsGenericLoopRegionAsDistribute(CGF, Action, S);
};
emitCommonOMPTargetDirective(*this, S, CodeGen);
}
@@ -7996,7 +8050,10 @@ void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDeviceFunction(
const OMPTargetTeamsGenericLoopDirective &S) {
// Emit SPMD target parallel loop region as a standalone region.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
- emitTargetTeamsGenericLoopRegion(CGF, S, Action);
+ if (S.canBeParallelFor())
+ emitTargetTeamsGenericLoopRegionAsParallel(CGF, Action, S);
+ else
+ emitTargetTeamsGenericLoopRegionAsDistribute(CGF, Action, S);
};
llvm::Function *Fn;
llvm::Constant *Addr;
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 0ba54a3a9cae35..c814535ad6bdb7 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -4478,6 +4478,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Params);
break;
}
+ // For 'target teams loop', collect all captured regions so codegen can
+ // later decide the best IR to emit given the associated loop-nest.
case OMPD_target_teams_loop:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd: {
@@ -6135,6 +6137,79 @@ processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack,
}
}
+namespace {
+/// A 'teams loop' with a nested 'loop bind(parallel)' or generic function
+/// call in the associated loop-nest cannot be a 'parallel for'.
+class TeamsLoopChecker final : public ConstStmtVisitor<TeamsLoopChecker> {
+ Sema &SemaRef;
+
+public:
+ bool teamsLoopCanBeParallelFor() const { return TeamsLoopCanBeParallelFor; }
+
+ // Is there a nested OpenMP loop bind(parallel)
+ void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
+ if (D->getDirectiveKind() == llvm::omp::Directive::OMPD_loop) {
+ if (const auto *C = D->getSingleClause<OMPBindClause>())
+ if (C->getBindKind() == OMPC_BIND_parallel) {
+ TeamsLoopCanBeParallelFor = false;
+ // No need to continue visiting any more
+ return;
+ }
+ }
+ for (const Stmt *Child : D->children())
+ if (Child)
+ Visit(Child);
+ }
+
+ void VisitCallExpr(const CallExpr *C) {
+ // Function calls inhibit parallel loop translation of 'target teams loop'
+ // unless the assume-no-nested-parallelism flag has been specified.
+ // OpenMP API runtime library calls do not inhibit parallel loop
+ // translation, regardless of the assume-no-nested-parallelism.
+ if (C) {
+ bool IsOpenMPAPI = false;
+ auto *FD = dyn_cast_or_null<FunctionDecl>(C->getCalleeDecl());
+ if (FD) {
+ std::string Name = FD->getNameInfo().getAsString();
+ IsOpenMPAPI = Name.find("omp_") == 0;
+ }
+ TeamsLoopCanBeParallelFor =
+ IsOpenMPAPI || SemaRef.getLangOpts().OpenMPNoNestedParallelism;
+ if (!TeamsLoopCanBeParallelFor)
+ return;
+ }
+ for (const Stmt *Child : C->children())
+ if (Child)
+ Visit(Child);
+ }
+
+ void VisitCapturedStmt(const CapturedStmt *S) {
+ if (!S)
+ return;
+ Visit(S->getCapturedDecl()->getBody());
+ }
+
+ void VisitStmt(const Stmt *S) {
+ if (!S)
+ return;
+ for (const Stmt *Child : S->children())
+ if (Child)
+ Visit(Child);
+ }
+ explicit TeamsLoopChecker(Sema &SemaRef)
+ : SemaRef(SemaRef), TeamsLoopCanBeParallelFor(true) {}
+
+private:
+ bool TeamsLoopCanBeParallelFor;
+};
+} // namespace
+
+static bool teamsLoopCanBeParallelFor(Stmt *AStmt, Sema &SemaRef) {
+ TeamsLoopChecker Checker(SemaRef);
+ Checker.Visit(AStmt);
+ return Checker.teamsLoopCanBeParallelFor();
+}
+
bool Sema::mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
ArrayRef<OMPClause *> Clauses,
OpenMPBindClauseKind &BindKind,
@@ -10895,7 +10970,8 @@ StmtResult Sema::ActOnOpenMPTargetTeamsGenericLoopDirective(
setFunctionHasBranchProtectedScope();
return OMPTargetTeamsGenericLoopDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ teamsLoopCanBeParallelFor(AStmt, *this));
}
StmtResult Sema::ActOnOpenMPParallelGenericLoopDirective(
@@ -15645,6 +15721,12 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
if (NameModifier == OMPD_unknown || NameModifier == OMPD_parallel)
CaptureRegion = OMPD_target;
break;
+ case OMPD_teams_loop:
+ case OMPD_target_teams_loop:
+ // For [target] teams loop, assume capture region is 'teams' so it's
+ // available for codegen later to use if/when necessary.
+ CaptureRegion = OMPD_teams;
+ break;
case OMPD_target_teams_distribute_parallel_for_simd:
if (OpenMPVersion >= 50 &&
(NameModifier == OMPD_unknown || NameModifier == OMPD_simd)) {
@@ -15652,7 +15734,6 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
break;
}
[[fallthrough]];
- case OMPD_target_teams_loop:
case OMPD_target_teams_distribute_parallel_for:
// If this clause applies to the nested 'parallel' region, capture within
// the 'teams' region, otherwise do not capture.
@@ -15775,7 +15856,6 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_loop:
- case OMPD_teams_loop:
case OMPD_teams:
case OMPD_tile:
case OMPD_unroll:
diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp
index 674ed47581dfd3..8db1ec7be91cc2 100644
--- a/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -2776,6 +2776,7 @@ void ASTStmtReader::VisitOMPTeamsGenericLoopDirective(
void ASTStmtReader::VisitOMPTargetTeamsGenericLoopDirective(
OMPTargetTeamsGenericLoopDirective *D) {
VisitOMPLoopDirective(D);
+ D->setCanBeParallelFor(Record.readBool());
}
void ASTStmtReader::VisitOMPParallelGenericLoopDirective(
diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serializati...
[truncated]
``````````
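For reference, here is a hypothetical loop-nest (not from the patch) of the kind the new TeamsLoopChecker rejects for the parallel-for translation because of a nested 'loop bind(parallel)' directive:

```c++
void nested_bind(int n, int m, double *a) {
  // The nested 'loop bind(parallel)' makes the outer 'target teams loop'
  // unsuitable for 'target teams distribute parallel for', so it is
  // emitted as 'target teams distribute' instead.
  #pragma omp target teams loop
  for (int i = 0; i < n; ++i) {
    #pragma omp loop bind(parallel)
    for (int j = 0; j < m; ++j)
      a[i * m + j] = i + j;
  }
}
```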
https://github.com/llvm/llvm-project/pull/87278