[clang] 782c525 - [OpenMP] Patch for Support to loop bind clause : Checking Parent Region (#76938)
via cfe-commits
cfe-commits at lists.llvm.org
Mon Jan 8 21:45:00 PST 2024
Author: SunilKuravinakop
Date: 2024-01-09T11:14:56+05:30
New Revision: 782c5250077cf472941f0ab7555f87ff22d6e724
URL: https://github.com/llvm/llvm-project/commit/782c5250077cf472941f0ab7555f87ff22d6e724
DIFF: https://github.com/llvm/llvm-project/commit/782c5250077cf472941f0ab7555f87ff22d6e724.diff
LOG: [OpenMP] Patch for Support to loop bind clause : Checking Parent Region (#76938)
Changes uploaded to Phabricator on Dec 16th were lost because Phabricator is down. Hence re-uploading them to GitHub.
Changes to be committed:
modified: clang/include/clang/Sema/Sema.h
modified: clang/lib/Sema/SemaOpenMP.cpp
modified: clang/test/OpenMP/generic_loop_ast_print.cpp
modified: clang/test/OpenMP/loop_bind_messages.cpp
modified: clang/test/PCH/pragma-loop.cpp
---------
Co-authored-by: Sunil Kuravinakop
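For context, a minimal sketch of the behavior this patch enforces, distilled from the new cases in clang/test/OpenMP/loop_bind_messages.cpp (the names N, buf, rejected, accepted below are illustrative placeholders, not taken from the tests): a 'loop' construct with bind(parallel) may no longer be closely nested inside a worksharing region such as 'parallel for', while nesting it directly inside a plain 'parallel' region remains valid.

// Illustrative only; compile with something like: clang -fopenmp -fsyntax-only example.cpp
constexpr int N = 64;
int buf[N][N];

void rejected() {
  // With this patch, Clang diagnoses the closely nested 'loop bind(parallel)'
  // inside the 'parallel for' worksharing region.
  #pragma omp parallel for
  for (int i = 0; i < N; i++) {
    #pragma omp loop bind(parallel) // error: prohibited region nesting
    for (int j = 0; j < N; j++)
      buf[i][j] = i + j;
  }
}

void accepted() {
  // Enclosing the 'loop' construct directly in a 'parallel' region stays valid.
  #pragma omp parallel
  {
    #pragma omp loop bind(parallel)
    for (int j = 0; j < N; j++)
      buf[0][j] = j;
  }
}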
Added:
Modified:
clang/include/clang/Sema/Sema.h
clang/lib/Sema/SemaOpenMP.cpp
clang/test/OpenMP/loop_bind_messages.cpp
clang/test/PCH/pragma-loop.cpp
Removed:
################################################################################
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 8f44adef38159e..4c464a1ae4c67f 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -11346,9 +11346,12 @@ class Sema final {
/// rigorous semantic checking in the new mapped directives.
bool mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
ArrayRef<OMPClause *> Clauses,
- OpenMPBindClauseKind BindKind,
+ OpenMPBindClauseKind &BindKind,
OpenMPDirectiveKind &Kind,
- OpenMPDirectiveKind &PrevMappedDirective);
+ OpenMPDirectiveKind &PrevMappedDirective,
+ SourceLocation StartLoc, SourceLocation EndLoc,
+ const DeclarationNameInfo &DirName,
+ OpenMPDirectiveKind CancelRegion);
public:
/// The declarator \p D defines a function in the scope \p S which is nested
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index f34d2959dc6191..365032c9642123 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -5072,6 +5072,18 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
CurrentRegion != OMPD_cancellation_point &&
CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
return false;
+ // Checks needed for mapping "loop" construct. Please check mapLoopConstruct
+ // for a detailed explanation
+ if (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion == OMPD_loop &&
+ (BindKind == OMPC_BIND_parallel || BindKind == OMPC_BIND_teams) &&
+ (isOpenMPWorksharingDirective(ParentRegion) ||
+ ParentRegion == OMPD_loop)) {
+ int ErrorMsgNumber = (BindKind == OMPC_BIND_parallel) ? 1 : 4;
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
+ << true << getOpenMPDirectiveName(ParentRegion) << ErrorMsgNumber
+ << getOpenMPDirectiveName(CurrentRegion);
+ return true;
+ }
if (CurrentRegion == OMPD_cancellation_point ||
CurrentRegion == OMPD_cancel) {
// OpenMP [2.16, Nesting of Regions]
@@ -6124,21 +6136,25 @@ processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack,
bool Sema::mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
ArrayRef<OMPClause *> Clauses,
- OpenMPBindClauseKind BindKind,
+ OpenMPBindClauseKind &BindKind,
OpenMPDirectiveKind &Kind,
- OpenMPDirectiveKind &PrevMappedDirective) {
+ OpenMPDirectiveKind &PrevMappedDirective,
+ SourceLocation StartLoc, SourceLocation EndLoc,
+ const DeclarationNameInfo &DirName,
+ OpenMPDirectiveKind CancelRegion) {
bool UseClausesWithoutBind = false;
// Restricting to "#pragma omp loop bind"
if (getLangOpts().OpenMP >= 50 && Kind == OMPD_loop) {
+
+ const OpenMPDirectiveKind ParentDirective = DSAStack->getParentDirective();
+
if (BindKind == OMPC_BIND_unknown) {
// Setting the enclosing teams or parallel construct for the loop
// directive without bind clause.
BindKind = OMPC_BIND_thread; // Default bind(thread) if binding is unknown
- const OpenMPDirectiveKind ParentDirective =
- DSAStack->getParentDirective();
if (ParentDirective == OMPD_unknown) {
Diag(DSAStack->getDefaultDSALocation(),
diag::err_omp_bind_required_on_loop);
@@ -6150,9 +6166,10 @@ bool Sema::mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
BindKind = OMPC_BIND_teams;
}
} else {
- // bind clause is present, so we should set flag indicating to only
- // use the clauses that aren't the bind clause for the new directive that
- // loop is lowered to.
+ // bind clause is present in loop directive. When the loop directive is
+ // changed to a new directive the bind clause is not used. So, we should
+ // set flag indicating to only use the clauses that aren't the
+ // bind clause.
UseClausesWithoutBind = true;
}
@@ -6213,26 +6230,35 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind PrevMappedDirective) {
StmtResult Res = StmtError();
OpenMPBindClauseKind BindKind = OMPC_BIND_unknown;
+ llvm::SmallVector<OMPClause *> ClausesWithoutBind;
+ bool UseClausesWithoutBind = false;
+
if (const OMPBindClause *BC =
OMPExecutableDirective::getSingleClause<OMPBindClause>(Clauses))
BindKind = BC->getBindKind();
+
+ // Variable used to note down the DirectiveKind because mapLoopConstruct may
+ // change "Kind" variable, due to mapping of "omp loop" to other directives.
+ OpenMPDirectiveKind DK = Kind;
+ if (Kind == OMPD_loop || PrevMappedDirective == OMPD_loop) {
+ UseClausesWithoutBind = mapLoopConstruct(
+ ClausesWithoutBind, Clauses, BindKind, Kind, PrevMappedDirective,
+ StartLoc, EndLoc, DirName, CancelRegion);
+ DK = OMPD_loop;
+ }
+
// First check CancelRegion which is then used in checkNestingOfRegions.
if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
- checkNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
- BindKind, StartLoc))
+ checkNestingOfRegions(*this, DSAStack, DK, DirName, CancelRegion,
+ BindKind, StartLoc)) {
return StmtError();
+ }
// Report affected OpenMP target offloading behavior when in HIP lang-mode.
if (getLangOpts().HIP && (isOpenMPTargetExecutionDirective(Kind) ||
isOpenMPTargetDataManagementDirective(Kind)))
Diag(StartLoc, diag::warn_hip_omp_target_directives);
- llvm::SmallVector<OMPClause *> ClausesWithoutBind;
- bool UseClausesWithoutBind = false;
-
- UseClausesWithoutBind = mapLoopConstruct(ClausesWithoutBind, Clauses,
- BindKind, Kind, PrevMappedDirective);
-
llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
VarsWithInheritedDSAType VarsWithInheritedDSA;
bool ErrorFound = false;
diff --git a/clang/test/OpenMP/loop_bind_messages.cpp b/clang/test/OpenMP/loop_bind_messages.cpp
index f7fdf289714328..becd1f40c0c047 100644
--- a/clang/test/OpenMP/loop_bind_messages.cpp
+++ b/clang/test/OpenMP/loop_bind_messages.cpp
@@ -4,6 +4,7 @@
#define NNN 50
int aaa[NNN];
+int aaa2[NNN][NNN];
void parallel_loop() {
#pragma omp parallel
@@ -13,10 +14,82 @@ void parallel_loop() {
aaa[j] = j*NNN;
}
}
+
+ #pragma omp parallel for
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+
+ #pragma omp parallel
+ #pragma omp for nowait
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+
+ #pragma omp parallel for
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp nothing
+ #pragma omp loop
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+
+ #pragma omp target teams distribute parallel for
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'target teams distribute parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+
+ #pragma omp target parallel
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(parallel)
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+
+ #pragma omp parallel for
+ for (int i = 0; i < 100; ++i) {
+ #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'loop' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[j] = j*NNN;
+ }
+ }
+ }
+
+ #pragma omp parallel
+ {
+ #pragma omp sections
+ {
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'sections' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+
+ #pragma omp section
+ {
+ aaa[NNN-1] = NNN;
+ }
+ }
+ }
}
void teams_loop() {
- int var1, var2;
+ int var1;
+ int total = 0;
#pragma omp teams
{
@@ -32,24 +105,22 @@ void teams_loop() {
}
}
}
-}
-void orphan_loop_with_bind() {
- #pragma omp loop bind(parallel)
- for (int j = 0 ; j < NNN ; j++) {
- aaa[j] = j*NNN;
+ #pragma omp target teams
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(teams)
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
}
-}
-void orphan_loop_no_bind() {
- #pragma omp loop // expected-error{{expected 'bind' clause for 'loop' construct without an enclosing OpenMP construct}}
- for (int j = 0 ; j < NNN ; j++) {
- aaa[j] = j*NNN;
+ #pragma omp target teams distribute parallel for
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(teams) // expected-error{{region cannot be closely nested inside 'target teams distribute parallel for' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
}
-}
-
-void teams_loop_reduction() {
- int total = 0;
#pragma omp teams
{
@@ -63,14 +134,98 @@ void teams_loop_reduction() {
total+=aaa[j];
}
}
+
+ #pragma omp teams num_teams(8) thread_limit(256)
+ #pragma omp distribute parallel for dist_schedule(static, 1024) \
+ schedule(static, 64)
+ for (int i = 0; i < NNN; i++) {
+ #pragma omp loop bind(teams) // expected-error{{'distribute parallel for' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
+ for (int j = 0; j < NNN; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+
+ #pragma omp teams
+ for (int i = 0; i < NNN; i++) {
+ #pragma omp loop bind(thread)
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[i] = i+i*NNN;
+ }
+ }
+
+ #pragma omp teams loop
+ for (int i = 0; i < NNN; i++) {
+ #pragma omp loop
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[i] = i+i*NNN;
+ }
+ }
+
+ #pragma omp teams loop
+ for (int i = 0; i < NNN; i++) {
+ #pragma omp loop bind(teams) // expected-error{{region cannot be closely nested inside 'teams loop' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[i] = i+i*NNN;
+ }
+ }
+}
+
+void thread_loop() {
+ #pragma omp parallel
+ for (int i = 0; i < NNN; i++) {
+ #pragma omp loop bind(thread)
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[i] = i+i*NNN;
+ }
+ }
+
+ #pragma omp teams
+ for (int i = 0; i < NNN; i++) {
+ #pragma omp loop bind(thread)
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[i] = i+i*NNN;
+ }
+ }
+}
+
+void parallel_for_with_loop_teams_bind(){
+ #pragma omp parallel for
+ for (int i = 0; i < NNN; i++) {
+ #pragma omp loop bind(teams) // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[i] = i+i*NNN;
+ }
+ }
+}
+
+void orphan_loops() {
+ #pragma omp loop // expected-error{{expected 'bind' clause for 'loop' construct without an enclosing OpenMP construct}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[j] = j*NNN;
+ }
+
+ #pragma omp loop bind(parallel)
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[j] = j*NNN;
+ }
+
+ #pragma omp loop bind(teams)
+ for (int i = 0; i < NNN; i++) {
+ aaa[i] = i+i*NNN;
+ }
+
+ #pragma omp loop bind(thread)
+ for (int i = 0; i < NNN; i++) {
+ aaa[i] = i+i*NNN;
+ }
}
int main(int argc, char *argv[]) {
parallel_loop();
teams_loop();
- orphan_loop_with_bind();
- orphan_loop_no_bind();
- teams_loop_reduction();
+ thread_loop();
+ parallel_for_with_loop_teams_bind();
+ orphan_loops();
}
#endif
diff --git a/clang/test/PCH/pragma-loop.cpp b/clang/test/PCH/pragma-loop.cpp
index f5de630ffc9120..a3c6871041c0ee 100644
--- a/clang/test/PCH/pragma-loop.cpp
+++ b/clang/test/PCH/pragma-loop.cpp
@@ -116,9 +116,13 @@ class pragma_test {
inline void run10(int *List, int Length) {
int i = 0;
-#pragma omp loop bind(teams)
+ int j = 0;
+ #pragma omp teams
for (int i = 0; i < Length; i++) {
- List[i] = i;
+ #pragma omp loop bind(teams)
+ for (int j = 0; j < Length; j++) {
+ List[i] = i+j;
+ }
}
}