[llvm] f82ec55 - [OpenMP] Initial parsing/sema for the 'omp target parallel loop' construct

Mike Rice via llvm-commits llvm-commits@lists.llvm.org
Thu Mar 24 09:19:25 PDT 2022


Author: Mike Rice
Date: 2022-03-24T09:19:00-07:00
New Revision: f82ec5532b2f303732e547226816d7a668db3050

URL: https://github.com/llvm/llvm-project/commit/f82ec5532b2f303732e547226816d7a668db3050
DIFF: https://github.com/llvm/llvm-project/commit/f82ec5532b2f303732e547226816d7a668db3050.diff

LOG: [OpenMP] Initial parsing/sema for the 'omp target parallel loop' construct

 Adds basic parsing/sema/serialization support for the
 #pragma omp target parallel loop directive.
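
For readers skimming the patch, a minimal usage sketch of the directive this change
teaches Clang to accept is shown below. It is illustrative only and not part of the
commit; the clause spellings mirror the new tests, and CodeGen for the construct is
still unimplemented (it hits llvm_unreachable in CGStmt.cpp).

  // Hypothetical example; the front end accepts this after the patch, CodeGen does not yet.
  void saxpy(int n, float a, float *x, float *y) {
    #pragma omp target parallel loop map(to: x[:n]) map(tofrom: y[:n]) num_threads(128)
    for (int i = 0; i < n; ++i)
      y[i] = a * x[i] + y[i];
  }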

Differential Revision: https://reviews.llvm.org/D122359

Added: 
    clang/test/OpenMP/target_parallel_generic_loop_ast_print.cpp
    clang/test/OpenMP/target_parallel_generic_loop_messages.cpp

Modified: 
    clang/include/clang-c/Index.h
    clang/include/clang/AST/RecursiveASTVisitor.h
    clang/include/clang/AST/StmtOpenMP.h
    clang/include/clang/Basic/StmtNodes.td
    clang/include/clang/Sema/Sema.h
    clang/include/clang/Serialization/ASTBitCodes.h
    clang/lib/AST/StmtOpenMP.cpp
    clang/lib/AST/StmtPrinter.cpp
    clang/lib/AST/StmtProfile.cpp
    clang/lib/Basic/OpenMPKinds.cpp
    clang/lib/CodeGen/CGStmt.cpp
    clang/lib/Parse/ParseOpenMP.cpp
    clang/lib/Sema/SemaExceptionSpec.cpp
    clang/lib/Sema/SemaOpenMP.cpp
    clang/lib/Sema/TreeTransform.h
    clang/lib/Serialization/ASTReaderStmt.cpp
    clang/lib/Serialization/ASTWriterStmt.cpp
    clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
    clang/test/Analysis/cfg-openmp.cpp
    clang/tools/libclang/CIndex.cpp
    clang/tools/libclang/CXCursor.cpp
    llvm/include/llvm/Frontend/OpenMP/OMP.td

Removed: 
    


################################################################################
diff --git a/clang/include/clang-c/Index.h b/clang/include/clang-c/Index.h
index 4783744359eec..f28601c37d8ef 100644
--- a/clang/include/clang-c/Index.h
+++ b/clang/include/clang-c/Index.h
@@ -2612,7 +2612,11 @@ enum CXCursorKind {
    */
   CXCursor_OMPParallelGenericLoopDirective = 298,
 
-  CXCursor_LastStmt = CXCursor_OMPParallelGenericLoopDirective,
+  /** OpenMP target parallel loop directive.
+   */
+  CXCursor_OMPTargetParallelGenericLoopDirective = 299,
+
+  CXCursor_LastStmt = CXCursor_OMPTargetParallelGenericLoopDirective,
 
   /**
    * Cursor that represents the translation unit itself.

diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h
index ceb874598aded..5aed81d6c72ac 100644
--- a/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -3082,6 +3082,9 @@ DEF_TRAVERSE_STMT(OMPTargetTeamsGenericLoopDirective,
 
 DEF_TRAVERSE_STMT(OMPParallelGenericLoopDirective,
                   { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
+DEF_TRAVERSE_STMT(OMPTargetParallelGenericLoopDirective,
+                  { TRY_TO(TraverseOMPExecutableDirective(S)); })
 // OpenMP clauses.
 template <typename Derived>
 bool RecursiveASTVisitor<Derived>::TraverseOMPClause(OMPClause *C) {

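With the DEF_TRAVERSE_STMT entry above, RecursiveASTVisitor-based tools pick up the new
statement class automatically. A rough out-of-tree sketch follows; the visitor is
illustrative and not part of this patch, and getLoopsNumber()/getNumClauses() come from
the existing OMPLoopBasedDirective/OMPExecutableDirective base classes.

  #include "clang/AST/RecursiveASTVisitor.h"
  #include "clang/AST/StmtOpenMP.h"
  #include "llvm/Support/raw_ostream.h"

  // Prints a one-line summary for every 'target parallel loop' directive it visits.
  class TargetParallelLoopDumper
      : public clang::RecursiveASTVisitor<TargetParallelLoopDumper> {
  public:
    bool VisitOMPTargetParallelGenericLoopDirective(
        clang::OMPTargetParallelGenericLoopDirective *D) {
      // The node is an OMPLoopDirective, so the collapsed-loop count and the
      // helper expressions filled in by Create() are available here as well.
      llvm::errs() << "target parallel loop: " << D->getLoopsNumber()
                   << " associated loop(s), " << D->getNumClauses()
                   << " clause(s)\n";
      return true; // continue traversal
    }
  };
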
diff --git a/clang/include/clang/AST/StmtOpenMP.h b/clang/include/clang/AST/StmtOpenMP.h
index 9178b47a0b5f4..28b3567b36556 100644
--- a/clang/include/clang/AST/StmtOpenMP.h
+++ b/clang/include/clang/AST/StmtOpenMP.h
@@ -1529,6 +1529,7 @@ class OMPLoopDirective : public OMPLoopBasedDirective {
            T->getStmtClass() == OMPTeamsGenericLoopDirectiveClass ||
            T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass ||
            T->getStmtClass() == OMPParallelGenericLoopDirectiveClass ||
+           T->getStmtClass() == OMPTargetParallelGenericLoopDirectiveClass ||
            T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
            T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass ||
            T->getStmtClass() == OMPDistributeDirectiveClass ||
@@ -5767,6 +5768,71 @@ class OMPParallelGenericLoopDirective final : public OMPLoopDirective {
     return T->getStmtClass() == OMPParallelGenericLoopDirectiveClass;
   }
 };
+
+/// This represents '#pragma omp target parallel loop' directive.
+///
+/// \code
+/// #pragma omp target parallel loop private(a,b) order(concurrent)
+/// \endcode
+/// In this example directive '#pragma omp target parallel loop' has
+/// clauses 'private' with the variables 'a' and 'b', and order(concurrent).
+///
+class OMPTargetParallelGenericLoopDirective final : public OMPLoopDirective {
+  friend class ASTStmtReader;
+  friend class OMPExecutableDirective;
+  /// Build directive with the given start and end location.
+  ///
+  /// \param StartLoc Starting location of the directive kind.
+  /// \param EndLoc Ending location of the directive.
+  /// \param CollapsedNum Number of collapsed nested loops.
+  ///
+  OMPTargetParallelGenericLoopDirective(SourceLocation StartLoc,
+                                        SourceLocation EndLoc,
+                                        unsigned CollapsedNum)
+      : OMPLoopDirective(OMPTargetParallelGenericLoopDirectiveClass,
+                         llvm::omp::OMPD_target_parallel_loop, StartLoc, EndLoc,
+                         CollapsedNum) {}
+
+  /// Build an empty directive.
+  ///
+  /// \param CollapsedNum Number of collapsed nested loops.
+  ///
+  explicit OMPTargetParallelGenericLoopDirective(unsigned CollapsedNum)
+      : OMPLoopDirective(OMPTargetParallelGenericLoopDirectiveClass,
+                         llvm::omp::OMPD_target_parallel_loop, SourceLocation(),
+                         SourceLocation(), CollapsedNum) {}
+
+public:
+  /// Creates directive with a list of \p Clauses.
+  ///
+  /// \param C AST context.
+  /// \param StartLoc Starting location of the directive kind.
+  /// \param EndLoc Ending Location of the directive.
+  /// \param CollapsedNum Number of collapsed loops.
+  /// \param Clauses List of clauses.
+  /// \param AssociatedStmt Statement, associated with the directive.
+  /// \param Exprs Helper expressions for CodeGen.
+  ///
+  static OMPTargetParallelGenericLoopDirective *
+  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
+         Stmt *AssociatedStmt, const HelperExprs &Exprs);
+
+  /// Creates an empty directive with the place
+  /// for \a NumClauses clauses.
+  ///
+  /// \param C AST context.
+  /// \param CollapsedNum Number of collapsed nested loops.
+  /// \param NumClauses Number of clauses.
+  ///
+  static OMPTargetParallelGenericLoopDirective *
+  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
+              EmptyShell);
+
+  static bool classof(const Stmt *T) {
+    return T->getStmtClass() == OMPTargetParallelGenericLoopDirectiveClass;
+  }
+};
 } // end namespace clang
 
 #endif

diff --git a/clang/include/clang/Basic/StmtNodes.td b/clang/include/clang/Basic/StmtNodes.td
index 63568589a3ef3..1954bb464d959 100644
--- a/clang/include/clang/Basic/StmtNodes.td
+++ b/clang/include/clang/Basic/StmtNodes.td
@@ -286,3 +286,4 @@ def OMPGenericLoopDirective : StmtNode<OMPLoopDirective>;
 def OMPTeamsGenericLoopDirective : StmtNode<OMPLoopDirective>;
 def OMPTargetTeamsGenericLoopDirective : StmtNode<OMPLoopDirective>;
 def OMPParallelGenericLoopDirective : StmtNode<OMPLoopDirective>;
+def OMPTargetParallelGenericLoopDirective : StmtNode<OMPLoopDirective>;

diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index cf00cea7b69d2..8e3f9221763f3 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -10915,6 +10915,11 @@ class Sema final {
   StmtResult ActOnOpenMPParallelGenericLoopDirective(
       ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
       SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+  /// Called on well-formed '\#pragma omp target parallel loop' after parsing
+  /// of the associated statement.
+  StmtResult ActOnOpenMPTargetParallelGenericLoopDirective(
+      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
   /// Called on well-formed '\#pragma omp cancellation point'.
   StmtResult
   ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,

diff --git a/clang/include/clang/Serialization/ASTBitCodes.h b/clang/include/clang/Serialization/ASTBitCodes.h
index ce83cf9381307..180c8f59d0aa0 100644
--- a/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/clang/include/clang/Serialization/ASTBitCodes.h
@@ -1964,6 +1964,7 @@ enum StmtCode {
   STMT_OMP_TEAMS_GENERIC_LOOP_DIRECTIVE,
   STMT_OMP_TARGET_TEAMS_GENERIC_LOOP_DIRECTIVE,
   STMT_OMP_PARALLEL_GENERIC_LOOP_DIRECTIVE,
+  STMT_OMP_TARGET_PARALLEL_GENERIC_LOOP_DIRECTIVE,
   EXPR_OMP_ARRAY_SECTION,
   EXPR_OMP_ARRAY_SHAPING,
   EXPR_OMP_ITERATOR,

diff --git a/clang/lib/AST/StmtOpenMP.cpp b/clang/lib/AST/StmtOpenMP.cpp
index caae6d770ff42..84a4de00328a8 100644
--- a/clang/lib/AST/StmtOpenMP.cpp
+++ b/clang/lib/AST/StmtOpenMP.cpp
@@ -2268,3 +2268,49 @@ OMPParallelGenericLoopDirective *OMPParallelGenericLoopDirective::CreateEmpty(
       C, NumClauses, /*HasAssociatedStmt=*/true,
       numLoopChildren(CollapsedNum, OMPD_parallel_loop), CollapsedNum);
 }
+
+OMPTargetParallelGenericLoopDirective *
+OMPTargetParallelGenericLoopDirective::Create(
+    const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+    unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+    const HelperExprs &Exprs) {
+  auto *Dir = createDirective<OMPTargetParallelGenericLoopDirective>(
+      C, Clauses, AssociatedStmt,
+      numLoopChildren(CollapsedNum, OMPD_target_parallel_loop), StartLoc,
+      EndLoc, CollapsedNum);
+  Dir->setIterationVariable(Exprs.IterationVarRef);
+  Dir->setLastIteration(Exprs.LastIteration);
+  Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+  Dir->setPreCond(Exprs.PreCond);
+  Dir->setCond(Exprs.Cond);
+  Dir->setInit(Exprs.Init);
+  Dir->setInc(Exprs.Inc);
+  Dir->setIsLastIterVariable(Exprs.IL);
+  Dir->setLowerBoundVariable(Exprs.LB);
+  Dir->setUpperBoundVariable(Exprs.UB);
+  Dir->setStrideVariable(Exprs.ST);
+  Dir->setEnsureUpperBound(Exprs.EUB);
+  Dir->setNextLowerBound(Exprs.NLB);
+  Dir->setNextUpperBound(Exprs.NUB);
+  Dir->setNumIterations(Exprs.NumIterations);
+  Dir->setCounters(Exprs.Counters);
+  Dir->setPrivateCounters(Exprs.PrivateCounters);
+  Dir->setInits(Exprs.Inits);
+  Dir->setUpdates(Exprs.Updates);
+  Dir->setFinals(Exprs.Finals);
+  Dir->setDependentCounters(Exprs.DependentCounters);
+  Dir->setDependentInits(Exprs.DependentInits);
+  Dir->setFinalsConditions(Exprs.FinalsConditions);
+  Dir->setPreInits(Exprs.PreInits);
+  return Dir;
+}
+
+OMPTargetParallelGenericLoopDirective *
+OMPTargetParallelGenericLoopDirective::CreateEmpty(const ASTContext &C,
+                                                   unsigned NumClauses,
+                                                   unsigned CollapsedNum,
+                                                   EmptyShell) {
+  return createEmptyDirective<OMPTargetParallelGenericLoopDirective>(
+      C, NumClauses, /*HasAssociatedStmt=*/true,
+      numLoopChildren(CollapsedNum, OMPD_target_parallel_loop), CollapsedNum);
+}

diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp
index ccea73ff08547..7552e0481e940 100644
--- a/clang/lib/AST/StmtPrinter.cpp
+++ b/clang/lib/AST/StmtPrinter.cpp
@@ -1023,6 +1023,12 @@ void StmtPrinter::VisitOMPParallelGenericLoopDirective(
   PrintOMPExecutableDirective(Node);
 }
 
+void StmtPrinter::VisitOMPTargetParallelGenericLoopDirective(
+    OMPTargetParallelGenericLoopDirective *Node) {
+  Indent() << "#pragma omp target parallel loop";
+  PrintOMPExecutableDirective(Node);
+}
+
 //===----------------------------------------------------------------------===//
 //  Expr printing methods.
 //===----------------------------------------------------------------------===//

diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index 6d31a8f9edccf..8cf06a2cf992e 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -1222,6 +1222,11 @@ void StmtProfiler::VisitOMPParallelGenericLoopDirective(
   VisitOMPLoopDirective(S);
 }
 
+void StmtProfiler::VisitOMPTargetParallelGenericLoopDirective(
+    const OMPTargetParallelGenericLoopDirective *S) {
+  VisitOMPLoopDirective(S);
+}
+
 void StmtProfiler::VisitExpr(const Expr *S) {
   VisitStmt(S);
 }

diff --git a/clang/lib/Basic/OpenMPKinds.cpp b/clang/lib/Basic/OpenMPKinds.cpp
index 2a57ad8e01039..c18d1cc87ac46 100644
--- a/clang/lib/Basic/OpenMPKinds.cpp
+++ b/clang/lib/Basic/OpenMPKinds.cpp
@@ -497,7 +497,7 @@ bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
          DKind == OMPD_target_teams_distribute_simd || DKind == OMPD_tile ||
          DKind == OMPD_unroll || DKind == OMPD_loop ||
          DKind == OMPD_teams_loop || DKind == OMPD_target_teams_loop ||
-         DKind == OMPD_parallel_loop;
+         DKind == OMPD_parallel_loop || DKind == OMPD_target_parallel_loop;
 }
 
 bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
@@ -536,7 +536,7 @@ bool clang::isOpenMPParallelDirective(OpenMPDirectiveKind DKind) {
          DKind == OMPD_parallel_master ||
          DKind == OMPD_parallel_master_taskloop ||
          DKind == OMPD_parallel_master_taskloop_simd ||
-         DKind == OMPD_parallel_loop;
+         DKind == OMPD_parallel_loop || DKind == OMPD_target_parallel_loop;
 }
 
 bool clang::isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind) {
@@ -547,7 +547,7 @@ bool clang::isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind) {
          DKind == OMPD_target_teams_distribute_parallel_for ||
          DKind == OMPD_target_teams_distribute_parallel_for_simd ||
          DKind == OMPD_target_teams_distribute_simd ||
-         DKind == OMPD_target_teams_loop;
+         DKind == OMPD_target_teams_loop || DKind == OMPD_target_parallel_loop;
 }
 
 bool clang::isOpenMPTargetDataManagementDirective(OpenMPDirectiveKind DKind) {
@@ -606,7 +606,8 @@ bool clang::isOpenMPDistributeDirective(OpenMPDirectiveKind Kind) {
 
 bool clang::isOpenMPGenericLoopDirective(OpenMPDirectiveKind Kind) {
   return Kind == OMPD_loop || Kind == OMPD_teams_loop ||
-         Kind == OMPD_target_teams_loop || Kind == OMPD_parallel_loop;
+         Kind == OMPD_target_teams_loop || Kind == OMPD_parallel_loop ||
+         Kind == OMPD_target_parallel_loop;
 }
 
 bool clang::isOpenMPPrivate(OpenMPClauseKind Kind) {
@@ -681,6 +682,7 @@ void clang::getOpenMPCaptureRegions(
   case OMPD_target_parallel:
   case OMPD_target_parallel_for:
   case OMPD_target_parallel_for_simd:
+  case OMPD_target_parallel_loop:
     CaptureRegions.push_back(OMPD_task);
     CaptureRegions.push_back(OMPD_target);
     CaptureRegions.push_back(OMPD_parallel);

diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 2b27d64bc93ef..43a9919c66add 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -405,6 +405,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
   case Stmt::OMPParallelGenericLoopDirectiveClass:
     llvm_unreachable("parallel loop directive not supported yet.");
     break;
+  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
+    llvm_unreachable("target parallel loop directive not supported yet.");
+    break;
   }
 }
 

diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp
index d2ba4d6413bc4..60c51d3015987 100644
--- a/clang/lib/Parse/ParseOpenMP.cpp
+++ b/clang/lib/Parse/ParseOpenMP.cpp
@@ -155,6 +155,7 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
       {OMPD_taskloop, OMPD_simd, OMPD_taskloop_simd},
       {OMPD_target, OMPD_parallel, OMPD_target_parallel},
       {OMPD_target, OMPD_simd, OMPD_target_simd},
+      {OMPD_target_parallel, OMPD_loop, OMPD_target_parallel_loop},
       {OMPD_target_parallel, OMPD_for, OMPD_target_parallel_for},
       {OMPD_target_parallel_for, OMPD_simd, OMPD_target_parallel_for_simd},
       {OMPD_teams, OMPD_distribute, OMPD_teams_distribute},
@@ -2405,6 +2406,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
   case OMPD_teams_loop:
   case OMPD_target_teams_loop:
   case OMPD_parallel_loop:
+  case OMPD_target_parallel_loop:
     Diag(Tok, diag::err_omp_unexpected_directive)
         << 1 << getOpenMPDirectiveName(DKind);
     break;
@@ -2763,6 +2765,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
   case OMPD_teams_loop:
   case OMPD_target_teams_loop:
   case OMPD_parallel_loop:
+  case OMPD_target_parallel_loop:
   case OMPD_taskloop:
   case OMPD_taskloop_simd:
   case OMPD_master_taskloop:

diff --git a/clang/lib/Sema/SemaExceptionSpec.cpp b/clang/lib/Sema/SemaExceptionSpec.cpp
index 19b2fc7ffe9ad..207bde0b05440 100644
--- a/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -1502,6 +1502,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
   case Stmt::OMPTeamsGenericLoopDirectiveClass:
   case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
   case Stmt::OMPParallelGenericLoopDirectiveClass:
+  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
   case Stmt::ReturnStmtClass:
   case Stmt::SEHExceptStmtClass:
   case Stmt::SEHFinallyStmtClass:

diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 5c4edb79d1d61..3424393f7e5e6 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -3956,6 +3956,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
   case OMPD_target_parallel_for:
   case OMPD_target_parallel_for_simd:
   case OMPD_target_teams_loop:
+  case OMPD_target_parallel_loop:
   case OMPD_target_teams_distribute:
   case OMPD_target_teams_distribute_simd: {
     QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
@@ -6325,6 +6326,10 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
     Res = ActOnOpenMPParallelGenericLoopDirective(
         ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
     break;
+  case OMPD_target_parallel_loop:
+    Res = ActOnOpenMPTargetParallelGenericLoopDirective(
+        ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+    break;
   case OMPD_declare_target:
   case OMPD_end_declare_target:
   case OMPD_threadprivate:
@@ -10365,6 +10370,55 @@ StmtResult Sema::ActOnOpenMPParallelGenericLoopDirective(
       Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
 }
 
+StmtResult Sema::ActOnOpenMPTargetParallelGenericLoopDirective(
+    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+  if (!AStmt)
+    return StmtError();
+
+  // OpenMP 5.1 [2.11.7, loop construct, Restrictions]
+  // A list item may not appear in a lastprivate clause unless it is the
+  // loop iteration variable of a loop that is associated with the construct.
+  if (checkGenericLoopLastprivate(*this, Clauses, OMPD_target_parallel_loop,
+                                  DSAStack))
+    return StmtError();
+
+  auto *CS = cast<CapturedStmt>(AStmt);
+  // 1.2.2 OpenMP Language Terminology
+  // Structured block - An executable statement with a single entry at the
+  // top and a single exit at the bottom.
+  // The point of exit cannot be a branch out of the structured block.
+  // longjmp() and throw() must not violate the entry/exit criteria.
+  CS->getCapturedDecl()->setNothrow();
+  for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_parallel_loop);
+       ThisCaptureLevel > 1; --ThisCaptureLevel) {
+    CS = cast<CapturedStmt>(CS->getCapturedStmt());
+    // 1.2.2 OpenMP Language Terminology
+    // Structured block - An executable statement with a single entry at the
+    // top and a single exit at the bottom.
+    // The point of exit cannot be a branch out of the structured block.
+    // longjmp() and throw() must not violate the entry/exit criteria.
+    CS->getCapturedDecl()->setNothrow();
+  }
+
+  OMPLoopDirective::HelperExprs B;
+  // In presence of clause 'collapse', it will define the nested loops number.
+  unsigned NestedLoopCount =
+      checkOpenMPLoop(OMPD_target_parallel_loop, getCollapseNumberExpr(Clauses),
+                      /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+                      VarsWithImplicitDSA, B);
+  if (NestedLoopCount == 0)
+    return StmtError();
+
+  assert((CurContext->isDependentContext() || B.builtAll()) &&
+         "omp loop exprs were not built");
+
+  setFunctionHasBranchProtectedScope();
+
+  return OMPTargetParallelGenericLoopDirective::Create(
+      Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
 StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                             Stmt *AStmt,
                                             SourceLocation StartLoc,
@@ -14589,6 +14643,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
       LLVM_FALLTHROUGH;
     case OMPD_target_parallel:
     case OMPD_target_parallel_for:
+    case OMPD_target_parallel_loop:
       // If this clause applies to the nested 'parallel' region, capture within
       // the 'target' region, otherwise do not capture.
       if (NameModifier == OMPD_unknown || NameModifier == OMPD_parallel)
@@ -14734,6 +14789,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
     case OMPD_target_parallel:
     case OMPD_target_parallel_for:
     case OMPD_target_parallel_for_simd:
+    case OMPD_target_parallel_loop:
       CaptureRegion = OMPD_target;
       break;
     case OMPD_teams_distribute_parallel_for:
@@ -14859,6 +14915,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
     case OMPD_target_parallel:
     case OMPD_target_parallel_for:
     case OMPD_target_parallel_for_simd:
+    case OMPD_target_parallel_loop:
     case OMPD_threadprivate:
     case OMPD_allocate:
     case OMPD_taskyield:
@@ -14944,6 +15001,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
     case OMPD_target_parallel:
     case OMPD_target_parallel_for:
     case OMPD_target_parallel_for_simd:
+    case OMPD_target_parallel_loop:
     case OMPD_threadprivate:
     case OMPD_allocate:
     case OMPD_taskyield:
@@ -15048,6 +15106,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
     case OMPD_teams_loop:
     case OMPD_target_teams_loop:
     case OMPD_parallel_loop:
+    case OMPD_target_parallel_loop:
     case OMPD_simd:
     case OMPD_tile:
     case OMPD_unroll:
@@ -15133,6 +15192,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
     case OMPD_teams_loop:
     case OMPD_target_teams_loop:
     case OMPD_parallel_loop:
+    case OMPD_target_parallel_loop:
     case OMPD_simd:
     case OMPD_tile:
     case OMPD_unroll:
@@ -15169,6 +15229,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
     case OMPD_target_teams_distribute_simd:
     case OMPD_target_parallel_for:
     case OMPD_target_parallel_for_simd:
+    case OMPD_target_parallel_loop:
     case OMPD_target_teams_distribute_parallel_for:
     case OMPD_target_teams_distribute_parallel_for_simd:
     case OMPD_target_teams_loop:
@@ -15306,6 +15367,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
     case OMPD_teams_loop:
     case OMPD_target_teams_loop:
     case OMPD_parallel_loop:
+    case OMPD_target_parallel_loop:
     case OMPD_simd:
     case OMPD_tile:
     case OMPD_unroll:

diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index bbca7d86fc81a..fb830ef2a118a 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -9282,6 +9282,18 @@ StmtResult TreeTransform<Derived>::TransformOMPParallelGenericLoopDirective(
   return Res;
 }
 
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPTargetParallelGenericLoopDirective(
+    OMPTargetParallelGenericLoopDirective *D) {
+  DeclarationNameInfo DirName;
+  getDerived().getSema().StartOpenMPDSABlock(OMPD_target_parallel_loop, DirName,
+                                             nullptr, D->getBeginLoc());
+  StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+  getDerived().getSema().EndOpenMPDSABlock(Res.get());
+  return Res;
+}
+
 //===----------------------------------------------------------------------===//
 // OpenMP clause transformation
 //===----------------------------------------------------------------------===//

diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp
index 1e2d44c222690..ed9f1d2b34289 100644
--- a/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -2653,6 +2653,11 @@ void ASTStmtReader::VisitOMPParallelGenericLoopDirective(
   VisitOMPLoopDirective(D);
 }
 
+void ASTStmtReader::VisitOMPTargetParallelGenericLoopDirective(
+    OMPTargetParallelGenericLoopDirective *D) {
+  VisitOMPLoopDirective(D);
+}
+
 //===----------------------------------------------------------------------===//
 // ASTReader Implementation
 //===----------------------------------------------------------------------===//
@@ -3632,6 +3637,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
       break;
     }
 
+    case STMT_OMP_TARGET_PARALLEL_GENERIC_LOOP_DIRECTIVE: {
+      unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+      unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+      S = OMPTargetParallelGenericLoopDirective::CreateEmpty(
+          Context, NumClauses, CollapsedNum, Empty);
+      break;
+    }
+
     case EXPR_CXX_OPERATOR_CALL:
       S = CXXOperatorCallExpr::CreateEmpty(
           Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],

diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp
index 906c1b52d8a6b..1a4c782d5917d 100644
--- a/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -2613,6 +2613,12 @@ void ASTStmtWriter::VisitOMPParallelGenericLoopDirective(
   Code = serialization::STMT_OMP_PARALLEL_GENERIC_LOOP_DIRECTIVE;
 }
 
+void ASTStmtWriter::VisitOMPTargetParallelGenericLoopDirective(
+    OMPTargetParallelGenericLoopDirective *D) {
+  VisitOMPLoopDirective(D);
+  Code = serialization::STMT_OMP_TARGET_PARALLEL_GENERIC_LOOP_DIRECTIVE;
+}
+
 //===----------------------------------------------------------------------===//
 // ASTWriter Implementation
 //===----------------------------------------------------------------------===//

diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 6ad6eaf6dd468..ec7ab25311454 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1300,6 +1300,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
     case Stmt::OMPTeamsGenericLoopDirectiveClass:
     case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
     case Stmt::OMPParallelGenericLoopDirectiveClass:
+    case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
     case Stmt::CapturedStmtClass:
     case Stmt::OMPUnrollDirectiveClass:
     case Stmt::OMPMetaDirectiveClass: {

diff --git a/clang/test/Analysis/cfg-openmp.cpp b/clang/test/Analysis/cfg-openmp.cpp
index 7d398fdb85f2d..11325deb20aca 100644
--- a/clang/test/Analysis/cfg-openmp.cpp
+++ b/clang/test/Analysis/cfg-openmp.cpp
@@ -747,3 +747,27 @@ void parallelloop(int argc) {
   for (int i = 0; i < 10; ++i)
     argc = x;
 }
+
+// CHECK-LABEL:  void targetparallelloop(int argc)
+void targetparallelloop(int argc) {
+  int x, cond, fp, rd, map;
+// CHECK-DAG:   [B3]
+// CHECK-DAG:  [[#TTDB:]]: x
+// CHECK-DAG:  [[#TTDB+1]]: [B3.[[#TTDB]]] (ImplicitCastExpr, LValueToRValue, int)
+// CHECK-DAG:  [[#TTDB+2]]: argc
+// CHECK-DAG:  [[#TTDB+3]]: [B3.[[#TTDB+2]]] = [B3.[[#TTDB+1]]]
+// CHECK-DAG:   [B1]
+// CHECK-DAG:  [[#TTD:]]: cond
+// CHECK-DAG:  [[#TTD+1]]: [B1.[[#TTD]]] (ImplicitCastExpr, LValueToRValue, int)
+// CHECK-DAG:  [[#TTD+2]]: [B1.[[#TTD+1]]] (ImplicitCastExpr, IntegralToBoolean, _Bool)
+// CHECK-DAG:  [[#TTD+3]]: fp
+// CHECK-DAG:  [[#TTD+4]]: rd
+// CHECK-DAG:  [[#TTD+5]]: [B3.[[#TTDB+2]]]
+// CHECK-DAG:  [[#TTD+6]]: [B3.[[#TTDB]]]
+// CHECK-DAG:  [[#TTD+7]]: #pragma omp target parallel loop if(cond) firstprivate(fp) reduction(+: rd) map(alloc: map)
+// CHECK-DAG:    for (int i = 0;
+// CHECK-DAG:        [B3.[[#TTDB+3]]];
+#pragma omp target parallel loop if(cond) firstprivate(fp) reduction(+:rd) map(alloc:map)
+  for (int i = 0; i < 10; ++i)
+    argc = x;
+}

diff --git a/clang/test/OpenMP/target_parallel_generic_loop_ast_print.cpp b/clang/test/OpenMP/target_parallel_generic_loop_ast_print.cpp
new file mode 100644
index 0000000000000..c9bd78457c1c4
--- /dev/null
+++ b/clang/test/OpenMP/target_parallel_generic_loop_ast_print.cpp
@@ -0,0 +1,123 @@
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=51 \
+// RUN:   -fsyntax-only -verify %s
+
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=51 \
+// RUN:   -ast-print %s | FileCheck %s
+
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=51 \
+// RUN:   -emit-pch -o %t %s
+
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=51 \
+// RUN:   -include-pch %t -ast-print %s | FileCheck %s
+
+// expected-no-diagnostics
+
+#ifndef HEADER
+#define HEADER
+
+typedef void **omp_allocator_handle_t;
+extern const omp_allocator_handle_t omp_null_allocator;
+extern const omp_allocator_handle_t omp_default_mem_alloc;
+extern const omp_allocator_handle_t omp_large_cap_mem_alloc;
+extern const omp_allocator_handle_t omp_const_mem_alloc;
+extern const omp_allocator_handle_t omp_high_bw_mem_alloc;
+extern const omp_allocator_handle_t omp_low_lat_mem_alloc;
+extern const omp_allocator_handle_t omp_cgroup_mem_alloc;
+extern const omp_allocator_handle_t omp_pteam_mem_alloc;
+extern const omp_allocator_handle_t omp_thread_mem_alloc;
+
+//CHECK: template <typename T, int C, int D> void templ_foo(T t) {
+//CHECK:   T j, z;
+//CHECK:   #pragma omp target parallel loop device(D) collapse(C) reduction(+: z) lastprivate(j) bind(thread) num_threads(C + 2)
+//CHECK:   for (T i = 0; i < t; ++i)
+//CHECK:       for (j = 0; j < t; ++j)
+//CHECK:           z += i + j;
+//CHECK: }
+
+//CHECK: template<> void templ_foo<int, 2, 0>(int t) {
+//CHECK:     int j, z;
+//CHECK:     #pragma omp target parallel loop device(0) collapse(2) reduction(+: z) lastprivate(j) bind(thread) num_threads(2 + 2)
+//CHECK:         for (int i = 0; i < t; ++i)
+//CHECK:             for (j = 0; j < t; ++j)
+//CHECK:                 z += i + j;
+//CHECK: }
+template <typename T, int C, int D>
+void templ_foo(T t) {
+
+  T j,z;
+  #pragma omp target parallel loop device(D) collapse(C) reduction(+:z) lastprivate(j) bind(thread) num_threads(C+2)
+  for (T i = 0; i<t; ++i)
+    for (j = 0; j<t; ++j)
+      z += i+j;
+}
+
+
+//CHECK: void test() {
+void test() {
+  constexpr int N = 100;
+  float MTX[N][N];
+  int aaa[1000];
+
+  //CHECK: #pragma omp target parallel loop map(tofrom: MTX)
+  #pragma omp target parallel loop map(MTX)
+  for (auto j = 0; j < N; ++j) {
+    MTX[0][j] = 0;
+  }
+
+  int j, z, z1;
+  //CHECK: #pragma omp target parallel loop collapse(2) private(z) lastprivate(j) order(concurrent) reduction(+: z1) bind(parallel)
+  #pragma omp target parallel loop collapse(2) private(z) lastprivate(j) \
+                         order(concurrent) reduction(+:z1) bind(parallel)
+  for (auto i = 0; i < N; ++i) {
+    for (j = 0; j < N; ++j) {
+      z = i+j;
+      MTX[i][j] = z;
+      z1 += z;
+    }
+  }
+
+  //CHECK: #pragma omp target parallel loop bind(thread) num_threads(16) default(none)
+  #pragma omp target parallel loop bind(thread) num_threads(16) default(none)
+  for (auto i = 0; i < N; ++i) { }
+
+  int pr;
+  int zzz;
+  //CHECK: #pragma omp target parallel loop private(zzz) uses_allocators(omp_default_mem_alloc) allocate(omp_default_mem_alloc: zzz) if(1) device(0) proc_bind(close) map(tofrom: pr)
+  #pragma omp target parallel loop private(zzz) uses_allocators(omp_default_mem_alloc) allocate(omp_default_mem_alloc:zzz) if(1) device(0) proc_bind(close) map(tofrom:pr)
+  for (int i=0; i<1000; ++i) {
+    zzz = i + 1;
+    pr = 33;
+  }
+
+  int fpr = 10;
+  int k;
+  int s = 20;
+  //CHECK: #pragma omp target parallel loop bind(thread) private(pr) firstprivate(fpr) shared(s) allocate(k) reduction(+: k)
+  #pragma omp target parallel loop bind(thread) private(pr) firstprivate(fpr) \
+                        shared(s) allocate(k)  reduction(+:k)
+  for (auto i = 0; i < N; ++i) {
+    pr = i + fpr + s;
+  }
+
+  short y = 3;
+  //CHECK: #pragma omp target parallel loop map(tofrom: y) depend(out : y)
+  #pragma omp target parallel loop map(tofrom:y) depend(out:y)
+  for (int i=0; i<10; ++i) {
+    y = 3+i;
+  }
+}
+
+//CHECK: void nobindingfunc() {
+void nobindingfunc()
+{
+  //CHECK: #pragma omp target parallel loop
+  #pragma omp target parallel loop
+  for (int i=0; i<10; ++i) { }
+}
+
+void bar()
+{
+  templ_foo<int,2,0>(8);
+}
+
+#endif // HEADER

diff --git a/clang/test/OpenMP/target_parallel_generic_loop_messages.cpp b/clang/test/OpenMP/target_parallel_generic_loop_messages.cpp
new file mode 100644
index 0000000000000..64a828f60a760
--- /dev/null
+++ b/clang/test/OpenMP/target_parallel_generic_loop_messages.cpp
@@ -0,0 +1,165 @@
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -verify -fopenmp \
+// RUN:  -fopenmp-version=51 -Wuninitialized %s
+
+void foo()
+{
+  int i,j,k;
+  int z;
+
+  // expected-error@+2 {{statement after '#pragma omp target parallel loop' must be a for loop}}
+  #pragma omp target parallel loop bind(thread)
+  i = 0;
+
+  // OpenMP 5.1 [2.22 Nesting of regions]
+  //
+  // A barrier region may not be closely nested inside a worksharing, loop,
+  // task, taskloop, critical, ordered, atomic, or masked region.
+
+  // expected-error@+3 {{region cannot be closely nested inside 'target parallel loop' region}}
+  #pragma omp target parallel loop bind(thread)
+  for (i=0; i<1000; ++i) {
+    #pragma omp barrier
+  }
+
+  // A masked region may not be closely nested inside a worksharing, loop,
+  // atomic, task, or taskloop region.
+
+  // expected-error@+3 {{region cannot be closely nested inside 'target parallel loop' region}}
+  #pragma omp target parallel loop bind(thread)
+  for (i=0; i<1000; ++i) {
+    #pragma omp masked filter(2)
+    { }
+  }
+
+  // An ordered region that corresponds to an ordered construct without any
+  // clause or with the threads or depend clause may not be closely nested
+  // inside a critical, ordered, loop, atomic, task, or taskloop region.
+
+  // expected-error@+3 {{region cannot be closely nested inside 'target parallel loop' region; perhaps you forget to enclose 'omp ordered' directive into a for or a parallel for region with 'ordered' clause?}}
+  #pragma omp target parallel loop bind(thread)
+  for (i=0; i<1000; ++i) {
+    #pragma omp ordered
+    { }
+  }
+
+  // expected-error@+3 {{region cannot be closely nested inside 'target parallel loop' region; perhaps you forget to enclose 'omp ordered' directive into a for or a parallel for region with 'ordered' clause?}}
+  #pragma omp target parallel loop bind(thread)
+  for (i=0; i<1000; ++i) {
+    #pragma omp ordered threads
+    { }
+  }
+
+  // expected-error@+3 {{region cannot be closely nested inside 'target parallel loop' region; perhaps you forget to enclose 'omp ordered' directive into a for or a parallel for region with 'ordered' clause?}}
+  #pragma omp target parallel loop bind(thread)
+  for (i=0; i<1000; ++i) {
+    #pragma omp ordered depend(source)
+  }
+
+  // bind clause
+
+  // expected-error@+1 {{directive '#pragma omp target parallel loop' cannot contain more than one 'bind' clause}}
+  #pragma omp target parallel loop bind(thread) bind(thread)
+  for (i=0; i<1000; ++i) {
+  }
+
+  // expected-error@+1 {{expected 'teams', 'parallel' or 'thread' in OpenMP clause 'bind'}}
+  #pragma omp target parallel loop bind(other)
+  for (i=0; i<1000; ++i) {
+  }
+
+  // collapse clause
+
+  // expected-error@+4 {{expected 2 for loops after '#pragma omp target parallel loop', but found only 1}}
+  // expected-note@+1 {{as specified in 'collapse' clause}}
+  #pragma omp target parallel loop collapse(2) bind(thread)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+
+  // expected-error@+1 {{directive '#pragma omp target parallel loop' cannot contain more than one 'collapse' clause}}
+  #pragma omp target parallel loop collapse(2) collapse(2) bind(thread)
+  for (i=0; i<1000; ++i)
+    for (j=0; j<1000; ++j)
+      z = i+j+11;
+
+  // order clause
+
+  // expected-error@+1 {{expected 'concurrent' in OpenMP clause 'order'}}
+  #pragma omp target parallel loop order(foo) bind(thread)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+
+  // private clause
+
+  // expected-error@+1 {{use of undeclared identifier 'undef_var'}}
+  #pragma omp target parallel loop private(undef_var) bind(thread)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+
+  // lastprivate
+
+  // A list item may not appear in a lastprivate clause unless it is the loop
+  // iteration variable of a loop that is associated with the construct.
+
+  // expected-error@+1 {{only loop iteration variables are allowed in 'lastprivate' clause in 'omp target parallel loop' directives}}
+  #pragma omp target parallel loop lastprivate(z) bind(thread)
+  for (i=0; i<1000; ++i) {
+    z = i+11;
+  }
+
+  // expected-error@+1 {{only loop iteration variables are allowed in 'lastprivate' clause in 'omp target parallel loop' directives}}
+  #pragma omp target parallel loop lastprivate(k) collapse(2) bind(thread)
+  for (i=0; i<1000; ++i)
+    for (j=0; j<1000; ++j)
+      for (k=0; k<1000; ++k)
+        z = i+j+k+11;
+
+  // reduction
+
+  // expected-error@+1 {{use of undeclared identifier 'undef_var'}}
+  #pragma omp target parallel loop reduction(+:undef_var) bind(thread)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+
+  // nowait
+
+  // expected-error@+1 {{directive '#pragma omp target parallel loop' cannot contain more than one 'nowait' clause}}
+  #pragma omp target parallel loop nowait nowait
+  for (i=0; i<1000; ++i)
+    z = i+11;
+
+  // num_threads
+
+  // expected-error@+1 {{directive '#pragma omp target parallel loop' cannot contain more than one 'num_threads' clause}}
+  #pragma omp target parallel loop num_threads(4) num_threads(4)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+
+  // proc_bind
+
+  // expected-error@+1 {{directive '#pragma omp target parallel loop' cannot contain more than one 'proc_bind' clause}}
+  #pragma omp target parallel loop proc_bind(close) proc_bind(primary)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+}
+
+template <typename T, int C>
+void templ_test(T t) {
+  T i,z;
+
+  // expected-error@+4 {{expected 2 for loops after '#pragma omp target parallel loop', but found only 1}}
+  // expected-note@+1 {{as specified in 'collapse' clause}}
+  #pragma omp target parallel loop collapse(C) bind(thread)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+
+  // expected-error@+1 {{only loop iteration variables are allowed in 'lastprivate' clause in 'omp target parallel loop' directives}}
+  #pragma omp target parallel loop lastprivate(z) bind(thread)
+  for (i=0; i<1000; ++i) {
+    z = i+11;
+  }
+}
+
+void bar()
+{
+  templ_test<int, 2>(16); // expected-note {{in instantiation of function template specialization 'templ_test<int, 2>' requested here}}
+}

diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp
index 54f461e104bb8..681dbba432066 100644
--- a/clang/tools/libclang/CIndex.cpp
+++ b/clang/tools/libclang/CIndex.cpp
@@ -5732,6 +5732,8 @@ CXString clang_getCursorKindSpelling(enum CXCursorKind Kind) {
     return cxstring::createRef("OMPTargetTeamsGenericLoopDirective");
   case CXCursor_OMPParallelGenericLoopDirective:
     return cxstring::createRef("OMPParallelGenericLoopDirective");
+  case CXCursor_OMPTargetParallelGenericLoopDirective:
+    return cxstring::createRef("OMPTargetParallelGenericLoopDirective");
   case CXCursor_OverloadCandidate:
     return cxstring::createRef("OverloadCandidate");
   case CXCursor_TypeAliasTemplateDecl:

diff --git a/clang/tools/libclang/CXCursor.cpp b/clang/tools/libclang/CXCursor.cpp
index bbd155e060dc9..fbe30516e507d 100644
--- a/clang/tools/libclang/CXCursor.cpp
+++ b/clang/tools/libclang/CXCursor.cpp
@@ -835,6 +835,9 @@ CXCursor cxcursor::MakeCXCursor(const Stmt *S, const Decl *Parent,
   case Stmt::OMPParallelGenericLoopDirectiveClass:
     K = CXCursor_OMPParallelGenericLoopDirective;
     break;
+  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
+    K = CXCursor_OMPTargetParallelGenericLoopDirective;
+    break;
   case Stmt::BuiltinBitCastExprClass:
     K = CXCursor_BuiltinBitCastExpr;
   }

diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td
index d8ccaede74b34..053859bf361c8 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMP.td
+++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td
@@ -1828,6 +1828,33 @@ def OMP_parallel_loop : Directive<"parallel loop"> {
     VersionedClause<OMPC_ProcBind>,
   ];
 }
+def OMP_target_parallel_loop : Directive<"target parallel loop"> {
+  let allowedClauses = [
+    VersionedClause<OMPC_Allocate>,
+    VersionedClause<OMPC_Copyin>,
+    VersionedClause<OMPC_Depend>,
+    VersionedClause<OMPC_Device>,
+    VersionedClause<OMPC_FirstPrivate>,
+    VersionedClause<OMPC_IsDevicePtr>,
+    VersionedClause<OMPC_LastPrivate>,
+    VersionedClause<OMPC_Map>,
+    VersionedClause<OMPC_Private>,
+    VersionedClause<OMPC_Reduction>,
+    VersionedClause<OMPC_Shared>,
+    VersionedClause<OMPC_UsesAllocators, 50>,
+  ];
+  let allowedOnceClauses = [
+    VersionedClause<OMPC_Bind, 50>,
+    VersionedClause<OMPC_Collapse>,
+    VersionedClause<OMPC_Default>,
+    VersionedClause<OMPC_DefaultMap>,
+    VersionedClause<OMPC_If>,
+    VersionedClause<OMPC_NoWait>,
+    VersionedClause<OMPC_NumThreads>,
+    VersionedClause<OMPC_Order>,
+    VersionedClause<OMPC_ProcBind>,
+  ];
+}
 def OMP_Metadirective : Directive<"metadirective"> {
   let allowedClauses = [VersionedClause<OMPC_When>];
   let allowedOnceClauses = [VersionedClause<OMPC_Default>];


        

