[clang] [llvm] [OpenMP] Add support for '#pragma omp stripe'. (PR #119891)
Zahira Ammarguellat via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 13 07:49:29 PST 2024
https://github.com/zahiraam created https://github.com/llvm/llvm-project/pull/119891
This patch adds support for the OpenMP 'stripe' loop transformation directive, '#pragma omp stripe sizes(...)': parsing (with 'sizes' as a required clause), semantic analysis and de-sugaring, code generation, AST printing and profiling, (de)serialization, static analyzer and libclang support, plus AST-print, codegen, and diagnostics tests.
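For reference, SemaOpenMP::ActOnOpenMPStripeDirective de-sugars the directive into an outer "floor" loop that advances by the stripe size and an inner "stripe" loop that covers a single stripe, clamped to the logical iteration count. A minimal sketch of the effect on a single loop (source-level rendering for illustration only; the implementation builds the AST directly, and the generated induction variables are actually named like .floor_0.iv.i and .stripe_0.iv.i):

  #pragma omp stripe sizes(4)
  for (int i = 0; i < 42; ++i)
    body(i);

becomes, roughly:

  for (int floor_iv = 0; floor_iv < 42; floor_iv += 4)
    for (int stripe_iv = floor_iv;
         stripe_iv < (42 < floor_iv + 4 ? 42 : floor_iv + 4); ++stripe_iv) {
      int i = 0 + stripe_iv * 1; // original IV derived from the logical iteration
      body(i);
    }

A non-constant stripe size is clamped to at least one, (TS <= 0) ? 1 : TS, so every original iteration still executes exactly once.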
From 378bc7a1929930b5741b5ed2d37895d439b2c9e7 Mon Sep 17 00:00:00 2001
From: Zahira Ammarguellat <zahira.ammarguellat at intel.com>
Date: Fri, 13 Dec 2024 07:45:00 -0800
Subject: [PATCH] [OpenMP] Add support for '#pragma omp stripe'.
---
clang/include/clang-c/Index.h | 4 +
clang/include/clang/AST/RecursiveASTVisitor.h | 3 +
clang/include/clang/AST/StmtOpenMP.h | 79 +-
clang/include/clang/Basic/StmtNodes.td | 1 +
clang/include/clang/Sema/SemaOpenMP.h | 3 +
.../include/clang/Serialization/ASTBitCodes.h | 1 +
clang/lib/AST/StmtOpenMP.cpp | 21 +
clang/lib/AST/StmtPrinter.cpp | 5 +
clang/lib/AST/StmtProfile.cpp | 4 +
clang/lib/Basic/OpenMPKinds.cpp | 3 +-
clang/lib/CodeGen/CGStmtOpenMP.cpp | 8 +
clang/lib/CodeGen/CodeGenFunction.h | 1 +
clang/lib/Parse/ParseOpenMP.cpp | 5 +-
clang/lib/Sema/SemaExceptionSpec.cpp | 2 +-
clang/lib/Sema/SemaOpenMP.cpp | 279 +++
clang/lib/Sema/TreeTransform.h | 11 +
clang/lib/Serialization/ASTReaderStmt.cpp | 11 +
clang/lib/Serialization/ASTWriterStmt.cpp | 5 +
clang/lib/StaticAnalyzer/Core/ExprEngine.cpp | 1 +
clang/test/OpenMP/stripe_ast_print.cpp | 198 +++
clang/test/OpenMP/stripe_ast_print.cpp.mine | 163 ++
clang/test/OpenMP/stripe_codegen.cpp | 1544 +++++++++++++++++
.../OpenMP/stripe_codegen_for_dependent.cpp | 193 +++
clang/test/OpenMP/stripe_messages.cpp | 163 ++
clang/tools/libclang/CIndex.cpp | 7 +
clang/tools/libclang/CXCursor.cpp | 5 +-
llvm/include/llvm/Frontend/OpenMP/OMP.td | 7 +
27 files changed, 2721 insertions(+), 6 deletions(-)
create mode 100644 clang/test/OpenMP/stripe_ast_print.cpp
create mode 100644 clang/test/OpenMP/stripe_ast_print.cpp.mine
create mode 100644 clang/test/OpenMP/stripe_codegen.cpp
create mode 100644 clang/test/OpenMP/stripe_codegen_for_dependent.cpp
create mode 100644 clang/test/OpenMP/stripe_messages.cpp
diff --git a/clang/include/clang-c/Index.h b/clang/include/clang-c/Index.h
index 8fc06328f0bcef..b8678d1bc77974 100644
--- a/clang/include/clang-c/Index.h
+++ b/clang/include/clang-c/Index.h
@@ -2158,6 +2158,10 @@ enum CXCursorKind {
*/
CXCursor_OMPAssumeDirective = 309,
+ /** OpenMP stripe directive.
+ */
+ CXCursor_OMPStripeDirective = 310,
+
/** OpenACC Compute Construct.
*/
CXCursor_OpenACCComputeConstruct = 320,
diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h
index 76b598a5db2382..e0fe4e5ef20beb 100644
--- a/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -3038,6 +3038,9 @@ DEF_TRAVERSE_STMT(OMPSimdDirective,
DEF_TRAVERSE_STMT(OMPTileDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
+DEF_TRAVERSE_STMT(OMPStripeDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
DEF_TRAVERSE_STMT(OMPUnrollDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
diff --git a/clang/include/clang/AST/StmtOpenMP.h b/clang/include/clang/AST/StmtOpenMP.h
index 9ec49b8683dc8e..dc7873d7d12102 100644
--- a/clang/include/clang/AST/StmtOpenMP.h
+++ b/clang/include/clang/AST/StmtOpenMP.h
@@ -994,7 +994,9 @@ class OMPLoopTransformationDirective : public OMPLoopBasedDirective {
static bool classof(const Stmt *T) {
Stmt::StmtClass C = T->getStmtClass();
return C == OMPTileDirectiveClass || C == OMPUnrollDirectiveClass ||
- C == OMPReverseDirectiveClass || C == OMPInterchangeDirectiveClass;
+ C == OMPReverseDirectiveClass || C == OMPInterchangeDirectiveClass ||
+ C == OMPStripeDirectiveClass;
}
};
@@ -5621,6 +5623,81 @@ class OMPTileDirective final : public OMPLoopTransformationDirective {
}
};
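+/// This represents the '#pragma omp stripe' loop transformation directive.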
+class OMPStripeDirective final : public OMPLoopTransformationDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+
+ /// Default list of offsets.
+ enum {
+ PreInitsOffset = 0,
+ TransformedStmtOffset,
+ };
+
+ explicit OMPStripeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned NumLoops)
+ : OMPLoopTransformationDirective(OMPStripeDirectiveClass,
+ llvm::omp::OMPD_stripe, StartLoc, EndLoc,
+ NumLoops) {
+ setNumGeneratedLoops(3 * NumLoops);
+ }
+
+ void setPreInits(Stmt *PreInits) {
+ Data->getChildren()[PreInitsOffset] = PreInits;
+ }
+
+ void setTransformedStmt(Stmt *S) {
+ Data->getChildren()[TransformedStmtOffset] = S;
+ }
+
+public:
+ /// Create a new AST node representation for '#pragma omp stripe'.
+ ///
+ /// \param C Context of the AST.
+ /// \param StartLoc Location of the introducer (e.g. the 'omp' token).
+ /// \param EndLoc Location of the directive's end (e.g. the tok::eod).
+ /// \param Clauses The directive's clauses.
+ /// \param NumLoops Number of associated loops (number of items in the
+ /// 'sizes' clause).
+ /// \param AssociatedStmt The outermost associated loop.
+ /// \param TransformedStmt The loop nest after striping, or nullptr in
+ /// dependent contexts.
+ /// \param PreInits Helper preinits statements for the loop nest.
+ static OMPStripeDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, unsigned NumLoops, Stmt *AssociatedStmt,
+ Stmt *TransformedStmt, Stmt *PreInits);
+
+ /// Build an empty '#pragma omp stripe' AST node for deserialization.
+ ///
+ /// \param C Context of the AST.
+ /// \param NumClauses Number of clauses to allocate.
+ /// \param NumLoops Number of associated loops to allocate.
+ static OMPStripeDirective *
+ CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned NumLoops);
+
+ /// Gets/sets the associated loops after striping.
+ ///
+ /// This is in de-sugared format stored as a CompoundStmt.
+ ///
+ /// \code
+ /// for (...)
+ /// ...
+ /// \endcode
+ ///
+ /// Note that if the generated loops become associated loops of another
+ /// directive, they may need to be hoisted before them.
+ Stmt *getTransformedStmt() const {
+ return Data->getChildren()[TransformedStmtOffset];
+ }
+
+ /// Return preinits statement.
+ Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPStripeDirectiveClass;
+ }
+};
+
/// This represents the '#pragma omp unroll' loop transformation directive.
///
/// \code
diff --git a/clang/include/clang/Basic/StmtNodes.td b/clang/include/clang/Basic/StmtNodes.td
index 89f5a76eb11312..c85ffb3154686f 100644
--- a/clang/include/clang/Basic/StmtNodes.td
+++ b/clang/include/clang/Basic/StmtNodes.td
@@ -229,6 +229,7 @@ def OMPParallelDirective : StmtNode<OMPExecutableDirective>;
def OMPSimdDirective : StmtNode<OMPLoopDirective>;
def OMPLoopTransformationDirective : StmtNode<OMPLoopBasedDirective, 1>;
def OMPTileDirective : StmtNode<OMPLoopTransformationDirective>;
+def OMPStripeDirective : StmtNode<OMPLoopTransformationDirective>;
def OMPUnrollDirective : StmtNode<OMPLoopTransformationDirective>;
def OMPReverseDirective : StmtNode<OMPLoopTransformationDirective>;
def OMPInterchangeDirective : StmtNode<OMPLoopTransformationDirective>;
diff --git a/clang/include/clang/Sema/SemaOpenMP.h b/clang/include/clang/Sema/SemaOpenMP.h
index 3d1cc4fab1c10f..680bd3f75e705b 100644
--- a/clang/include/clang/Sema/SemaOpenMP.h
+++ b/clang/include/clang/Sema/SemaOpenMP.h
@@ -440,6 +440,9 @@ class SemaOpenMP : public SemaBase {
StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
+ StmtResult ActOnOpenMPStripeDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc);
/// Called on well-formed '#pragma omp unroll' after parsing of its clauses
/// and the associated statement.
StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
diff --git a/clang/include/clang/Serialization/ASTBitCodes.h b/clang/include/clang/Serialization/ASTBitCodes.h
index af0e08d800bf28..b3a7a0163ab44c 100644
--- a/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/clang/include/clang/Serialization/ASTBitCodes.h
@@ -1922,6 +1922,7 @@ enum StmtCode {
STMT_OMP_PARALLEL_DIRECTIVE,
STMT_OMP_SIMD_DIRECTIVE,
STMT_OMP_TILE_DIRECTIVE,
+ STMT_OMP_STRIPE_DIRECTIVE,
STMT_OMP_UNROLL_DIRECTIVE,
STMT_OMP_REVERSE_DIRECTIVE,
STMT_OMP_INTERCHANGE_DIRECTIVE,
diff --git a/clang/lib/AST/StmtOpenMP.cpp b/clang/lib/AST/StmtOpenMP.cpp
index 4f441c2f92dc7f..f23647ef124049 100644
--- a/clang/lib/AST/StmtOpenMP.cpp
+++ b/clang/lib/AST/StmtOpenMP.cpp
@@ -417,6 +417,27 @@ OMPTileDirective::Create(const ASTContext &C, SourceLocation StartLoc,
return Dir;
}
+OMPStripeDirective *
+OMPStripeDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
+ unsigned NumLoops, Stmt *AssociatedStmt,
+ Stmt *TransformedStmt, Stmt *PreInits) {
+ OMPStripeDirective *Dir = createDirective<OMPStripeDirective>(
+ C, Clauses, AssociatedStmt, TransformedStmtOffset + 1, StartLoc, EndLoc,
+ NumLoops);
+ Dir->setTransformedStmt(TransformedStmt);
+ Dir->setPreInits(PreInits);
+ return Dir;
+}
+
+OMPStripeDirective *OMPStripeDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned NumLoops) {
+ return createEmptyDirective<OMPStripeDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true, TransformedStmtOffset + 1,
+ SourceLocation(), SourceLocation(), NumLoops);
+}
+
OMPTileDirective *OMPTileDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned NumLoops) {
diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp
index 7507c9d14327a0..a4bfb8564b4d7f 100644
--- a/clang/lib/AST/StmtPrinter.cpp
+++ b/clang/lib/AST/StmtPrinter.cpp
@@ -755,6 +755,11 @@ void StmtPrinter::VisitOMPTileDirective(OMPTileDirective *Node) {
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPStripeDirective(OMPStripeDirective *Node) {
+ Indent() << "#pragma omp stripe";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPUnrollDirective(OMPUnrollDirective *Node) {
Indent() << "#pragma omp unroll";
PrintOMPExecutableDirective(Node);
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index 3dfbef1cdb712d..33e074bdd68578 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -1000,6 +1000,10 @@ void StmtProfiler::VisitOMPTileDirective(const OMPTileDirective *S) {
VisitOMPLoopTransformationDirective(S);
}
+void StmtProfiler::VisitOMPStripeDirective(const OMPStripeDirective *S) {
+ VisitOMPLoopTransformationDirective(S);
+}
+
void StmtProfiler::VisitOMPUnrollDirective(const OMPUnrollDirective *S) {
VisitOMPLoopTransformationDirective(S);
}
diff --git a/clang/lib/Basic/OpenMPKinds.cpp b/clang/lib/Basic/OpenMPKinds.cpp
index 62a13f01481b28..a684fe9c470a02 100644
--- a/clang/lib/Basic/OpenMPKinds.cpp
+++ b/clang/lib/Basic/OpenMPKinds.cpp
@@ -700,7 +700,7 @@ bool clang::isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind) {
bool clang::isOpenMPLoopTransformationDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_tile || DKind == OMPD_unroll || DKind == OMPD_reverse ||
- DKind == OMPD_interchange;
+ DKind == OMPD_interchange || DKind == OMPD_stripe;
}
bool clang::isOpenMPCombinedParallelADirective(OpenMPDirectiveKind DKind) {
@@ -821,6 +821,7 @@ void clang::getOpenMPCaptureRegions(
case OMPD_single:
case OMPD_target_data:
case OMPD_taskgroup:
+ case OMPD_stripe:
// These directives (when standalone) use OMPD_unknown as the region,
// but when they're constituents of a compound directive, and other
// leafs from that directive have specific regions, then these directives
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 6cb37b20b7aeee..81872a705b26c6 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -187,6 +187,8 @@ class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
PreInits = LD->getPreInits();
} else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) {
PreInits = Tile->getPreInits();
+ } else if (const auto *Stripe = dyn_cast<OMPStripeDirective>(&S)) {
+ PreInits = Stripe->getPreInits();
} else if (const auto *Unroll = dyn_cast<OMPUnrollDirective>(&S)) {
PreInits = Unroll->getPreInits();
} else if (const auto *Reverse = dyn_cast<OMPReverseDirective>(&S)) {
@@ -2823,6 +2825,12 @@ void CodeGenFunction::EmitOMPTileDirective(const OMPTileDirective &S) {
EmitStmt(S.getTransformedStmt());
}
+void CodeGenFunction::EmitOMPStripeDirective(const OMPStripeDirective &S) {
+ // Emit the de-sugared statement.
+ OMPTransformDirectiveScopeRAII StripeScope(*this, &S);
+ EmitStmt(S.getTransformedStmt());
+}
+
void CodeGenFunction::EmitOMPReverseDirective(const OMPReverseDirective &S) {
// Emit the de-sugared statement.
OMPTransformDirectiveScopeRAII ReverseScope(*this, &S);
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index eaea0d8a08ac06..61c3faac772e6e 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -3820,6 +3820,7 @@ class CodeGenFunction : public CodeGenTypeCache {
void EmitOMPParallelDirective(const OMPParallelDirective &S);
void EmitOMPSimdDirective(const OMPSimdDirective &S);
void EmitOMPTileDirective(const OMPTileDirective &S);
+ void EmitOMPStripeDirective(const OMPStripeDirective &S);
void EmitOMPUnrollDirective(const OMPUnrollDirective &S);
void EmitOMPReverseDirective(const OMPReverseDirective &S);
void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S);
diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp
index b91928063169ee..973cef0668fdc8 100644
--- a/clang/lib/Parse/ParseOpenMP.cpp
+++ b/clang/lib/Parse/ParseOpenMP.cpp
@@ -2533,9 +2533,10 @@ StmtResult Parser::ParseOpenMPExecutableDirective(
}
}
- if (DKind == OMPD_tile && !SeenClauses[unsigned(OMPC_sizes)]) {
+ if ((DKind == OMPD_tile || DKind == OMPD_stripe) &&
+ !SeenClauses[unsigned(OMPC_sizes)]) {
Diag(Loc, diag::err_omp_required_clause)
- << getOpenMPDirectiveName(OMPD_tile) << "sizes";
+ << getOpenMPDirectiveName(DKind) << "sizes";
}
StmtResult AssociatedStmt;
diff --git a/clang/lib/Sema/SemaExceptionSpec.cpp b/clang/lib/Sema/SemaExceptionSpec.cpp
index 6a9f43d6f5215e..26da214f381034 100644
--- a/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -1467,7 +1467,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Stmt::OMPSectionDirectiveClass:
case Stmt::OMPSectionsDirectiveClass:
case Stmt::OMPSimdDirectiveClass:
- case Stmt::OMPTileDirectiveClass:
+ case Stmt::OMPTileDirectiveClass:
+ case Stmt::OMPStripeDirectiveClass:
case Stmt::OMPUnrollDirectiveClass:
case Stmt::OMPReverseDirectiveClass:
case Stmt::OMPInterchangeDirectiveClass:
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 66ff92f554fc42..762b20465ffb74 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -4386,6 +4386,7 @@ void SemaOpenMP::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind,
case OMPD_master:
case OMPD_section:
case OMPD_tile:
+ case OMPD_stripe:
case OMPD_unroll:
case OMPD_reverse:
case OMPD_interchange:
@@ -6183,6 +6184,10 @@ StmtResult SemaOpenMP::ActOnOpenMPExecutableDirective(
Res =
ActOnOpenMPTileDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
break;
+ case OMPD_stripe:
+ Res = ActOnOpenMPStripeDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ break;
case OMPD_unroll:
Res = ActOnOpenMPUnrollDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc);
@@ -14133,6 +14138,8 @@ bool SemaOpenMP::checkTransformableLoopNest(
Stmt *DependentPreInits;
if (auto *Dir = dyn_cast<OMPTileDirective>(Transform))
DependentPreInits = Dir->getPreInits();
+ else if (auto *Dir = dyn_cast<OMPStripeDirective>(Transform))
+ DependentPreInits = Dir->getPreInits();
else if (auto *Dir = dyn_cast<OMPUnrollDirective>(Transform))
DependentPreInits = Dir->getPreInits();
else if (auto *Dir = dyn_cast<OMPReverseDirective>(Transform))
@@ -14477,6 +14484,278 @@ StmtResult SemaOpenMP::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
buildPreInits(Context, PreInits));
}
+StmtResult SemaOpenMP::ActOnOpenMPStripeDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ ASTContext &Context = getASTContext();
+ Scope *CurScope = SemaRef.getCurScope();
+
+ const auto *SizesClause =
+ OMPExecutableDirective::getSingleClause<OMPSizesClause>(Clauses);
+ if (!SizesClause ||
+ llvm::any_of(SizesClause->getSizesRefs(), [](Expr *E) { return !E; }))
+ return StmtError();
+ unsigned NumLoops = SizesClause->getNumSizes();
+
+ // Empty statement should only be possible if there already was an error.
+ if (!AStmt)
+ return StmtError();
+
+ // Verify and diagnose loop nest.
+ SmallVector<OMPLoopBasedDirective::HelperExprs, 4> LoopHelpers(NumLoops);
+ Stmt *Body = nullptr;
+ SmallVector<SmallVector<Stmt *, 0>, 4> OriginalInits;
+ if (!checkTransformableLoopNest(OMPD_stripe, AStmt, NumLoops, LoopHelpers, Body,
+ OriginalInits))
+ return StmtError();
+
+ // Delay striping until the template is completely instantiated.
+ if (SemaRef.CurContext->isDependentContext())
+ return OMPStripeDirective::Create(Context, StartLoc, EndLoc, Clauses,
+ NumLoops, AStmt, nullptr, nullptr);
+
+ assert(LoopHelpers.size() == NumLoops &&
+ "Expecting loop iteration space dimensionality to match number of "
+ "affected loops");
+ assert(OriginalInits.size() == NumLoops &&
+ "Expecting loop iteration space dimensionality to match number of "
+ "affected loops");
+
+ // Collect all affected loop statements.
+ SmallVector<Stmt *> LoopStmts(NumLoops, nullptr);
+ collectLoopStmts(AStmt, LoopStmts);
+
+ SmallVector<Stmt *, 4> PreInits;
+ CaptureVars CopyTransformer(SemaRef);
+
+ // Create iteration variables for the generated loops.
+ SmallVector<VarDecl *, 4> FloorIndVars;
+ SmallVector<VarDecl *, 4> StripeIndVars;
+ FloorIndVars.resize(NumLoops);
+ StripeIndVars.resize(NumLoops);
+ for (unsigned I = 0; I < NumLoops; ++I) {
+ OMPLoopBasedDirective::HelperExprs &LoopHelper = LoopHelpers[I];
+
+ assert(LoopHelper.Counters.size() == 1 &&
+ "Expect single-dimensional loop iteration space");
+ auto *OrigCntVar = cast<DeclRefExpr>(LoopHelper.Counters.front());
+ std::string OrigVarName = OrigCntVar->getNameInfo().getAsString();
+ DeclRefExpr *IterVarRef = cast<DeclRefExpr>(LoopHelper.IterationVarRef);
+ QualType CntTy = IterVarRef->getType();
+
+ // Iteration variable for the floor (i.e. outer) loop.
+ {
+ std::string FloorCntName =
+ (Twine(".floor_") + llvm::utostr(I) + ".iv." + OrigVarName).str();
+ VarDecl *FloorCntDecl =
+ buildVarDecl(SemaRef, {}, CntTy, FloorCntName, nullptr, OrigCntVar);
+ FloorIndVars[I] = FloorCntDecl;
+ }
+
+ // Iteration variable for the stripe (i.e. inner) loop.
+ {
+ std::string StripeCntName =
+ (Twine(".stripe_") + llvm::utostr(I) + ".iv." + OrigVarName).str();
+
+ // Reuse the iteration variable created by checkOpenMPLoop. It is also
+ // used by the expressions to derive the original iteration variable's
+ // value from the logical iteration number.
+ auto *StripeCntDecl = cast<VarDecl>(IterVarRef->getDecl());
+ StripeCntDecl->setDeclName(
+ &SemaRef.PP.getIdentifierTable().get(StripeCntName));
+ StripeIndVars[I] = StripeCntDecl;
+ }
+
+ addLoopPreInits(Context, LoopHelper, LoopStmts[I], OriginalInits[I],
+ PreInits);
+ }
+
+ // Once the original iteration values are set, append the innermost body.
+ Stmt *Inner = Body;
+
+ auto MakeDimStripeSize = [&SemaRef = this->SemaRef, &CopyTransformer, &Context,
+ SizesClause, CurScope](int I) -> Expr * {
+ Expr *DimStripeSizeExpr = SizesClause->getSizesRefs()[I];
+ if (isa<ConstantExpr>(DimStripeSizeExpr))
+ return AssertSuccess(CopyTransformer.TransformExpr(DimStripeSizeExpr));
+
+ // When the stripe size is not a constant but a variable, it is possible to
+ // pass non-positive numbers. For instance:
+ // \code{c}
+ // int a = 0;
+ // #pragma omp stripe sizes(a)
+ // for (int i = 0; i < 42; ++i)
+ // body(i);
+ // \endcode
+ // Although there is no meaningful interpretation of the stripe size, the body
+ // should still be executed 42 times to avoid surprises. To preserve the
+ // invariant that every loop iteration is executed exactly once and not
+ // cause an infinite loop, apply a minimum stripe size of one.
+ // Build expr:
+ // \code{c}
+ // (TS <= 0) ? 1 : TS
+ // \endcode
+ QualType DimTy = DimStripeSizeExpr->getType();
+ uint64_t DimWidth = Context.getTypeSize(DimTy);
+ IntegerLiteral *Zero = IntegerLiteral::Create(
+ Context, llvm::APInt::getZero(DimWidth), DimTy, {});
+ IntegerLiteral *One =
+ IntegerLiteral::Create(Context, llvm::APInt(DimWidth, 1), DimTy, {});
+ Expr *Cond = AssertSuccess(SemaRef.BuildBinOp(
+ CurScope, {}, BO_LE,
+ AssertSuccess(CopyTransformer.TransformExpr(DimStripeSizeExpr)), Zero));
+ Expr *MinOne = new (Context) ConditionalOperator(
+ Cond, {}, One, {},
+ AssertSuccess(CopyTransformer.TransformExpr(DimStripeSizeExpr)), DimTy,
+ VK_PRValue, OK_Ordinary);
+ return MinOne;
+ };
+
+ // Create stripe loops from the inside to the outside.
+ for (int I = NumLoops - 1; I >= 0; --I) {
+ OMPLoopBasedDirective::HelperExprs &LoopHelper = LoopHelpers[I];
+ Expr *NumIterations = LoopHelper.NumIterations;
+ auto *OrigCntVar = cast<DeclRefExpr>(LoopHelper.Counters[0]);
+ QualType IVTy = NumIterations->getType();
+ Stmt *LoopStmt = LoopStmts[I];
+
+ // Commonly used variables. One of the constraints of an AST is that every
+ // node object must appear at most once, hence we define lambdas that create
+ // a new AST node at every use.
+ auto MakeStripeIVRef = [&SemaRef = this->SemaRef, &StripeIndVars, I, IVTy,
+ OrigCntVar]() {
+ return buildDeclRefExpr(SemaRef, StripeIndVars[I], IVTy,
+ OrigCntVar->getExprLoc());
+ };
+ auto MakeFloorIVRef = [&SemaRef = this->SemaRef, &FloorIndVars, I, IVTy,
+ OrigCntVar]() {
+ return buildDeclRefExpr(SemaRef, FloorIndVars[I], IVTy,
+ OrigCntVar->getExprLoc());
+ };
+
+ // For init-statement: auto .stripe.iv = .floor.iv
+ SemaRef.AddInitializerToDecl(
+ StripeIndVars[I], SemaRef.DefaultLvalueConversion(MakeFloorIVRef()).get(),
+ /*DirectInit=*/false);
+ Decl *CounterDecl = StripeIndVars[I];
+ StmtResult InitStmt = new (Context)
+ DeclStmt(DeclGroupRef::Create(Context, &CounterDecl, 1),
+ OrigCntVar->getBeginLoc(), OrigCntVar->getEndLoc());
+ if (!InitStmt.isUsable())
+ return StmtError();
+
+ // For cond-expression:
+ // .stripe.iv < min(.floor.iv + DimStripeSize, NumIterations)
+ ExprResult EndOfStripe =
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_Add,
+ MakeFloorIVRef(), MakeDimStripeSize(I));
+ if (!EndOfStripe.isUsable())
+ return StmtError();
+ ExprResult IsPartialStripe =
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT,
+ NumIterations, EndOfStripe.get());
+ if (!IsPartialStripe.isUsable())
+ return StmtError();
+ ExprResult MinStripeAndIterSpace = SemaRef.ActOnConditionalOp(
+ LoopHelper.Cond->getBeginLoc(), LoopHelper.Cond->getEndLoc(),
+ IsPartialStripe.get(), NumIterations, EndOfStripe.get());
+ if (!MinStripeAndIterSpace.isUsable())
+ return StmtError();
+ ExprResult CondExpr =
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT,
+ MakeStripeIVRef(), MinStripeAndIterSpace.get());
+ if (!CondExpr.isUsable())
+ return StmtError();
+
+ // For incr-statement: ++.stripe.iv
+ ExprResult IncrStmt = SemaRef.BuildUnaryOp(
+ CurScope, LoopHelper.Inc->getExprLoc(), UO_PreInc, MakeStripeIVRef());
+ if (!IncrStmt.isUsable())
+ return StmtError();
+
+ // Statements to set the original iteration variable's value from the
+ // logical iteration number.
+ // The generated loop is:
+ // \code
+ // Original_for_init;
+ // for (auto .stripe.iv = .floor.iv;
+ // .stripe.iv < min(.floor.iv + DimStripeSize, NumIterations);
+ // ++.stripe.iv) {
+ // Original_Body;
+ // Original_counter_update;
+ // }
+ // \endcode
+ // FIXME: If the innermost body is a loop itself, inserting these
+ // statements prevents it from being recognized as a perfectly nested loop (e.g.
+ // for applying tiling again). If this is the case, sink the expressions
+ // further into the inner loop.
+ SmallVector<Stmt *, 4> BodyParts;
+ BodyParts.append(LoopHelper.Updates.begin(), LoopHelper.Updates.end());
+ if (auto *SourceCXXFor = dyn_cast<CXXForRangeStmt>(LoopStmt))
+ BodyParts.push_back(SourceCXXFor->getLoopVarStmt());
+ BodyParts.push_back(Inner);
+ Inner = CompoundStmt::Create(Context, BodyParts, FPOptionsOverride(),
+ Inner->getBeginLoc(), Inner->getEndLoc());
+ Inner = new (Context)
+ ForStmt(Context, InitStmt.get(), CondExpr.get(), nullptr,
+ IncrStmt.get(), Inner, LoopHelper.Init->getBeginLoc(),
+ LoopHelper.Init->getBeginLoc(), LoopHelper.Inc->getEndLoc());
+ }
+
+ // Create floor loops from the inside to the outside.
+ for (int I = NumLoops - 1; I >= 0; --I) {
+ auto &LoopHelper = LoopHelpers[I];
+ Expr *NumIterations = LoopHelper.NumIterations;
+ DeclRefExpr *OrigCntVar = cast<DeclRefExpr>(LoopHelper.Counters[0]);
+ QualType IVTy = NumIterations->getType();
+
+ // Commonly used variables. One of the constraints of an AST is that every
+ // node object must appear at most once, hence we define lambdas that create
+ // a new AST node at every use.
+ auto MakeFloorIVRef = [&SemaRef = this->SemaRef, &FloorIndVars, I, IVTy,
+ OrigCntVar]() {
+ return buildDeclRefExpr(SemaRef, FloorIndVars[I], IVTy,
+ OrigCntVar->getExprLoc());
+ };
+
+ // For init-statement: auto .floor.iv = 0
+ SemaRef.AddInitializerToDecl(
+ FloorIndVars[I],
+ SemaRef.ActOnIntegerConstant(LoopHelper.Init->getExprLoc(), 0).get(),
+ /*DirectInit=*/false);
+ Decl *CounterDecl = FloorIndVars[I];
+ StmtResult InitStmt = new (Context)
+ DeclStmt(DeclGroupRef::Create(Context, &CounterDecl, 1),
+ OrigCntVar->getBeginLoc(), OrigCntVar->getEndLoc());
+ if (!InitStmt.isUsable())
+ return StmtError();
+
+ // For cond-expression: .floor.iv < NumIterations
+ ExprResult CondExpr =
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT,
+ MakeFloorIVRef(), NumIterations);
+ if (!CondExpr.isUsable())
+ return StmtError();
+
+ // For incr-statement: .floor.iv += DimStripeSize
+ ExprResult IncrStmt =
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Inc->getExprLoc(), BO_AddAssign,
+ MakeFloorIVRef(), MakeDimStripeSize(I));
+ if (!IncrStmt.isUsable())
+ return StmtError();
+
+ Inner = new (Context)
+ ForStmt(Context, InitStmt.get(), CondExpr.get(), nullptr,
+ IncrStmt.get(), Inner, LoopHelper.Init->getBeginLoc(),
+ LoopHelper.Init->getBeginLoc(), LoopHelper.Inc->getEndLoc());
+ }
+
+ return OMPStripeDirective::Create(Context, StartLoc, EndLoc, Clauses, NumLoops,
+ AStmt, Inner,
+ buildPreInits(Context, PreInits));
+}
+
StmtResult SemaOpenMP::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 02d2fc018e3c35..f7ec6db40c97d1 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -9431,6 +9431,17 @@ TreeTransform<Derived>::TransformOMPTileDirective(OMPTileDirective *D) {
return Res;
}
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPStripeDirective(OMPStripeDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ D->getDirectiveKind(), DirName, nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPUnrollDirective(OMPUnrollDirective *D) {
diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp
index 9f4877b19d8705..73b2143671d4f7 100644
--- a/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -2437,6 +2437,10 @@ void ASTStmtReader::VisitOMPTileDirective(OMPTileDirective *D) {
VisitOMPLoopTransformationDirective(D);
}
+void ASTStmtReader::VisitOMPStripeDirective(OMPStripeDirective *D) {
+ VisitOMPLoopTransformationDirective(D);
+}
+
void ASTStmtReader::VisitOMPUnrollDirective(OMPUnrollDirective *D) {
VisitOMPLoopTransformationDirective(D);
}
@@ -3487,6 +3491,13 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_STRIPE_DIRECTIVE: {
+ unsigned NumLoops = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPStripeDirective::CreateEmpty(Context, NumClauses, NumLoops);
+ break;
+ }
+
case STMT_OMP_UNROLL_DIRECTIVE: {
assert(Record[ASTStmtReader::NumStmtFields] == 1 && "Unroll directive accepts only a single loop");
unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp
index 603aa5707ce9be..bf708a93917054 100644
--- a/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -2441,6 +2441,11 @@ void ASTStmtWriter::VisitOMPTileDirective(OMPTileDirective *D) {
Code = serialization::STMT_OMP_TILE_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPStripeDirective(OMPStripeDirective *D) {
+ VisitOMPLoopTransformationDirective(D);
+ Code = serialization::STMT_OMP_STRIPE_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPUnrollDirective(OMPUnrollDirective *D) {
VisitOMPLoopTransformationDirective(D);
Code = serialization::STMT_OMP_UNROLL_DIRECTIVE;
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index b46cd9fe86fc11..bd6d753c7041ce 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1812,6 +1812,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
case Stmt::OMPReverseDirectiveClass:
case Stmt::OMPTileDirectiveClass:
+ case Stmt::OMPStripeDirectiveClass:
case Stmt::OMPInterchangeDirectiveClass:
case Stmt::OMPInteropDirectiveClass:
case Stmt::OMPDispatchDirectiveClass:
diff --git a/clang/test/OpenMP/stripe_ast_print.cpp b/clang/test/OpenMP/stripe_ast_print.cpp
new file mode 100644
index 00000000000000..87e4ae93626caa
--- /dev/null
+++ b/clang/test/OpenMP/stripe_ast_print.cpp
@@ -0,0 +1,198 @@
+// Check no warnings/errors
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fsyntax-only -verify %s
+// expected-no-diagnostics
+
+// Check AST and unparsing
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -ast-dump %s | FileCheck %s --check-prefix=DUMP
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -ast-print %s | FileCheck %s --check-prefix=PRINT
+
+// Check same results after serialization round-trip
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -emit-pch -o %t %s
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -include-pch %t -ast-dump-all /dev/null | FileCheck %s --check-prefix=DUMP
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -include-pch %t -ast-print /dev/null | FileCheck %s --check-prefix=PRINT
+
+// placeholder for loop body code.
+extern "C" void body(...);
+
+
+// PRINT-LABEL: void foo1(
+// DUMP-LABEL: FunctionDecl {{.*}} foo1
+void foo1() {
+ // PRINT: #pragma omp stripe sizes(5, 5)
+ // DUMP: OMPStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ #pragma omp stripe sizes(5,5)
+ // PRINT: for (int i = 7; i < 17; i += 3)
+ // DUMP-NEXT: ForStmt
+ for (int i = 7; i < 17; i += 3)
+ // PRINT: for (int j = 7; j < 17; j += 3)
+ // DUMP: ForStmt
+ for (int j = 7; j < 17; j += 3)
+ // PRINT: body(i, j);
+ // DUMP: CallExpr
+ body(i, j);
+}
+
+
+// PRINT-LABEL: void foo2(
+// DUMP-LABEL: FunctionDecl {{.*}} foo2
+void foo2(int start1, int start2, int end1, int end2) {
+ // PRINT: #pragma omp stripe sizes(5, 5)
+ // DUMP: OMPStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ #pragma omp stripe sizes(5,5)
+ // PRINT: for (int i = start1; i < end1; i += 1)
+ // DUMP-NEXT: ForStmt
+ for (int i = start1; i < end1; i += 1)
+ // PRINT: for (int j = start2; j < end2; j += 1)
+ // DUMP: ForStmt
+ for (int j = start2; j < end2; j += 1)
+ // PRINT: body(i, j);
+ // DUMP: CallExpr
+ body(i, j);
+}
+
+
+// PRINT-LABEL: void foo3(
+// DUMP-LABEL: FunctionDecl {{.*}} foo3
+void foo3() {
+ // PRINT: #pragma omp for
+ // DUMP: OMPForDirective
+ // DUMP-NEXT: CapturedStmt
+ // DUMP-NEXT: CapturedDecl
+ #pragma omp for
+ // PRINT: #pragma omp stripe sizes(5)
+ // DUMP-NEXT: OMPStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ #pragma omp stripe sizes(5)
+ for (int i = 7; i < 17; i += 3)
+ // PRINT: body(i);
+ // DUMP: CallExpr
+ body(i);
+}
+
+
+// PRINT-LABEL: void foo4(
+// DUMP-LABEL: FunctionDecl {{.*}} foo4
+void foo4() {
+ // PRINT: #pragma omp for collapse(3)
+ // DUMP: OMPForDirective
+ // DUMP-NEXT: OMPCollapseClause
+ // DUMP-NEXT: ConstantExpr
+ // DUMP-NEXT: value: Int 3
+ // DUMP-NEXT: IntegerLiteral {{.*}} 3
+ // DUMP-NEXT: CapturedStmt
+ // DUMP-NEXT: CapturedDecl
+ #pragma omp for collapse(3)
+ // PRINT: #pragma omp stripe sizes(5, 5)
+ // DUMP: OMPStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ #pragma omp stripe sizes(5, 5)
+ // PRINT: for (int i = 7; i < 17; i += 1)
+ // DUMP-NEXT: ForStmt
+ for (int i = 7; i < 17; i += 1)
+ // PRINT: for (int j = 7; j < 17; j += 1)
+ // DUMP: ForStmt
+ for (int j = 7; j < 17; j += 1)
+ // PRINT: body(i, j);
+ // DUMP: CallExpr
+ body(i, j);
+}
+
+
+// PRINT-LABEL: void foo5(
+// DUMP-LABEL: FunctionDecl {{.*}} foo5
+void foo5(int start, int end, int step) {
+ // PRINT: #pragma omp for collapse(2)
+ // DUMP: OMPForDirective
+ // DUMP-NEXT: OMPCollapseClause
+ // DUMP-NEXT: ConstantExpr
+ // DUMP-NEXT: value: Int 2
+ // DUMP-NEXT: IntegerLiteral {{.*}} 2
+ // DUMP-NEXT: CapturedStmt
+ // DUMP-NEXT: CapturedDecl
+ #pragma omp for collapse(2)
+ // PRINT: for (int i = 7; i < 17; i += 1)
+ // DUMP-NEXT: ForStmt
+ for (int i = 7; i < 17; i += 1)
+ // PRINT: #pragma omp stripe sizes(5)
+ // DUMP: OMPStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ #pragma omp stripe sizes(5)
+ // PRINT: for (int j = 7; j < 17; j += 1)
+ // DUMP-NEXT: ForStmt
+ for (int j = 7; j < 17; j += 1)
+ // PRINT: body(i, j);
+ // DUMP: CallExpr
+ body(i, j);
+}
+
+
+// PRINT-LABEL: void foo6(
+// DUMP-LABEL: FunctionTemplateDecl {{.*}} foo6
+template<typename T, T Step, T Stripe>
+void foo6(T start, T end) {
+ // PRINT: #pragma omp stripe sizes(Stripe)
+ // DUMP: OMPStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: DeclRefExpr {{.*}} 'Stripe' 'T'
+ #pragma omp stripe sizes(Stripe)
+ // PRINT-NEXT: for (T i = start; i < end; i += Step)
+ // DUMP-NEXT: ForStmt
+ for (T i = start; i < end; i += Step)
+ // PRINT-NEXT: body(i);
+ // DUMP: CallExpr
+ body(i);
+}
+
+// Also test instantiating the template.
+void tfoo6() {
+ foo6<int,3,5>(0, 42);
+}
+
+
+// PRINT-LABEL: template <int Stripe> void foo7(int start, int stop, int step) {
+// DUMP-LABEL: FunctionTemplateDecl {{.*}} foo7
+template <int Stripe>
+void foo7(int start, int stop, int step) {
+ // PRINT: #pragma omp stripe sizes(Stripe)
+ // DUMP: OMPStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: DeclRefExpr {{.*}} 'Stripe' 'int'
+ #pragma omp stripe sizes(Stripe)
+ // PRINT-NEXT: for (int i = start; i < stop; i += step)
+ // DUMP-NEXT: ForStmt
+ for (int i = start; i < stop; i += step)
+ // PRINT-NEXT: body(i);
+ // DUMP: CallExpr
+ body(i);
+}
+void tfoo7() {
+ foo7<5>(0, 42, 2);
+}
+
+
+// PRINT-LABEL: void foo8(
+// DUMP-LABEL: FunctionDecl {{.*}} foo8
+void foo8(int a) {
+ // PRINT: #pragma omp stripe sizes(a)
+ // DUMP: OMPStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: ImplicitCastExpr
+ // DUMP-NEXT: DeclRefExpr {{.*}} 'a'
+ #pragma omp stripe sizes(a)
+ // PRINT-NEXT: for (int i = 7; i < 19; i += 3)
+ // DUMP-NEXT: ForStmt
+ for (int i = 7; i < 19; i += 3)
+ // PRINT: body(i);
+ // DUMP: CallExpr
+ body(i);
+}
diff --git a/clang/test/OpenMP/stripe_ast_print.cpp.mine b/clang/test/OpenMP/stripe_ast_print.cpp.mine
new file mode 100644
index 00000000000000..8ffd821ccf0484
--- /dev/null
+++ b/clang/test/OpenMP/stripe_ast_print.cpp.mine
@@ -0,0 +1,163 @@
+// Check no warnings/errors
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-late-outline -fopenmp-full-intel-stripe -fsyntax-only -verify %s
+// expected-no-diagnostics
+
+// Check AST and unparsing
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-late-outline -fopenmp-full-intel-stripe -ast-dump %s | FileCheck %s --check-prefix=DUMP
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-late-outline -fopenmp-full-intel-stripe -ast-print %s | FileCheck %s --check-prefix=PRINT
+
+// Check same results after serialization round-trip
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-late-outline -fopenmp-full-intel-stripe -emit-pch -o %t %s
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-late-outline -fopenmp-full-intel-stripe -ast-dump-all %s | FileCheck %s --check-prefix=DUMP
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-late-outline -fopenmp-full-intel-stripe -ast-print %s | FileCheck %s --check-prefix=PRINT
+
+// placeholder for loop body code.
+extern "C" void body(...);
+
+
+// PRINT-LABEL: void foo1(
+// DUMP-LABEL: FunctionDecl {{.*}} foo1
+void foo1() {
+ // PRINT: #pragma omp stripe sizes(5, 5)
+ // DUMP: OMPIntelStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ // DUMP-NEXT: CapturedStmt
+ // DUMP-NEXT: CapturedDecl
+ #pragma omp stripe sizes(5,5)
+ // PRINT: for (int i = 7; i < 17; i += 3)
+ // DUMP-NEXT: ForStmt
+ for (int i = 7; i < 17; i += 3)
+ // PRINT: for (int j = 7; j < 17; j += 3)
+ // DUMP: ForStmt
+ for (int j = 7; j < 17; j += 3)
+ // PRINT: body(i, j)
+ // DUMP: CallExpr
+ body(i, j);
+}
+
+// PRINT-LABEL: void foo2(
+// DUMP-LABEL: FunctionDecl {{.*}} foo2
+void foo2(int start1, int start2, int end1, int end2) {
+ // PRINT: #pragma omp stripe sizes(5, 5)
+ // DUMP: OMPIntelStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ // DUMP-NEXT: CapturedStmt
+ // DUMP-NEXT: CapturedDecl
+ #pragma omp stripe sizes(5,5)
+ // PRINT: for (int i = start1; i < end1; i += 1)
+ // DUMP-NEXT: ForStmt
+ for (int i = start1; i < end1; i += 1)
+ // PRINT: for (int j = start2; j < end2; j += 1)
+ // DUMP: ForStmt
+ for (int j = start2; j < end2; j += 1)
+ // PRINT: body(i, j);
+ // DUMP: CallExpr
+ body(i, j);
+}
+
+// PRINT-LABEL: void foo3(
+// DUMP-LABEL: FunctionDecl {{.*}} foo3
+void foo3() {
+ // PRINT: #pragma omp for
+ // DUMP: OMPForDirective
+ // DUMP-NEXT: CapturedStmt
+ // DUMP-NEXT: CapturedDecl
+ #pragma omp for
+ // PRINT: #pragma omp stripe sizes(5)
+ // DUMP-NEXT: OMPIntelStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ #pragma omp stripe sizes(5)
+ for (int i = 7; i < 17; i += 3)
+ // PRINT: body(i);
+ // DUMP: CallExpr
+ body(i);
+}
+
+// PRINT-LABEL: void foo4(
+// DUMP-LABEL: FunctionDecl {{.*}} foo4
+void foo4() {
+ // PRINT: #pragma omp for collapse(2)
+ // DUMP: OMPForDirective
+ // DUMP-NEXT: OMPCollapseClause
+ // DUMP-NEXT: ConstantExpr
+ // DUMP-NEXT: value: Int 2
+ // DUMP-NEXT: IntegerLiteral {{.*}} 2
+ // DUMP-NEXT: CapturedStmt
+ // DUMP-NEXT: CapturedDecl
+ #pragma omp for collapse(2)
+ // PRINT: #pragma omp stripe sizes(5, 5)
+ // DUMP: OMPIntelStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ // DUMP-NEXT: CapturedStmt
+ // DUMP-NEXT: CapturedDecl
+ #pragma omp stripe sizes(5, 5)
+ // PRINT: for (int i = 7; i < 17; i += 1)
+ // DUMP-NEXT: ForStmt
+ for (int i = 7; i < 17; i += 1)
+ // PRINT: for (int j = 7; j < 17; j += 1)
+ // DUMP: ForStmt
+ for (int j = 7; j < 17; j += 1)
+ // PRINT: body(i, j);
+ // DUMP: CallExpr
+ body(i, j);
+}
+
+// PRINT-LABEL: void foo5(
+// DUMP-LABEL: FunctionDecl {{.*}} foo5
+void foo5(int start, int end, int step) {
+ // PRINT: #pragma omp for collapse(2)
+ // DUMP: OMPForDirective
+ // DUMP-NEXT: OMPCollapseClause
+ // DUMP-NEXT: ConstantExpr
+ // DUMP-NEXT: value: Int 2
+ // DUMP-NEXT: IntegerLiteral {{.*}} 2
+ // DUMP-NEXT: CapturedStmt
+ // DUMP-NEXT: CapturedDecl
+ #pragma omp for collapse(2)
+ // PRINT: for (int i = 7; i < 17; i += 1)
+ // DUMP-NEXT: ForStmt
+ for (int i = 7; i < 17; i += 1)
+ // PRINT: #pragma omp stripe sizes(5)
+ // DUMP: OMPIntelStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: IntegerLiteral {{.*}} 5
+ // DUMP-NEXT: CapturedStmt
+ // DUMP-NEXT: CapturedDecl
+ #pragma omp stripe sizes(5)
+ // PRINT: for (int j = 7; j < 17; j += 1)
+ // DUMP-NEXT: ForStmt
+ for (int j = 7; j < 17; j += 1)
+ // PRINT: body(i, j);
+ // DUMP: CallExpr
+ body(i, j);
+}
+
+// PRINT-LABEL: template <int Stripe> void foo6(int start, int stop, int step) {
+// DUMP-LABEL: FunctionTemplateDecl {{.*}} foo6
+template <int Stripe>
+void foo6(int start, int stop, int step) {
+ // PRINT: #pragma omp stripe sizes(Stripe)
+ // DUMP: OMPIntelStripeDirective
+ // DUMP-NEXT: OMPSizesClause
+ // DUMP-NEXT: DeclRefExpr {{.*}} 'Stripe' 'int'
+ // DUMP-NEXT: CapturedStmt
+ // DUMP-NEXT: CapturedDecl
+ #pragma omp stripe sizes(Stripe)
+ // PRINT-NEXT: for (int i = start; i < stop; i += step)
+ // DUMP-NEXT: ForStmt
+ for (int i = start; i < stop; i += step)
+ // PRINT-NEXT: body(i);
+ // DUMP: CallExpr
+ body(i);
+}
+
+void tfoo6() {
+ foo6<5>(0, 42, 2);
+}
diff --git a/clang/test/OpenMP/stripe_codegen.cpp b/clang/test/OpenMP/stripe_codegen.cpp
new file mode 100644
index 00000000000000..aea5b2f96402b3
--- /dev/null
+++ b/clang/test/OpenMP/stripe_codegen.cpp
@@ -0,0 +1,1544 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ --version 4
+// Check code generation
+// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fclang-abi-compat=latest -std=c++20 -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
+
+// Check same results after serialization round-trip
+// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fclang-abi-compat=latest -std=c++20 -fopenmp -emit-pch -o %t %s
+// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fclang-abi-compat=latest -std=c++20 -fopenmp -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK2
+// expected-no-diagnostics
+
+#ifndef HEADER
+#define HEADER
+
+// placeholder for loop body code.
+extern "C" void body(...) {}
+
+
+struct S {
+ int i;
+ S() {
+#pragma omp stripe sizes(5)
+ for (i = 7; i < 17; i += 3)
+ body(i);
+ }
+} s;
+
+extern "C" void foo1(int start, int end, int step) {
+ int i;
+#pragma omp stripe sizes(5)
+ for (i = start; i < end; i += step)
+ body(i);
+}
+
+extern "C" void foo2(int start, int end, int step) {
+#pragma omp stripe sizes(5,5)
+ for (int i = 7; i < 17; i+=3)
+ for (int j = 7; j < 17; j+=3)
+ body(i,j);
+}
+
+extern "C" void foo3() {
+#pragma omp for
+#pragma omp stripe sizes(5,5)
+ for (int i = 7; i < 17; i += 3)
+ for (int j = 7; j < 17; j += 3)
+ body(i, j);
+}
+
+extern "C" void foo4() {
+#pragma omp for collapse(2)
+ for (int k = 7; k < 17; k += 3)
+#pragma omp stripe sizes(5,5)
+ for (int i = 7; i < 17; i += 3)
+ for (int j = 7; j < 17; j += 3)
+ body(i, j);
+}
+
+
+extern "C" void foo5() {
+#pragma omp for collapse(3)
+#pragma omp stripe sizes(5)
+ for (int i = 7; i < 17; i += 3)
+ for (int j = 7; j < 17; j += 3)
+ body(i, j);
+}
+
+
+extern "C" void foo6() {
+#pragma omp parallel for
+#pragma omp stripe sizes(5)
+ for (int i = 7; i < 17; i += 3)
+ body(i);
+}
+
+
+template<typename T, T Step, T Stripe>
+void foo7(T start, T end) {
+#pragma omp stripe sizes(Stripe)
+ for (T i = start; i < end; i += Step)
+ body(i);
+}
+
+extern "C" void tfoo7() {
+ foo7<int,3,5>(0, 42);
+}
+
+
+extern "C" void foo8(int a) {
+#pragma omp stripe sizes(a)
+ for (int i = 7; i < 17; i += 3)
+ body(i);
+}
+
+
+typedef struct { double array[12]; } data_t;
+extern "C" void foo9(data_t data) {
+#pragma omp stripe sizes(5)
+ for (double v : data.array)
+ body(v);
+}
+
+
+extern "C" void foo10(data_t data) {
+#pragma omp stripe sizes(5)
+ for (double c = 42.0; double v : data.array)
+ body(c, v);
+}
+
+
+#endif /* HEADER */
+
+// CHECK1-LABEL: define dso_local void @body(
+// CHECK1-SAME: ...) #[[ATTR0:[0-9]+]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define internal void @__cxx_global_var_init(
+// CHECK1-SAME: ) #[[ATTR1:[0-9]+]] section ".text.startup" {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) @s)
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define linkonce_odr void @_ZN1SC1Ev(
+// CHECK1-SAME: ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat align 2 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: call void @_ZN1SC2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define linkonce_odr void @_ZN1SC2Ev(
+// CHECK1-SAME: ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat align 2 {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define dso_local void @foo1(
+// CHECK1-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR0]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[STEP_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store i32 [[START]], ptr [[START_ADDR]], align 4
+// CHECK1-NEXT: store i32 [[END]], ptr [[END_ADDR]], align 4
+// CHECK1-NEXT: store i32 [[STEP]], ptr [[STEP_ADDR]], align 4
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define dso_local void @foo2(
+// CHECK1-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR0]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[STEP_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store i32 [[START]], ptr [[START_ADDR]], align 4
+// CHECK1-NEXT: store i32 [[END]], ptr [[END_ADDR]], align 4
+// CHECK1-NEXT: store i32 [[STEP]], ptr [[STEP_ADDR]], align 4
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define dso_local void @foo3(
+// CHECK1-SAME: ) #[[ATTR0]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTFLOOR_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTFLOOR_1_IV_J:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTSTRIPE_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTSTRIPE_1_IV_J:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
+// CHECK1-NEXT: store i32 7, ptr [[I]], align 4
+// CHECK1-NEXT: store i32 7, ptr [[J]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 0
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 0, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
+// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP6]], 5
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: br label [[FOR_COND:%.*]]
+// CHECK1: for.cond:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP7]], 4
+// CHECK1-NEXT: br i1 [[CMP2]], label [[FOR_BODY:%.*]], label [[FOR_END32:%.*]]
+// CHECK1: for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: br label [[FOR_COND3:%.*]]
+// CHECK1: for.cond3:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], 5
+// CHECK1-NEXT: [[CMP5:%.*]] = icmp slt i32 4, [[ADD4]]
+// CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
+// CHECK1: cond.true6:
+// CHECK1-NEXT: br label [[COND_END9:%.*]]
+// CHECK1: cond.false7:
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP11]], 5
+// CHECK1-NEXT: br label [[COND_END9]]
+// CHECK1: cond.end9:
+// CHECK1-NEXT: [[COND10:%.*]] = phi i32 [ 4, [[COND_TRUE6]] ], [ [[ADD8]], [[COND_FALSE7]] ]
+// CHECK1-NEXT: [[CMP11:%.*]] = icmp slt i32 [[TMP9]], [[COND10]]
+// CHECK1-NEXT: br i1 [[CMP11]], label [[FOR_BODY12:%.*]], label [[FOR_END29:%.*]]
+// CHECK1: for.body12:
+// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP12]], 3
+// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 7, [[MUL13]]
+// CHECK1-NEXT: store i32 [[ADD14]], ptr [[I]], align 4
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK1-NEXT: br label [[FOR_COND15:%.*]]
+// CHECK1: for.cond15:
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP15]], 5
+// CHECK1-NEXT: [[CMP17:%.*]] = icmp slt i32 4, [[ADD16]]
+// CHECK1-NEXT: br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
+// CHECK1: cond.true18:
+// CHECK1-NEXT: br label [[COND_END21:%.*]]
+// CHECK1: cond.false19:
+// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: [[ADD20:%.*]] = add nsw i32 [[TMP16]], 5
+// CHECK1-NEXT: br label [[COND_END21]]
+// CHECK1: cond.end21:
+// CHECK1-NEXT: [[COND22:%.*]] = phi i32 [ 4, [[COND_TRUE18]] ], [ [[ADD20]], [[COND_FALSE19]] ]
+// CHECK1-NEXT: [[CMP23:%.*]] = icmp slt i32 [[TMP14]], [[COND22]]
+// CHECK1-NEXT: br i1 [[CMP23]], label [[FOR_BODY24:%.*]], label [[FOR_END:%.*]]
+// CHECK1: for.body24:
+// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK1-NEXT: [[MUL25:%.*]] = mul nsw i32 [[TMP17]], 3
+// CHECK1-NEXT: [[ADD26:%.*]] = add nsw i32 7, [[MUL25]]
+// CHECK1-NEXT: store i32 [[ADD26]], ptr [[J]], align 4
+// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4
+// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[J]], align 4
+// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP18]], i32 noundef [[TMP19]])
+// CHECK1-NEXT: br label [[FOR_INC:%.*]]
+// CHECK1: for.inc:
+// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP20]], 1
+// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK1-NEXT: br label [[FOR_COND15]], !llvm.loop [[LOOP3:![0-9]+]]
+// CHECK1: for.end:
+// CHECK1-NEXT: br label [[FOR_INC27:%.*]]
+// CHECK1: for.inc27:
+// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: [[INC28:%.*]] = add nsw i32 [[TMP21]], 1
+// CHECK1-NEXT: store i32 [[INC28]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: br label [[FOR_COND3]], !llvm.loop [[LOOP5:![0-9]+]]
+// CHECK1: for.end29:
+// CHECK1-NEXT: br label [[FOR_INC30:%.*]]
+// CHECK1: for.inc30:
+// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP22]], 5
+// CHECK1-NEXT: store i32 [[ADD31]], ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
+// CHECK1: for.end32:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[ADD33:%.*]] = add nsw i32 [[TMP23]], 1
+// CHECK1-NEXT: store i32 [[ADD33]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
+// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define dso_local void @foo4(
+// CHECK1-SAME: ) #[[ATTR0]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[K:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTFLOOR_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTFLOOR_1_IV_J:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTSTRIPE_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTSTRIPE_1_IV_J:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
+// CHECK1-NEXT: store i32 7, ptr [[I]], align 4
+// CHECK1-NEXT: store i32 7, ptr [[J]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 3, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 3
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
+// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP6]], 1
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 3
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 7, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[K]], align 4
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP8]], 1
+// CHECK1-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 1
+// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], [[MUL4]]
+// CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 5
+// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]]
+// CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: br label [[FOR_COND:%.*]]
+// CHECK1: for.cond:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: [[CMP7:%.*]] = icmp slt i32 [[TMP9]], 4
+// CHECK1-NEXT: br i1 [[CMP7]], label [[FOR_BODY:%.*]], label [[FOR_END37:%.*]]
+// CHECK1: for.body:
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: br label [[FOR_COND8:%.*]]
+// CHECK1: for.cond8:
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP12]], 5
+// CHECK1-NEXT: [[CMP10:%.*]] = icmp slt i32 4, [[ADD9]]
+// CHECK1-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
+// CHECK1: cond.true11:
+// CHECK1-NEXT: br label [[COND_END14:%.*]]
+// CHECK1: cond.false12:
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP13]], 5
+// CHECK1-NEXT: br label [[COND_END14]]
+// CHECK1: cond.end14:
+// CHECK1-NEXT: [[COND15:%.*]] = phi i32 [ 4, [[COND_TRUE11]] ], [ [[ADD13]], [[COND_FALSE12]] ]
+// CHECK1-NEXT: [[CMP16:%.*]] = icmp slt i32 [[TMP11]], [[COND15]]
+// CHECK1-NEXT: br i1 [[CMP16]], label [[FOR_BODY17:%.*]], label [[FOR_END34:%.*]]
+// CHECK1: for.body17:
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: [[MUL18:%.*]] = mul nsw i32 [[TMP14]], 3
+// CHECK1-NEXT: [[ADD19:%.*]] = add nsw i32 7, [[MUL18]]
+// CHECK1-NEXT: store i32 [[ADD19]], ptr [[I]], align 4
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK1-NEXT: br label [[FOR_COND20:%.*]]
+// CHECK1: for.cond20:
+// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: [[ADD21:%.*]] = add nsw i32 [[TMP17]], 5
+// CHECK1-NEXT: [[CMP22:%.*]] = icmp slt i32 4, [[ADD21]]
+// CHECK1-NEXT: br i1 [[CMP22]], label [[COND_TRUE23:%.*]], label [[COND_FALSE24:%.*]]
+// CHECK1: cond.true23:
+// CHECK1-NEXT: br label [[COND_END26:%.*]]
+// CHECK1: cond.false24:
+// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP18]], 5
+// CHECK1-NEXT: br label [[COND_END26]]
+// CHECK1: cond.end26:
+// CHECK1-NEXT: [[COND27:%.*]] = phi i32 [ 4, [[COND_TRUE23]] ], [ [[ADD25]], [[COND_FALSE24]] ]
+// CHECK1-NEXT: [[CMP28:%.*]] = icmp slt i32 [[TMP16]], [[COND27]]
+// CHECK1-NEXT: br i1 [[CMP28]], label [[FOR_BODY29:%.*]], label [[FOR_END:%.*]]
+// CHECK1: for.body29:
+// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK1-NEXT: [[MUL30:%.*]] = mul nsw i32 [[TMP19]], 3
+// CHECK1-NEXT: [[ADD31:%.*]] = add nsw i32 7, [[MUL30]]
+// CHECK1-NEXT: store i32 [[ADD31]], ptr [[J]], align 4
+// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4
+// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[J]], align 4
+// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP20]], i32 noundef [[TMP21]])
+// CHECK1-NEXT: br label [[FOR_INC:%.*]]
+// CHECK1: for.inc:
+// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP22]], 1
+// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK1-NEXT: br label [[FOR_COND20]], !llvm.loop [[LOOP7:![0-9]+]]
+// CHECK1: for.end:
+// CHECK1-NEXT: br label [[FOR_INC32:%.*]]
+// CHECK1: for.inc32:
+// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: [[INC33:%.*]] = add nsw i32 [[TMP23]], 1
+// CHECK1-NEXT: store i32 [[INC33]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: br label [[FOR_COND8]], !llvm.loop [[LOOP8:![0-9]+]]
+// CHECK1: for.end34:
+// CHECK1-NEXT: br label [[FOR_INC35:%.*]]
+// CHECK1: for.inc35:
+// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: [[ADD36:%.*]] = add nsw i32 [[TMP24]], 5
+// CHECK1-NEXT: store i32 [[ADD36]], ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
+// CHECK1: for.end37:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[ADD38:%.*]] = add nsw i32 [[TMP25]], 1
+// CHECK1-NEXT: store i32 [[ADD38]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
+// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
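+// foo5: the striped iteration space is collapsed into a single 64-bit logical IV;
+// the precomputed trip counts (.capture_expr_*) are used to de-linearize that IV
+// back into .floor_0.iv.i, .stripe_0.iv.i and j before the call to body().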
+// CHECK1-LABEL: define dso_local void @foo5(
+// CHECK1-SAME: ) #[[ATTR0]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTFLOOR_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTSTRIPE_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTFLOOR_0_IV_I11:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTSTRIPE_0_IV_I12:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[J13:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
+// CHECK1-NEXT: store i32 7, ptr [[I]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP]], align 4
+// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP]], align 4
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 5
+// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 4, [[ADD]]
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP]], align 4
+// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], 5
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 4, [[COND_TRUE]] ], [ [[ADD4]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]]
+// CHECK1-NEXT: [[SUB6:%.*]] = sub i32 [[SUB]], 1
+// CHECK1-NEXT: [[ADD7:%.*]] = add i32 [[SUB6]], 1
+// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD7]], 1
+// CHECK1-NEXT: [[CONV:%.*]] = zext i32 [[DIV]] to i64
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 1, [[CONV]]
+// CHECK1-NEXT: [[MUL8:%.*]] = mul nsw i64 [[MUL]], 4
+// CHECK1-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL8]], 1
+// CHECK1-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: store i32 7, ptr [[J]], align 4
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK1-NEXT: [[CMP10:%.*]] = icmp slt i32 [[TMP7]], [[TMP8]]
+// CHECK1-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
+// CHECK1: omp.precond.then:
+// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
+// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8
+// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTOMP_UB]], align 8
+// CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1)
+// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
+// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8
+// CHECK1-NEXT: [[CMP14:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
+// CHECK1-NEXT: br i1 [[CMP14]], label [[COND_TRUE15:%.*]], label [[COND_FALSE16:%.*]]
+// CHECK1: cond.true15:
+// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8
+// CHECK1-NEXT: br label [[COND_END17:%.*]]
+// CHECK1: cond.false16:
+// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
+// CHECK1-NEXT: br label [[COND_END17]]
+// CHECK1: cond.end17:
+// CHECK1-NEXT: [[COND18:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE15]] ], [ [[TMP13]], [[COND_FALSE16]] ]
+// CHECK1-NEXT: store i64 [[COND18]], ptr [[DOTOMP_UB]], align 8
+// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
+// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTOMP_IV]], align 8
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK1-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
+// CHECK1-NEXT: [[CMP19:%.*]] = icmp sle i64 [[TMP15]], [[TMP16]]
+// CHECK1-NEXT: br i1 [[CMP19]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[SUB20:%.*]] = sub i32 [[TMP18]], [[TMP19]]
+// CHECK1-NEXT: [[SUB21:%.*]] = sub i32 [[SUB20]], 1
+// CHECK1-NEXT: [[ADD22:%.*]] = add i32 [[SUB21]], 1
+// CHECK1-NEXT: [[DIV23:%.*]] = udiv i32 [[ADD22]], 1
+// CHECK1-NEXT: [[MUL24:%.*]] = mul i32 1, [[DIV23]]
+// CHECK1-NEXT: [[MUL25:%.*]] = mul i32 [[MUL24]], 4
+// CHECK1-NEXT: [[CONV26:%.*]] = zext i32 [[MUL25]] to i64
+// CHECK1-NEXT: [[DIV27:%.*]] = sdiv i64 [[TMP17]], [[CONV26]]
+// CHECK1-NEXT: [[MUL28:%.*]] = mul nsw i64 [[DIV27]], 5
+// CHECK1-NEXT: [[ADD29:%.*]] = add nsw i64 0, [[MUL28]]
+// CHECK1-NEXT: [[CONV30:%.*]] = trunc i64 [[ADD29]] to i32
+// CHECK1-NEXT: store i32 [[CONV30]], ptr [[DOTFLOOR_0_IV_I11]], align 4
+// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[CONV31:%.*]] = sext i32 [[TMP20]] to i64
+// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK1-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[SUB32:%.*]] = sub i32 [[TMP23]], [[TMP24]]
+// CHECK1-NEXT: [[SUB33:%.*]] = sub i32 [[SUB32]], 1
+// CHECK1-NEXT: [[ADD34:%.*]] = add i32 [[SUB33]], 1
+// CHECK1-NEXT: [[DIV35:%.*]] = udiv i32 [[ADD34]], 1
+// CHECK1-NEXT: [[MUL36:%.*]] = mul i32 1, [[DIV35]]
+// CHECK1-NEXT: [[MUL37:%.*]] = mul i32 [[MUL36]], 4
+// CHECK1-NEXT: [[CONV38:%.*]] = zext i32 [[MUL37]] to i64
+// CHECK1-NEXT: [[DIV39:%.*]] = sdiv i64 [[TMP22]], [[CONV38]]
+// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[SUB40:%.*]] = sub i32 [[TMP25]], [[TMP26]]
+// CHECK1-NEXT: [[SUB41:%.*]] = sub i32 [[SUB40]], 1
+// CHECK1-NEXT: [[ADD42:%.*]] = add i32 [[SUB41]], 1
+// CHECK1-NEXT: [[DIV43:%.*]] = udiv i32 [[ADD42]], 1
+// CHECK1-NEXT: [[MUL44:%.*]] = mul i32 1, [[DIV43]]
+// CHECK1-NEXT: [[MUL45:%.*]] = mul i32 [[MUL44]], 4
+// CHECK1-NEXT: [[CONV46:%.*]] = zext i32 [[MUL45]] to i64
+// CHECK1-NEXT: [[MUL47:%.*]] = mul nsw i64 [[DIV39]], [[CONV46]]
+// CHECK1-NEXT: [[SUB48:%.*]] = sub nsw i64 [[TMP21]], [[MUL47]]
+// CHECK1-NEXT: [[DIV49:%.*]] = sdiv i64 [[SUB48]], 4
+// CHECK1-NEXT: [[MUL50:%.*]] = mul nsw i64 [[DIV49]], 1
+// CHECK1-NEXT: [[ADD51:%.*]] = add nsw i64 [[CONV31]], [[MUL50]]
+// CHECK1-NEXT: [[CONV52:%.*]] = trunc i64 [[ADD51]] to i32
+// CHECK1-NEXT: store i32 [[CONV52]], ptr [[DOTSTRIPE_0_IV_I12]], align 4
+// CHECK1-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK1-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[SUB53:%.*]] = sub i32 [[TMP29]], [[TMP30]]
+// CHECK1-NEXT: [[SUB54:%.*]] = sub i32 [[SUB53]], 1
+// CHECK1-NEXT: [[ADD55:%.*]] = add i32 [[SUB54]], 1
+// CHECK1-NEXT: [[DIV56:%.*]] = udiv i32 [[ADD55]], 1
+// CHECK1-NEXT: [[MUL57:%.*]] = mul i32 1, [[DIV56]]
+// CHECK1-NEXT: [[MUL58:%.*]] = mul i32 [[MUL57]], 4
+// CHECK1-NEXT: [[CONV59:%.*]] = zext i32 [[MUL58]] to i64
+// CHECK1-NEXT: [[DIV60:%.*]] = sdiv i64 [[TMP28]], [[CONV59]]
+// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[SUB61:%.*]] = sub i32 [[TMP31]], [[TMP32]]
+// CHECK1-NEXT: [[SUB62:%.*]] = sub i32 [[SUB61]], 1
+// CHECK1-NEXT: [[ADD63:%.*]] = add i32 [[SUB62]], 1
+// CHECK1-NEXT: [[DIV64:%.*]] = udiv i32 [[ADD63]], 1
+// CHECK1-NEXT: [[MUL65:%.*]] = mul i32 1, [[DIV64]]
+// CHECK1-NEXT: [[MUL66:%.*]] = mul i32 [[MUL65]], 4
+// CHECK1-NEXT: [[CONV67:%.*]] = zext i32 [[MUL66]] to i64
+// CHECK1-NEXT: [[MUL68:%.*]] = mul nsw i64 [[DIV60]], [[CONV67]]
+// CHECK1-NEXT: [[SUB69:%.*]] = sub nsw i64 [[TMP27]], [[MUL68]]
+// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK1-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[SUB70:%.*]] = sub i32 [[TMP35]], [[TMP36]]
+// CHECK1-NEXT: [[SUB71:%.*]] = sub i32 [[SUB70]], 1
+// CHECK1-NEXT: [[ADD72:%.*]] = add i32 [[SUB71]], 1
+// CHECK1-NEXT: [[DIV73:%.*]] = udiv i32 [[ADD72]], 1
+// CHECK1-NEXT: [[MUL74:%.*]] = mul i32 1, [[DIV73]]
+// CHECK1-NEXT: [[MUL75:%.*]] = mul i32 [[MUL74]], 4
+// CHECK1-NEXT: [[CONV76:%.*]] = zext i32 [[MUL75]] to i64
+// CHECK1-NEXT: [[DIV77:%.*]] = sdiv i64 [[TMP34]], [[CONV76]]
+// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[SUB78:%.*]] = sub i32 [[TMP37]], [[TMP38]]
+// CHECK1-NEXT: [[SUB79:%.*]] = sub i32 [[SUB78]], 1
+// CHECK1-NEXT: [[ADD80:%.*]] = add i32 [[SUB79]], 1
+// CHECK1-NEXT: [[DIV81:%.*]] = udiv i32 [[ADD80]], 1
+// CHECK1-NEXT: [[MUL82:%.*]] = mul i32 1, [[DIV81]]
+// CHECK1-NEXT: [[MUL83:%.*]] = mul i32 [[MUL82]], 4
+// CHECK1-NEXT: [[CONV84:%.*]] = zext i32 [[MUL83]] to i64
+// CHECK1-NEXT: [[MUL85:%.*]] = mul nsw i64 [[DIV77]], [[CONV84]]
+// CHECK1-NEXT: [[SUB86:%.*]] = sub nsw i64 [[TMP33]], [[MUL85]]
+// CHECK1-NEXT: [[DIV87:%.*]] = sdiv i64 [[SUB86]], 4
+// CHECK1-NEXT: [[MUL88:%.*]] = mul nsw i64 [[DIV87]], 4
+// CHECK1-NEXT: [[SUB89:%.*]] = sub nsw i64 [[SUB69]], [[MUL88]]
+// CHECK1-NEXT: [[MUL90:%.*]] = mul nsw i64 [[SUB89]], 3
+// CHECK1-NEXT: [[ADD91:%.*]] = add nsw i64 7, [[MUL90]]
+// CHECK1-NEXT: [[CONV92:%.*]] = trunc i64 [[ADD91]] to i32
+// CHECK1-NEXT: store i32 [[CONV92]], ptr [[J13]], align 4
+// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I12]], align 4
+// CHECK1-NEXT: [[MUL93:%.*]] = mul nsw i32 [[TMP39]], 3
+// CHECK1-NEXT: [[ADD94:%.*]] = add nsw i32 7, [[MUL93]]
+// CHECK1-NEXT: store i32 [[ADD94]], ptr [[I]], align 4
+// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[I]], align 4
+// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[J13]], align 4
+// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP40]], i32 noundef [[TMP41]])
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP42:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK1-NEXT: [[ADD95:%.*]] = add nsw i64 [[TMP42]], 1
+// CHECK1-NEXT: store i64 [[ADD95]], ptr [[DOTOMP_IV]], align 8
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
+// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
+// CHECK1: omp.precond.end:
+// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
+// CHECK1-NEXT: ret void
+//
+//
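+// foo6: the region is outlined into foo6.omp_outlined and launched through
+// __kmpc_fork_call; inside the outlined function the floor loop is workshared and
+// the stripe loop, bounded by min(4, .floor_0.iv.i + 5), executes serially.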
+// CHECK1-LABEL: define dso_local void @foo6(
+// CHECK1-SAME: ) #[[ATTR0]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 0, ptr @foo6.omp_outlined)
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define internal void @foo6.omp_outlined(
+// CHECK1-SAME: ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR4:[0-9]+]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTFLOOR_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[DOTSTRIPE_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK1-NEXT: store i32 7, ptr [[I]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 0
+// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 0, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK1: omp.inner.for.cond:
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK1: omp.inner.for.body:
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: br label [[FOR_COND:%.*]]
+// CHECK1: for.cond:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 5
+// CHECK1-NEXT: [[CMP3:%.*]] = icmp slt i32 4, [[ADD2]]
+// CHECK1-NEXT: br i1 [[CMP3]], label [[COND_TRUE4:%.*]], label [[COND_FALSE5:%.*]]
+// CHECK1: cond.true4:
+// CHECK1-NEXT: br label [[COND_END7:%.*]]
+// CHECK1: cond.false5:
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 5
+// CHECK1-NEXT: br label [[COND_END7]]
+// CHECK1: cond.end7:
+// CHECK1-NEXT: [[COND8:%.*]] = phi i32 [ 4, [[COND_TRUE4]] ], [ [[ADD6]], [[COND_FALSE5]] ]
+// CHECK1-NEXT: [[CMP9:%.*]] = icmp slt i32 [[TMP9]], [[COND8]]
+// CHECK1-NEXT: br i1 [[CMP9]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
+// CHECK1: for.body:
+// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP12]], 3
+// CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 7, [[MUL10]]
+// CHECK1-NEXT: store i32 [[ADD11]], ptr [[I]], align 4
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4
+// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP13]])
+// CHECK1-NEXT: br label [[FOR_INC:%.*]]
+// CHECK1: for.inc:
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP14]], 1
+// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
+// CHECK1: for.end:
+// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK1: omp.body.continue:
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK1: omp.inner.for.inc:
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP15]], 1
+// CHECK1-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4
+// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK1: omp.inner.for.end:
+// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK1: omp.loop.exit:
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define dso_local void @tfoo7(
+// CHECK1-SAME: ) #[[ATTR0]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: call void @_Z4foo7IiTnT_Li3ETnS0_Li5EEvS0_S0_(i32 noundef 0, i32 noundef 42)
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define linkonce_odr void @_Z4foo7IiTnT_Li3ETnS0_Li5EEvS0_S0_(
+// CHECK1-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]]) #[[ATTR0]] comdat {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store i32 [[START]], ptr [[START_ADDR]], align 4
+// CHECK1-NEXT: store i32 [[END]], ptr [[END_ADDR]], align 4
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define dso_local void @foo8(
+// CHECK1-SAME: i32 noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define dso_local void @foo9(
+// CHECK1-SAME: ptr noundef byval([[STRUCT_DATA_T:%.*]]) align 8 [[DATA:%.*]]) #[[ATTR0]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define dso_local void @foo10(
+// CHECK1-SAME: ptr noundef byval([[STRUCT_DATA_T:%.*]]) align 8 [[DATA:%.*]]) #[[ATTR0]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define internal void @_GLOBAL__sub_I_stripe_codegen.cpp(
+// CHECK1-SAME: ) #[[ATTR1]] section ".text.startup" {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: call void @__cxx_global_var_init()
+// CHECK1-NEXT: ret void
+//
+//
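+// The CHECK2 prefix repeats the structural checks above for the test's second
+// FileCheck configuration; the emitted floor/stripe loop nests mirror the CHECK1
+// output.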
+// CHECK2-LABEL: define internal void @__cxx_global_var_init(
+// CHECK2-SAME: ) #[[ATTR0:[0-9]+]] section ".text.startup" {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) @s)
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define linkonce_odr void @_ZN1SC1Ev(
+// CHECK2-SAME: ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK2-NEXT: call void @_ZN1SC2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]])
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define linkonce_odr void @_ZN1SC2Ev(
+// CHECK2-SAME: ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define dso_local void @body(
+// CHECK2-SAME: ...) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define dso_local void @foo1(
+// CHECK2-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[STEP_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: store i32 [[START]], ptr [[START_ADDR]], align 4
+// CHECK2-NEXT: store i32 [[END]], ptr [[END_ADDR]], align 4
+// CHECK2-NEXT: store i32 [[STEP]], ptr [[STEP_ADDR]], align 4
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define dso_local void @foo10(
+// CHECK2-SAME: ptr noundef byval([[STRUCT_DATA_T:%.*]]) align 8 [[DATA:%.*]]) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define dso_local void @foo2(
+// CHECK2-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[STEP_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: store i32 [[START]], ptr [[START_ADDR]], align 4
+// CHECK2-NEXT: store i32 [[END]], ptr [[END_ADDR]], align 4
+// CHECK2-NEXT: store i32 [[STEP]], ptr [[STEP_ADDR]], align 4
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define dso_local void @foo3(
+// CHECK2-SAME: ) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTFLOOR_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTFLOOR_1_IV_J:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTSTRIPE_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTSTRIPE_1_IV_J:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
+// CHECK2-NEXT: store i32 7, ptr [[I]], align 4
+// CHECK2-NEXT: store i32 7, ptr [[J]], align 4
+// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 0
+// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK2: cond.true:
+// CHECK2-NEXT: br label [[COND_END:%.*]]
+// CHECK2: cond.false:
+// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: br label [[COND_END]]
+// CHECK2: cond.end:
+// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 0, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
+// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK2-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK2: omp.inner.for.cond:
+// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
+// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK2: omp.inner.for.body:
+// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP6]], 5
+// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: store i32 0, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: br label [[FOR_COND:%.*]]
+// CHECK2: for.cond:
+// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP7]], 4
+// CHECK2-NEXT: br i1 [[CMP2]], label [[FOR_BODY:%.*]], label [[FOR_END32:%.*]]
+// CHECK2: for.body:
+// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: store i32 [[TMP8]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: br label [[FOR_COND3:%.*]]
+// CHECK2: for.cond3:
+// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], 5
+// CHECK2-NEXT: [[CMP5:%.*]] = icmp slt i32 4, [[ADD4]]
+// CHECK2-NEXT: br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
+// CHECK2: cond.true6:
+// CHECK2-NEXT: br label [[COND_END9:%.*]]
+// CHECK2: cond.false7:
+// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP11]], 5
+// CHECK2-NEXT: br label [[COND_END9]]
+// CHECK2: cond.end9:
+// CHECK2-NEXT: [[COND10:%.*]] = phi i32 [ 4, [[COND_TRUE6]] ], [ [[ADD8]], [[COND_FALSE7]] ]
+// CHECK2-NEXT: [[CMP11:%.*]] = icmp slt i32 [[TMP9]], [[COND10]]
+// CHECK2-NEXT: br i1 [[CMP11]], label [[FOR_BODY12:%.*]], label [[FOR_END29:%.*]]
+// CHECK2: for.body12:
+// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP12]], 3
+// CHECK2-NEXT: [[ADD14:%.*]] = add nsw i32 7, [[MUL13]]
+// CHECK2-NEXT: store i32 [[ADD14]], ptr [[I]], align 4
+// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: store i32 [[TMP13]], ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK2-NEXT: br label [[FOR_COND15:%.*]]
+// CHECK2: for.cond15:
+// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP15]], 5
+// CHECK2-NEXT: [[CMP17:%.*]] = icmp slt i32 4, [[ADD16]]
+// CHECK2-NEXT: br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
+// CHECK2: cond.true18:
+// CHECK2-NEXT: br label [[COND_END21:%.*]]
+// CHECK2: cond.false19:
+// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: [[ADD20:%.*]] = add nsw i32 [[TMP16]], 5
+// CHECK2-NEXT: br label [[COND_END21]]
+// CHECK2: cond.end21:
+// CHECK2-NEXT: [[COND22:%.*]] = phi i32 [ 4, [[COND_TRUE18]] ], [ [[ADD20]], [[COND_FALSE19]] ]
+// CHECK2-NEXT: [[CMP23:%.*]] = icmp slt i32 [[TMP14]], [[COND22]]
+// CHECK2-NEXT: br i1 [[CMP23]], label [[FOR_BODY24:%.*]], label [[FOR_END:%.*]]
+// CHECK2: for.body24:
+// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK2-NEXT: [[MUL25:%.*]] = mul nsw i32 [[TMP17]], 3
+// CHECK2-NEXT: [[ADD26:%.*]] = add nsw i32 7, [[MUL25]]
+// CHECK2-NEXT: store i32 [[ADD26]], ptr [[J]], align 4
+// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4
+// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[J]], align 4
+// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP18]], i32 noundef [[TMP19]])
+// CHECK2-NEXT: br label [[FOR_INC:%.*]]
+// CHECK2: for.inc:
+// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP20]], 1
+// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK2-NEXT: br label [[FOR_COND15]], !llvm.loop [[LOOP3:![0-9]+]]
+// CHECK2: for.end:
+// CHECK2-NEXT: br label [[FOR_INC27:%.*]]
+// CHECK2: for.inc27:
+// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: [[INC28:%.*]] = add nsw i32 [[TMP21]], 1
+// CHECK2-NEXT: store i32 [[INC28]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: br label [[FOR_COND3]], !llvm.loop [[LOOP5:![0-9]+]]
+// CHECK2: for.end29:
+// CHECK2-NEXT: br label [[FOR_INC30:%.*]]
+// CHECK2: for.inc30:
+// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP22]], 5
+// CHECK2-NEXT: store i32 [[ADD31]], ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
+// CHECK2: for.end32:
+// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK2: omp.body.continue:
+// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK2: omp.inner.for.inc:
+// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: [[ADD33:%.*]] = add nsw i32 [[TMP23]], 1
+// CHECK2-NEXT: store i32 [[ADD33]], ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK2: omp.inner.for.end:
+// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK2: omp.loop.exit:
+// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
+// CHECK2-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP0]])
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define dso_local void @foo4(
+// CHECK2-SAME: ) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[K:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTFLOOR_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTFLOOR_1_IV_J:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTSTRIPE_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTSTRIPE_1_IV_J:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
+// CHECK2-NEXT: store i32 7, ptr [[I]], align 4
+// CHECK2-NEXT: store i32 7, ptr [[J]], align 4
+// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK2-NEXT: store i32 3, ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 3
+// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK2: cond.true:
+// CHECK2-NEXT: br label [[COND_END:%.*]]
+// CHECK2: cond.false:
+// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: br label [[COND_END]]
+// CHECK2: cond.end:
+// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
+// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK2-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK2: omp.inner.for.cond:
+// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
+// CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK2: omp.inner.for.body:
+// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP6]], 1
+// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 3
+// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 7, [[MUL]]
+// CHECK2-NEXT: store i32 [[ADD]], ptr [[K]], align 4
+// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP8]], 1
+// CHECK2-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 1
+// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], [[MUL4]]
+// CHECK2-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 5
+// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]]
+// CHECK2-NEXT: store i32 [[ADD6]], ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: store i32 0, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: br label [[FOR_COND:%.*]]
+// CHECK2: for.cond:
+// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: [[CMP7:%.*]] = icmp slt i32 [[TMP9]], 4
+// CHECK2-NEXT: br i1 [[CMP7]], label [[FOR_BODY:%.*]], label [[FOR_END37:%.*]]
+// CHECK2: for.body:
+// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: store i32 [[TMP10]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: br label [[FOR_COND8:%.*]]
+// CHECK2: for.cond8:
+// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP12]], 5
+// CHECK2-NEXT: [[CMP10:%.*]] = icmp slt i32 4, [[ADD9]]
+// CHECK2-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
+// CHECK2: cond.true11:
+// CHECK2-NEXT: br label [[COND_END14:%.*]]
+// CHECK2: cond.false12:
+// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP13]], 5
+// CHECK2-NEXT: br label [[COND_END14]]
+// CHECK2: cond.end14:
+// CHECK2-NEXT: [[COND15:%.*]] = phi i32 [ 4, [[COND_TRUE11]] ], [ [[ADD13]], [[COND_FALSE12]] ]
+// CHECK2-NEXT: [[CMP16:%.*]] = icmp slt i32 [[TMP11]], [[COND15]]
+// CHECK2-NEXT: br i1 [[CMP16]], label [[FOR_BODY17:%.*]], label [[FOR_END34:%.*]]
+// CHECK2: for.body17:
+// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: [[MUL18:%.*]] = mul nsw i32 [[TMP14]], 3
+// CHECK2-NEXT: [[ADD19:%.*]] = add nsw i32 7, [[MUL18]]
+// CHECK2-NEXT: store i32 [[ADD19]], ptr [[I]], align 4
+// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: store i32 [[TMP15]], ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK2-NEXT: br label [[FOR_COND20:%.*]]
+// CHECK2: for.cond20:
+// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: [[ADD21:%.*]] = add nsw i32 [[TMP17]], 5
+// CHECK2-NEXT: [[CMP22:%.*]] = icmp slt i32 4, [[ADD21]]
+// CHECK2-NEXT: br i1 [[CMP22]], label [[COND_TRUE23:%.*]], label [[COND_FALSE24:%.*]]
+// CHECK2: cond.true23:
+// CHECK2-NEXT: br label [[COND_END26:%.*]]
+// CHECK2: cond.false24:
+// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP18]], 5
+// CHECK2-NEXT: br label [[COND_END26]]
+// CHECK2: cond.end26:
+// CHECK2-NEXT: [[COND27:%.*]] = phi i32 [ 4, [[COND_TRUE23]] ], [ [[ADD25]], [[COND_FALSE24]] ]
+// CHECK2-NEXT: [[CMP28:%.*]] = icmp slt i32 [[TMP16]], [[COND27]]
+// CHECK2-NEXT: br i1 [[CMP28]], label [[FOR_BODY29:%.*]], label [[FOR_END:%.*]]
+// CHECK2: for.body29:
+// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK2-NEXT: [[MUL30:%.*]] = mul nsw i32 [[TMP19]], 3
+// CHECK2-NEXT: [[ADD31:%.*]] = add nsw i32 7, [[MUL30]]
+// CHECK2-NEXT: store i32 [[ADD31]], ptr [[J]], align 4
+// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4
+// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[J]], align 4
+// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP20]], i32 noundef [[TMP21]])
+// CHECK2-NEXT: br label [[FOR_INC:%.*]]
+// CHECK2: for.inc:
+// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP22]], 1
+// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTSTRIPE_1_IV_J]], align 4
+// CHECK2-NEXT: br label [[FOR_COND20]], !llvm.loop [[LOOP7:![0-9]+]]
+// CHECK2: for.end:
+// CHECK2-NEXT: br label [[FOR_INC32:%.*]]
+// CHECK2: for.inc32:
+// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: [[INC33:%.*]] = add nsw i32 [[TMP23]], 1
+// CHECK2-NEXT: store i32 [[INC33]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: br label [[FOR_COND8]], !llvm.loop [[LOOP8:![0-9]+]]
+// CHECK2: for.end34:
+// CHECK2-NEXT: br label [[FOR_INC35:%.*]]
+// CHECK2: for.inc35:
+// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: [[ADD36:%.*]] = add nsw i32 [[TMP24]], 5
+// CHECK2-NEXT: store i32 [[ADD36]], ptr [[DOTFLOOR_1_IV_J]], align 4
+// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
+// CHECK2: for.end37:
+// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK2: omp.body.continue:
+// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK2: omp.inner.for.inc:
+// CHECK2-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: [[ADD38:%.*]] = add nsw i32 [[TMP25]], 1
+// CHECK2-NEXT: store i32 [[ADD38]], ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK2: omp.inner.for.end:
+// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK2: omp.loop.exit:
+// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
+// CHECK2-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define dso_local void @foo5(
+// CHECK2-SAME: ) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
+// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i64, align 8
+// CHECK2-NEXT: [[DOTFLOOR_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTSTRIPE_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
+// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
+// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
+// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTFLOOR_0_IV_I11:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTSTRIPE_0_IV_I12:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[J13:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
+// CHECK2-NEXT: store i32 7, ptr [[I]], align 4
+// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP]], align 4
+// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP]], align 4
+// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 5
+// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 4, [[ADD]]
+// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK2: cond.true:
+// CHECK2-NEXT: br label [[COND_END:%.*]]
+// CHECK2: cond.false:
+// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP]], align 4
+// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], 5
+// CHECK2-NEXT: br label [[COND_END]]
+// CHECK2: cond.end:
+// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 4, [[COND_TRUE]] ], [ [[ADD4]], [[COND_FALSE]] ]
+// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]]
+// CHECK2-NEXT: [[SUB6:%.*]] = sub i32 [[SUB]], 1
+// CHECK2-NEXT: [[ADD7:%.*]] = add i32 [[SUB6]], 1
+// CHECK2-NEXT: [[DIV:%.*]] = udiv i32 [[ADD7]], 1
+// CHECK2-NEXT: [[CONV:%.*]] = zext i32 [[DIV]] to i64
+// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i64 1, [[CONV]]
+// CHECK2-NEXT: [[MUL8:%.*]] = mul nsw i64 [[MUL]], 4
+// CHECK2-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL8]], 1
+// CHECK2-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8
+// CHECK2-NEXT: store i32 0, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: store i32 [[TMP6]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: store i32 7, ptr [[J]], align 4
+// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK2-NEXT: [[CMP10:%.*]] = icmp slt i32 [[TMP7]], [[TMP8]]
+// CHECK2-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
+// CHECK2: omp.precond.then:
+// CHECK2-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
+// CHECK2-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8
+// CHECK2-NEXT: store i64 [[TMP9]], ptr [[DOTOMP_UB]], align 8
+// CHECK2-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
+// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK2-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1)
+// CHECK2-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
+// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8
+// CHECK2-NEXT: [[CMP14:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
+// CHECK2-NEXT: br i1 [[CMP14]], label [[COND_TRUE15:%.*]], label [[COND_FALSE16:%.*]]
+// CHECK2: cond.true15:
+// CHECK2-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8
+// CHECK2-NEXT: br label [[COND_END17:%.*]]
+// CHECK2: cond.false16:
+// CHECK2-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
+// CHECK2-NEXT: br label [[COND_END17]]
+// CHECK2: cond.end17:
+// CHECK2-NEXT: [[COND18:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE15]] ], [ [[TMP13]], [[COND_FALSE16]] ]
+// CHECK2-NEXT: store i64 [[COND18]], ptr [[DOTOMP_UB]], align 8
+// CHECK2-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
+// CHECK2-NEXT: store i64 [[TMP14]], ptr [[DOTOMP_IV]], align 8
+// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK2: omp.inner.for.cond:
+// CHECK2-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK2-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8
+// CHECK2-NEXT: [[CMP19:%.*]] = icmp sle i64 [[TMP15]], [[TMP16]]
+// CHECK2-NEXT: br i1 [[CMP19]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK2: omp.inner.for.body:
+// CHECK2-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[SUB20:%.*]] = sub i32 [[TMP18]], [[TMP19]]
+// CHECK2-NEXT: [[SUB21:%.*]] = sub i32 [[SUB20]], 1
+// CHECK2-NEXT: [[ADD22:%.*]] = add i32 [[SUB21]], 1
+// CHECK2-NEXT: [[DIV23:%.*]] = udiv i32 [[ADD22]], 1
+// CHECK2-NEXT: [[MUL24:%.*]] = mul i32 1, [[DIV23]]
+// CHECK2-NEXT: [[MUL25:%.*]] = mul i32 [[MUL24]], 4
+// CHECK2-NEXT: [[CONV26:%.*]] = zext i32 [[MUL25]] to i64
+// CHECK2-NEXT: [[DIV27:%.*]] = sdiv i64 [[TMP17]], [[CONV26]]
+// CHECK2-NEXT: [[MUL28:%.*]] = mul nsw i64 [[DIV27]], 5
+// CHECK2-NEXT: [[ADD29:%.*]] = add nsw i64 0, [[MUL28]]
+// CHECK2-NEXT: [[CONV30:%.*]] = trunc i64 [[ADD29]] to i32
+// CHECK2-NEXT: store i32 [[CONV30]], ptr [[DOTFLOOR_0_IV_I11]], align 4
+// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[CONV31:%.*]] = sext i32 [[TMP20]] to i64
+// CHECK2-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK2-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[SUB32:%.*]] = sub i32 [[TMP23]], [[TMP24]]
+// CHECK2-NEXT: [[SUB33:%.*]] = sub i32 [[SUB32]], 1
+// CHECK2-NEXT: [[ADD34:%.*]] = add i32 [[SUB33]], 1
+// CHECK2-NEXT: [[DIV35:%.*]] = udiv i32 [[ADD34]], 1
+// CHECK2-NEXT: [[MUL36:%.*]] = mul i32 1, [[DIV35]]
+// CHECK2-NEXT: [[MUL37:%.*]] = mul i32 [[MUL36]], 4
+// CHECK2-NEXT: [[CONV38:%.*]] = zext i32 [[MUL37]] to i64
+// CHECK2-NEXT: [[DIV39:%.*]] = sdiv i64 [[TMP22]], [[CONV38]]
+// CHECK2-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK2-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[SUB40:%.*]] = sub i32 [[TMP25]], [[TMP26]]
+// CHECK2-NEXT: [[SUB41:%.*]] = sub i32 [[SUB40]], 1
+// CHECK2-NEXT: [[ADD42:%.*]] = add i32 [[SUB41]], 1
+// CHECK2-NEXT: [[DIV43:%.*]] = udiv i32 [[ADD42]], 1
+// CHECK2-NEXT: [[MUL44:%.*]] = mul i32 1, [[DIV43]]
+// CHECK2-NEXT: [[MUL45:%.*]] = mul i32 [[MUL44]], 4
+// CHECK2-NEXT: [[CONV46:%.*]] = zext i32 [[MUL45]] to i64
+// CHECK2-NEXT: [[MUL47:%.*]] = mul nsw i64 [[DIV39]], [[CONV46]]
+// CHECK2-NEXT: [[SUB48:%.*]] = sub nsw i64 [[TMP21]], [[MUL47]]
+// CHECK2-NEXT: [[DIV49:%.*]] = sdiv i64 [[SUB48]], 4
+// CHECK2-NEXT: [[MUL50:%.*]] = mul nsw i64 [[DIV49]], 1
+// CHECK2-NEXT: [[ADD51:%.*]] = add nsw i64 [[CONV31]], [[MUL50]]
+// CHECK2-NEXT: [[CONV52:%.*]] = trunc i64 [[ADD51]] to i32
+// CHECK2-NEXT: store i32 [[CONV52]], ptr [[DOTSTRIPE_0_IV_I12]], align 4
+// CHECK2-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK2-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK2-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK2-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[SUB53:%.*]] = sub i32 [[TMP29]], [[TMP30]]
+// CHECK2-NEXT: [[SUB54:%.*]] = sub i32 [[SUB53]], 1
+// CHECK2-NEXT: [[ADD55:%.*]] = add i32 [[SUB54]], 1
+// CHECK2-NEXT: [[DIV56:%.*]] = udiv i32 [[ADD55]], 1
+// CHECK2-NEXT: [[MUL57:%.*]] = mul i32 1, [[DIV56]]
+// CHECK2-NEXT: [[MUL58:%.*]] = mul i32 [[MUL57]], 4
+// CHECK2-NEXT: [[CONV59:%.*]] = zext i32 [[MUL58]] to i64
+// CHECK2-NEXT: [[DIV60:%.*]] = sdiv i64 [[TMP28]], [[CONV59]]
+// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK2-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[SUB61:%.*]] = sub i32 [[TMP31]], [[TMP32]]
+// CHECK2-NEXT: [[SUB62:%.*]] = sub i32 [[SUB61]], 1
+// CHECK2-NEXT: [[ADD63:%.*]] = add i32 [[SUB62]], 1
+// CHECK2-NEXT: [[DIV64:%.*]] = udiv i32 [[ADD63]], 1
+// CHECK2-NEXT: [[MUL65:%.*]] = mul i32 1, [[DIV64]]
+// CHECK2-NEXT: [[MUL66:%.*]] = mul i32 [[MUL65]], 4
+// CHECK2-NEXT: [[CONV67:%.*]] = zext i32 [[MUL66]] to i64
+// CHECK2-NEXT: [[MUL68:%.*]] = mul nsw i64 [[DIV60]], [[CONV67]]
+// CHECK2-NEXT: [[SUB69:%.*]] = sub nsw i64 [[TMP27]], [[MUL68]]
+// CHECK2-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK2-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK2-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[SUB70:%.*]] = sub i32 [[TMP35]], [[TMP36]]
+// CHECK2-NEXT: [[SUB71:%.*]] = sub i32 [[SUB70]], 1
+// CHECK2-NEXT: [[ADD72:%.*]] = add i32 [[SUB71]], 1
+// CHECK2-NEXT: [[DIV73:%.*]] = udiv i32 [[ADD72]], 1
+// CHECK2-NEXT: [[MUL74:%.*]] = mul i32 1, [[DIV73]]
+// CHECK2-NEXT: [[MUL75:%.*]] = mul i32 [[MUL74]], 4
+// CHECK2-NEXT: [[CONV76:%.*]] = zext i32 [[MUL75]] to i64
+// CHECK2-NEXT: [[DIV77:%.*]] = sdiv i64 [[TMP34]], [[CONV76]]
+// CHECK2-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
+// CHECK2-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[SUB78:%.*]] = sub i32 [[TMP37]], [[TMP38]]
+// CHECK2-NEXT: [[SUB79:%.*]] = sub i32 [[SUB78]], 1
+// CHECK2-NEXT: [[ADD80:%.*]] = add i32 [[SUB79]], 1
+// CHECK2-NEXT: [[DIV81:%.*]] = udiv i32 [[ADD80]], 1
+// CHECK2-NEXT: [[MUL82:%.*]] = mul i32 1, [[DIV81]]
+// CHECK2-NEXT: [[MUL83:%.*]] = mul i32 [[MUL82]], 4
+// CHECK2-NEXT: [[CONV84:%.*]] = zext i32 [[MUL83]] to i64
+// CHECK2-NEXT: [[MUL85:%.*]] = mul nsw i64 [[DIV77]], [[CONV84]]
+// CHECK2-NEXT: [[SUB86:%.*]] = sub nsw i64 [[TMP33]], [[MUL85]]
+// CHECK2-NEXT: [[DIV87:%.*]] = sdiv i64 [[SUB86]], 4
+// CHECK2-NEXT: [[MUL88:%.*]] = mul nsw i64 [[DIV87]], 4
+// CHECK2-NEXT: [[SUB89:%.*]] = sub nsw i64 [[SUB69]], [[MUL88]]
+// CHECK2-NEXT: [[MUL90:%.*]] = mul nsw i64 [[SUB89]], 3
+// CHECK2-NEXT: [[ADD91:%.*]] = add nsw i64 7, [[MUL90]]
+// CHECK2-NEXT: [[CONV92:%.*]] = trunc i64 [[ADD91]] to i32
+// CHECK2-NEXT: store i32 [[CONV92]], ptr [[J13]], align 4
+// CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I12]], align 4
+// CHECK2-NEXT: [[MUL93:%.*]] = mul nsw i32 [[TMP39]], 3
+// CHECK2-NEXT: [[ADD94:%.*]] = add nsw i32 7, [[MUL93]]
+// CHECK2-NEXT: store i32 [[ADD94]], ptr [[I]], align 4
+// CHECK2-NEXT: [[TMP40:%.*]] = load i32, ptr [[I]], align 4
+// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[J13]], align 4
+// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP40]], i32 noundef [[TMP41]])
+// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK2: omp.body.continue:
+// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK2: omp.inner.for.inc:
+// CHECK2-NEXT: [[TMP42:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
+// CHECK2-NEXT: [[ADD95:%.*]] = add nsw i64 [[TMP42]], 1
+// CHECK2-NEXT: store i64 [[ADD95]], ptr [[DOTOMP_IV]], align 8
+// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK2: omp.inner.for.end:
+// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK2: omp.loop.exit:
+// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP0]])
+// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
+// CHECK2: omp.precond.end:
+// CHECK2-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define dso_local void @foo6(
+// CHECK2-SAME: ) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 0, ptr @foo6.omp_outlined)
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define internal void @foo6.omp_outlined(
+// CHECK2-SAME: ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR4:[0-9]+]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTFLOOR_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[DOTSTRIPE_0_IV_I:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK2-NEXT: store i32 7, ptr [[I]], align 4
+// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
+// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
+// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
+// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
+// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 0
+// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK2: cond.true:
+// CHECK2-NEXT: br label [[COND_END:%.*]]
+// CHECK2: cond.false:
+// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: br label [[COND_END]]
+// CHECK2: cond.end:
+// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 0, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
+// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
+// CHECK2-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
+// CHECK2: omp.inner.for.cond:
+// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
+// CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
+// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
+// CHECK2: omp.inner.for.body:
+// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
+// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
+// CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: store i32 [[TMP8]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: br label [[FOR_COND:%.*]]
+// CHECK2: for.cond:
+// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 5
+// CHECK2-NEXT: [[CMP3:%.*]] = icmp slt i32 4, [[ADD2]]
+// CHECK2-NEXT: br i1 [[CMP3]], label [[COND_TRUE4:%.*]], label [[COND_FALSE5:%.*]]
+// CHECK2: cond.true4:
+// CHECK2-NEXT: br label [[COND_END7:%.*]]
+// CHECK2: cond.false5:
+// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 5
+// CHECK2-NEXT: br label [[COND_END7]]
+// CHECK2: cond.end7:
+// CHECK2-NEXT: [[COND8:%.*]] = phi i32 [ 4, [[COND_TRUE4]] ], [ [[ADD6]], [[COND_FALSE5]] ]
+// CHECK2-NEXT: [[CMP9:%.*]] = icmp slt i32 [[TMP9]], [[COND8]]
+// CHECK2-NEXT: br i1 [[CMP9]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
+// CHECK2: for.body:
+// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP12]], 3
+// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 7, [[MUL10]]
+// CHECK2-NEXT: store i32 [[ADD11]], ptr [[I]], align 4
+// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4
+// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP13]])
+// CHECK2-NEXT: br label [[FOR_INC:%.*]]
+// CHECK2: for.inc:
+// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP14]], 1
+// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTSTRIPE_0_IV_I]], align 4
+// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
+// CHECK2: for.end:
+// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
+// CHECK2: omp.body.continue:
+// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
+// CHECK2: omp.inner.for.inc:
+// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP15]], 1
+// CHECK2-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4
+// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
+// CHECK2: omp.inner.for.end:
+// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
+// CHECK2: omp.loop.exit:
+// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define dso_local void @foo8(
+// CHECK2-SAME: i32 noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define dso_local void @foo9(
+// CHECK2-SAME: ptr noundef byval([[STRUCT_DATA_T:%.*]]) align 8 [[DATA:%.*]]) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define dso_local void @tfoo7(
+// CHECK2-SAME: ) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: call void @_Z4foo7IiTnT_Li3ETnS0_Li5EEvS0_S0_(i32 noundef 0, i32 noundef 42)
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define linkonce_odr void @_Z4foo7IiTnT_Li3ETnS0_Li5EEvS0_S0_(
+// CHECK2-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]]) #[[ATTR1]] comdat {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4
+// CHECK2-NEXT: store i32 [[START]], ptr [[START_ADDR]], align 4
+// CHECK2-NEXT: store i32 [[END]], ptr [[END_ADDR]], align 4
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define internal void @_GLOBAL__sub_I_stripe_codegen.cpp(
+// CHECK2-SAME: ) #[[ATTR0]] section ".text.startup" {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: call void @__cxx_global_var_init()
+// CHECK2-NEXT: ret void
+//
+//.
+// CHECK1: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]}
+// CHECK1: [[META4]] = !{!"llvm.loop.mustprogress"}
+// CHECK1: [[LOOP5]] = distinct !{[[LOOP5]], [[META4]]}
+// CHECK1: [[LOOP6]] = distinct !{[[LOOP6]], [[META4]]}
+// CHECK1: [[LOOP7]] = distinct !{[[LOOP7]], [[META4]]}
+// CHECK1: [[LOOP8]] = distinct !{[[LOOP8]], [[META4]]}
+// CHECK1: [[LOOP9]] = distinct !{[[LOOP9]], [[META4]]}
+// CHECK1: [[LOOP10]] = distinct !{[[LOOP10]], [[META4]]}
+//.
+// CHECK2: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]}
+// CHECK2: [[META4]] = !{!"llvm.loop.mustprogress"}
+// CHECK2: [[LOOP5]] = distinct !{[[LOOP5]], [[META4]]}
+// CHECK2: [[LOOP6]] = distinct !{[[LOOP6]], [[META4]]}
+// CHECK2: [[LOOP7]] = distinct !{[[LOOP7]], [[META4]]}
+// CHECK2: [[LOOP8]] = distinct !{[[LOOP8]], [[META4]]}
+// CHECK2: [[LOOP9]] = distinct !{[[LOOP9]], [[META4]]}
+// CHECK2: [[LOOP10]] = distinct !{[[LOOP10]], [[META4]]}
+//.
diff --git a/clang/test/OpenMP/stripe_codegen_for_dependent.cpp b/clang/test/OpenMP/stripe_codegen_for_dependent.cpp
new file mode 100644
index 00000000000000..6b312431ad48c8
--- /dev/null
+++ b/clang/test/OpenMP/stripe_codegen_for_dependent.cpp
@@ -0,0 +1,193 @@
+// Check code generation
+// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=IR
+
+// Check same results after serialization round-trip
+// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-pch -o %t %s
+// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=IR
+// expected-no-diagnostics
+
+// The loop trip count used by #pragma omp for depends on code generated
+// by #pragma omp stripe. Check that these PreInits are emitted before
+// the code generated by #pragma omp for.
+
+#ifndef HEADER
+#define HEADER
+
+// placeholder for loop body code.
+extern "C" void body(...) {}
+
+
+// IR-LABEL: define {{.*}}@func(
+// IR-NEXT: [[ENTRY:.*]]:
+// IR-NEXT: %[[START_ADDR:.+]] = alloca i32, align 4
+// IR-NEXT: %[[END_ADDR:.+]] = alloca i32, align 4
+// IR-NEXT: %[[STEP_ADDR:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTOMP_IV:.+]] = alloca i32, align 4
+// IR-NEXT: %[[TMP:.+]] = alloca i32, align 4
+// IR-NEXT: %[[I:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTCAPTURE_EXPR_:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTCAPTURE_EXPR_1:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTNEW_STEP:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTCAPTURE_EXPR_2:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTCAPTURE_EXPR_5:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTCAPTURE_EXPR_7:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTFLOOR_0_IV_I:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTOMP_LB:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTOMP_UB:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTOMP_STRIDE:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTOMP_IS_LAST:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTFLOOR_0_IV_I11:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTSTRIPE_0_IV_I:.+]] = alloca i32, align 4
+// IR-NEXT: %[[TMP0:.+]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:.+]])
+// IR-NEXT: store i32 %[[START:.+]], ptr %[[START_ADDR]], align 4
+// IR-NEXT: store i32 %[[END:.+]], ptr %[[END_ADDR]], align 4
+// IR-NEXT: store i32 %[[STEP:.+]], ptr %[[STEP_ADDR]], align 4
+// IR-NEXT: %[[TMP1:.+]] = load i32, ptr %[[START_ADDR]], align 4
+// IR-NEXT: store i32 %[[TMP1]], ptr %[[I]], align 4
+// IR-NEXT: %[[TMP2:.+]] = load i32, ptr %[[START_ADDR]], align 4
+// IR-NEXT: store i32 %[[TMP2]], ptr %[[DOTCAPTURE_EXPR_]], align 4
+// IR-NEXT: %[[TMP3:.+]] = load i32, ptr %[[END_ADDR]], align 4
+// IR-NEXT: store i32 %[[TMP3]], ptr %[[DOTCAPTURE_EXPR_1]], align 4
+// IR-NEXT: %[[TMP4:.+]] = load i32, ptr %[[STEP_ADDR]], align 4
+// IR-NEXT: store i32 %[[TMP4]], ptr %[[DOTNEW_STEP]], align 4
+// IR-NEXT: %[[TMP5:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_1]], align 4
+// IR-NEXT: %[[TMP6:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_]], align 4
+// IR-NEXT: %[[SUB:.+]] = sub i32 %[[TMP5]], %[[TMP6]]
+// IR-NEXT: %[[SUB3:.+]] = sub i32 %[[SUB]], 1
+// IR-NEXT: %[[TMP7:.+]] = load i32, ptr %[[DOTNEW_STEP]], align 4
+// IR-NEXT: %[[ADD:.+]] = add i32 %[[SUB3]], %[[TMP7]]
+// IR-NEXT: %[[TMP8:.+]] = load i32, ptr %[[DOTNEW_STEP]], align 4
+// IR-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP8]]
+// IR-NEXT: %[[SUB4:.+]] = sub i32 %[[DIV]], 1
+// IR-NEXT: store i32 %[[SUB4]], ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: %[[TMP9:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: %[[ADD6:.+]] = add i32 %[[TMP9]], 1
+// IR-NEXT: store i32 %[[ADD6]], ptr %[[DOTCAPTURE_EXPR_5]], align 4
+// IR-NEXT: %[[TMP10:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_5]], align 4
+// IR-NEXT: %[[SUB8:.+]] = sub i32 %[[TMP10]], -3
+// IR-NEXT: %[[DIV9:.+]] = udiv i32 %[[SUB8]], 4
+// IR-NEXT: %[[SUB10:.+]] = sub i32 %[[DIV9]], 1
+// IR-NEXT: store i32 %[[SUB10]], ptr %[[DOTCAPTURE_EXPR_7]], align 4
+// IR-NEXT: store i32 0, ptr %[[DOTFLOOR_0_IV_I]], align 4
+// IR-NEXT: %[[TMP11:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_5]], align 4
+// IR-NEXT: %[[CMP:.+]] = icmp ult i32 0, %[[TMP11]]
+// IR-NEXT: br i1 %[[CMP]], label %[[OMP_PRECOND_THEN:.+]], label %[[OMP_PRECOND_END:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[OMP_PRECOND_THEN]]:
+// IR-NEXT: store i32 0, ptr %[[DOTOMP_LB]], align 4
+// IR-NEXT: %[[TMP12:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_7]], align 4
+// IR-NEXT: store i32 %[[TMP12]], ptr %[[DOTOMP_UB]], align 4
+// IR-NEXT: store i32 1, ptr %[[DOTOMP_STRIDE]], align 4
+// IR-NEXT: store i32 0, ptr %[[DOTOMP_IS_LAST]], align 4
+// IR-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1:.+]], i32 %[[TMP0]], i32 34, ptr %[[DOTOMP_IS_LAST]], ptr %[[DOTOMP_LB]], ptr %[[DOTOMP_UB]], ptr %[[DOTOMP_STRIDE]], i32 1, i32 1)
+// IR-NEXT: %[[TMP13:.+]] = load i32, ptr %[[DOTOMP_UB]], align 4
+// IR-NEXT: %[[TMP14:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_7]], align 4
+// IR-NEXT: %[[CMP12:.+]] = icmp ugt i32 %[[TMP13]], %[[TMP14]]
+// IR-NEXT: br i1 %[[CMP12]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[COND_TRUE]]:
+// IR-NEXT: %[[TMP15:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_7]], align 4
+// IR-NEXT: br label %[[COND_END:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[COND_FALSE]]:
+// IR-NEXT: %[[TMP16:.+]] = load i32, ptr %[[DOTOMP_UB]], align 4
+// IR-NEXT: br label %[[COND_END]]
+// IR-EMPTY:
+// IR-NEXT: [[COND_END]]:
+// IR-NEXT: %[[COND:.+]] = phi i32 [ %[[TMP15]], %[[COND_TRUE]] ], [ %[[TMP16]], %[[COND_FALSE]] ]
+// IR-NEXT: store i32 %[[COND]], ptr %[[DOTOMP_UB]], align 4
+// IR-NEXT: %[[TMP17:.+]] = load i32, ptr %[[DOTOMP_LB]], align 4
+// IR-NEXT: store i32 %[[TMP17]], ptr %[[DOTOMP_IV]], align 4
+// IR-NEXT: br label %[[OMP_INNER_FOR_COND:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[OMP_INNER_FOR_COND]]:
+// IR-NEXT: %[[TMP18:.+]] = load i32, ptr %[[DOTOMP_IV]], align 4
+// IR-NEXT: %[[TMP19:.+]] = load i32, ptr %[[DOTOMP_UB]], align 4
+// IR-NEXT: %[[ADD13:.+]] = add i32 %[[TMP19]], 1
+// IR-NEXT: %[[CMP14:.+]] = icmp ult i32 %[[TMP18]], %[[ADD13]]
+// IR-NEXT: br i1 %[[CMP14]], label %[[OMP_INNER_FOR_BODY:.+]], label %[[OMP_INNER_FOR_END:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[OMP_INNER_FOR_BODY]]:
+// IR-NEXT: %[[TMP20:.+]] = load i32, ptr %[[DOTOMP_IV]], align 4
+// IR-NEXT: %[[MUL:.+]] = mul i32 %[[TMP20]], 4
+// IR-NEXT: %[[ADD15:.+]] = add i32 0, %[[MUL]]
+// IR-NEXT: store i32 %[[ADD15]], ptr %[[DOTFLOOR_0_IV_I11]], align 4
+// IR-NEXT: %[[TMP21:.+]] = load i32, ptr %[[DOTFLOOR_0_IV_I11]], align 4
+// IR-NEXT: store i32 %[[TMP21]], ptr %[[DOTSTRIPE_0_IV_I]], align 4
+// IR-NEXT: br label %[[FOR_COND:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[FOR_COND]]:
+// IR-NEXT: %[[TMP22:.+]] = load i32, ptr %[[DOTSTRIPE_0_IV_I]], align 4
+// IR-NEXT: %[[TMP23:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: %[[ADD16:.+]] = add i32 %[[TMP23]], 1
+// IR-NEXT: %[[TMP24:.+]] = load i32, ptr %[[DOTFLOOR_0_IV_I11]], align 4
+// IR-NEXT: %[[ADD17:.+]] = add i32 %[[TMP24]], 4
+// IR-NEXT: %[[CMP18:.+]] = icmp ult i32 %[[ADD16]], %[[ADD17]]
+// IR-NEXT: br i1 %[[CMP18]], label %[[COND_TRUE19:.+]], label %[[COND_FALSE21:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[COND_TRUE19]]:
+// IR-NEXT: %[[TMP25:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: %[[ADD20:.+]] = add i32 %[[TMP25]], 1
+// IR-NEXT: br label %[[COND_END23:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[COND_FALSE21]]:
+// IR-NEXT: %[[TMP26:.+]] = load i32, ptr %[[DOTFLOOR_0_IV_I11]], align 4
+// IR-NEXT: %[[ADD22:.+]] = add i32 %[[TMP26]], 4
+// IR-NEXT: br label %[[COND_END23]]
+// IR-EMPTY:
+// IR-NEXT: [[COND_END23]]:
+// IR-NEXT: %[[COND24:.+]] = phi i32 [ %[[ADD20]], %[[COND_TRUE19]] ], [ %[[ADD22]], %[[COND_FALSE21]] ]
+// IR-NEXT: %[[CMP25:.+]] = icmp ult i32 %[[TMP22]], %[[COND24]]
+// IR-NEXT: br i1 %[[CMP25]], label %[[FOR_BODY:.+]], label %[[FOR_END:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[FOR_BODY]]:
+// IR-NEXT: %[[TMP27:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_]], align 4
+// IR-NEXT: %[[TMP28:.+]] = load i32, ptr %[[DOTSTRIPE_0_IV_I]], align 4
+// IR-NEXT: %[[TMP29:.+]] = load i32, ptr %[[DOTNEW_STEP]], align 4
+// IR-NEXT: %[[MUL26:.+]] = mul i32 %[[TMP28]], %[[TMP29]]
+// IR-NEXT: %[[ADD27:.+]] = add i32 %[[TMP27]], %[[MUL26]]
+// IR-NEXT: store i32 %[[ADD27]], ptr %[[I]], align 4
+// IR-NEXT: %[[TMP30:.+]] = load i32, ptr %[[START_ADDR]], align 4
+// IR-NEXT: %[[TMP31:.+]] = load i32, ptr %[[END_ADDR]], align 4
+// IR-NEXT: %[[TMP32:.+]] = load i32, ptr %[[STEP_ADDR]], align 4
+// IR-NEXT: %[[TMP33:.+]] = load i32, ptr %[[I]], align 4
+// IR-NEXT: call void (...) @body(i32 noundef %[[TMP30]], i32 noundef %[[TMP31]], i32 noundef %[[TMP32]], i32 noundef %[[TMP33]])
+// IR-NEXT: br label %[[FOR_INC:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[FOR_INC]]:
+// IR-NEXT: %[[TMP34:.+]] = load i32, ptr %[[DOTSTRIPE_0_IV_I]], align 4
+// IR-NEXT: %[[INC:.+]] = add i32 %[[TMP34]], 1
+// IR-NEXT: store i32 %[[INC]], ptr %[[DOTSTRIPE_0_IV_I]], align 4
+// IR-NEXT: br label %[[FOR_COND]], !llvm.loop ![[LOOP3:[0-9]+]]
+// IR-EMPTY:
+// IR-NEXT: [[FOR_END]]:
+// IR-NEXT: br label %[[OMP_BODY_CONTINUE:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[OMP_BODY_CONTINUE]]:
+// IR-NEXT: br label %[[OMP_INNER_FOR_INC:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[OMP_INNER_FOR_INC]]:
+// IR-NEXT: %[[TMP35:.+]] = load i32, ptr %[[DOTOMP_IV]], align 4
+// IR-NEXT: %[[ADD28:.+]] = add i32 %[[TMP35]], 1
+// IR-NEXT: store i32 %[[ADD28]], ptr %[[DOTOMP_IV]], align 4
+// IR-NEXT: br label %[[OMP_INNER_FOR_COND]]
+// IR-EMPTY:
+// IR-NEXT: [[OMP_INNER_FOR_END]]:
+// IR-NEXT: br label %[[OMP_LOOP_EXIT:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[OMP_LOOP_EXIT]]:
+// IR-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 %[[TMP0]])
+// IR-NEXT: br label %[[OMP_PRECOND_END]]
+// IR-EMPTY:
+// IR-NEXT: [[OMP_PRECOND_END]]:
+// IR-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:.+]], i32 %[[TMP0]])
+// IR-NEXT: ret void
+// IR-NEXT: }
+extern "C" void func(int start, int end, int step) {
+#pragma omp for
+#pragma omp stripe sizes(4)
+ for (int i = start; i < end; i += step)
+ body(start, end, step, i);
+}
+
+#endif /* HEADER */
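
For readers skimming the IR checks above: the point of this test is that the stripe trip-count pre-computation (the PreInits) must be emitted before the worksharing init. A rough source-level sketch of the loop structure the CHECK lines describe; names, the min() shape, and the assumptions end > start and step > 0 are illustrative only, the real lowering works on the canonicalized IV as shown in the IR:

extern "C" void body(...);

void func_sketch(int start, int end, int step) {
  // PreInits: trip count of the original loop, computed up front because
  // the worksharing loop below iterates over strips, not original iterations.
  unsigned trip = ((unsigned)(end - start) - 1 + (unsigned)step) / (unsigned)step;

  // The #pragma omp for effectively distributes the strip starts among threads.
  for (unsigned floor = 0; floor < trip; floor += 4) {
    unsigned hi = floor + 4 < trip ? floor + 4 : trip;   // min(floor + 4, trip)
    // The stripe loop runs serially within each strip.
    for (unsigned iv = floor; iv < hi; ++iv)
      body(start, end, step, start + (int)iv * (int)step);
  }
}
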
diff --git a/clang/test/OpenMP/stripe_messages.cpp b/clang/test/OpenMP/stripe_messages.cpp
new file mode 100644
index 00000000000000..ab89b0feddb0ae
--- /dev/null
+++ b/clang/test/OpenMP/stripe_messages.cpp
@@ -0,0 +1,163 @@
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -std=c++17 -fopenmp -fsyntax-only -Wuninitialized -verify %s
+
+void func() {
+
+ // expected-error@+1 {{expected '('}}
+ #pragma omp stripe sizes
+ ;
+
+ // expected-error@+2 {{expected expression}}
+ // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
+ #pragma omp stripe sizes(
+ ;
+
+ // expected-error@+1 {{expected expression}}
+ #pragma omp stripe sizes()
+ ;
+
+ // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
+ #pragma omp stripe sizes(5
+ for (int i = 0; i < 7; ++i);
+
+ // expected-error@+2 {{expected expression}}
+ // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
+ #pragma omp stripe sizes(5,
+ ;
+
+ // expected-error@+1 {{expected expression}}
+ #pragma omp stripe sizes(5,)
+ ;
+
+ // expected-error@+2 {{expected expression}}
+ // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
+ #pragma omp stripe sizes(5+
+ ;
+
+ // expected-error@+1 {{expected expression}}
+ #pragma omp stripe sizes(5+)
+ ;
+
+ // expected-error@+1 {{expected expression}}
+ #pragma omp stripe sizes(for)
+ ;
+
+ // expected-error@+1 {{argument to 'sizes' clause must be a strictly positive integer value}}
+ #pragma omp stripe sizes(0)
+ for (int i = 0; i < 7; ++i)
+ ;
+
+ // expected-warning@+2 {{extra tokens at the end of '#pragma omp stripe' are ignored}}
+ // expected-error@+1 {{directive '#pragma omp stripe' requires the 'sizes' clause}}
+ #pragma omp stripe foo
+ ;
+
+ // expected-error@+1 {{directive '#pragma omp stripe' cannot contain more than one 'sizes' clause}}
+ #pragma omp stripe sizes(5) sizes(5)
+ for (int i = 0; i < 7; ++i)
+ ;
+
+ // expected-error@+1 {{unexpected OpenMP clause 'collapse' in directive '#pragma omp stripe'}}
+ #pragma omp stripe sizes(5) collapse(2)
+ for (int i = 0; i < 7; ++i)
+ ;
+
+ {
+ // expected-error@+2 {{expected statement}}
+ #pragma omp stripe sizes(5)
+ }
+
+ // expected-error@+2 {{statement after '#pragma omp stripe' must be a for loop}}
+ #pragma omp stripe sizes(5)
+ int b = 0;
+
+ // expected-error@+3 {{statement after '#pragma omp stripe' must be a for loop}}
+ #pragma omp stripe sizes(5,5)
+ for (int i = 0; i < 7; ++i)
+ ;
+
+ // expected-error@+2 {{statement after '#pragma omp stripe' must be a for loop}}
+ #pragma omp stripe sizes(5,5)
+ for (int i = 0; i < 7; ++i) {
+ int k = 3;
+ for (int j = 0; j < 7; ++j)
+ ;
+ }
+
+ // expected-error@+3 {{expected loop invariant expression}}
+ #pragma omp stripe sizes(5,5)
+ for (int i = 0; i < 7; ++i)
+ for (int j = i; j < 7; ++j)
+ ;
+
+ // expected-error@+3 {{expected loop invariant expression}}
+ #pragma omp stripe sizes(5,5)
+ for (int i = 0; i < 7; ++i)
+ for (int j = 0; j < i; ++j)
+ ;
+
+ // expected-error@+3 {{expected loop invariant expression}}
+ #pragma omp stripe sizes(5,5)
+ for (int i = 0; i < 7; ++i)
+ for (int j = 0; j < i; ++j)
+ ;
+
+ // expected-error@+5 {{expected 3 for loops after '#pragma omp for', but found only 2}}
+ // expected-note@+1 {{as specified in 'collapse' clause}}
+ #pragma omp for collapse(3)
+ #pragma omp stripe sizes(5)
+ for (int i = 0; i < 7; ++i)
+ ;
+
+ // expected-error@+2 {{statement after '#pragma omp stripe' must be a for loop}}
+ #pragma omp stripe sizes(5)
+ #pragma omp for
+ for (int i = 0; i < 7; ++i)
+ ;
+
+ // expected-error@+2 {{condition of OpenMP for loop must be a relational comparison ('<', '<=', '>', '>=', or '!=') of loop variable 'i'}}
+ #pragma omp stripe sizes(5)
+ for (int i = 0; i/3<7; ++i)
+ ;
+
+ // expected-error@+2 {{expression must have integral or unscoped enumeration type, not 'struct S'}}
+ struct S{} s;
+ #pragma omp stripe sizes(s)
+ for (int i = 0; i < 7; ++i)
+ ;
+}
+
+
+template <typename T>
+static void templated_func() {
+ // In a template context, but expression itself not instantiation-dependent
+
+ // expected-error@+1 {{argument to 'sizes' clause must be a strictly positive integer value}}
+ #pragma omp stripe sizes(0)
+ for (int i = 0; i < 7; ++i)
+ ;
+}
+
+template <int S>
+static void templated_func_value_dependent() {
+ // expected-error@+1 {{argument to 'sizes' clause must be a strictly positive integer value}}
+ #pragma omp stripe sizes(S)
+ for (int i = 0; i < 7; ++i)
+ ;
+}
+
+template <typename T>
+static void templated_func_type_dependent() {
+ constexpr T s = 0;
+ // expected-error@+1 {{argument to 'sizes' clause must be a strictly positive integer value}}
+ #pragma omp stripe sizes(s)
+ for (int i = 0; i < 7; ++i)
+ ;
+}
+
+void template_inst() {
+ templated_func<int>();
+ // expected-note@+1 {{in instantiation of function template specialization 'templated_func_value_dependent<0>' requested here}}
+ templated_func_value_dependent<0>();
+ // expected-note@+1 {{in instantiation of function template specialization 'templated_func_type_dependent<int>' requested here}}
+ templated_func_type_dependent<int>();
+}
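
For contrast with the rejected forms above, this is the shape the new diagnostics leave alone, a sketch only, assuming the same rules the sizes(5,5) cases exercise: one size expression per associated loop, a perfectly nested loop nest, and loop-invariant inner bounds.

void well_formed() {
  // Two sizes, two perfectly nested loops, inner bounds independent of 'i'.
  #pragma omp stripe sizes(5, 5)
  for (int i = 0; i < 7; ++i)
    for (int j = 0; j < 7; ++j)
      ;
}
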
diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp
index 221f419861af10..5057080de88165 100644
--- a/clang/tools/libclang/CIndex.cpp
+++ b/clang/tools/libclang/CIndex.cpp
@@ -2193,6 +2193,7 @@ class EnqueueVisitor : public ConstStmtVisitor<EnqueueVisitor, void>,
void
VisitOMPLoopTransformationDirective(const OMPLoopTransformationDirective *D);
void VisitOMPTileDirective(const OMPTileDirective *D);
+ void VisitOMPStripeDirective(const OMPStripeDirective *D);
void VisitOMPUnrollDirective(const OMPUnrollDirective *D);
void VisitOMPReverseDirective(const OMPReverseDirective *D);
void VisitOMPInterchangeDirective(const OMPInterchangeDirective *D);
@@ -3283,6 +3284,10 @@ void EnqueueVisitor::VisitOMPTileDirective(const OMPTileDirective *D) {
VisitOMPLoopTransformationDirective(D);
}
+void EnqueueVisitor::VisitOMPStripeDirective(const OMPStripeDirective *D) {
+ VisitOMPLoopTransformationDirective(D);
+}
+
void EnqueueVisitor::VisitOMPUnrollDirective(const OMPUnrollDirective *D) {
VisitOMPLoopTransformationDirective(D);
}
@@ -6175,6 +6180,8 @@ CXString clang_getCursorKindSpelling(enum CXCursorKind Kind) {
return cxstring::createRef("OMPSimdDirective");
case CXCursor_OMPTileDirective:
return cxstring::createRef("OMPTileDirective");
+ case CXCursor_OMPStripeDirective:
+ return cxstring::createRef("OMPStripeDirective");
case CXCursor_OMPUnrollDirective:
return cxstring::createRef("OMPUnrollDirective");
case CXCursor_OMPReverseDirective:
diff --git a/clang/tools/libclang/CXCursor.cpp b/clang/tools/libclang/CXCursor.cpp
index c8cf51d8061328..07bf7d50d8e664 100644
--- a/clang/tools/libclang/CXCursor.cpp
+++ b/clang/tools/libclang/CXCursor.cpp
@@ -672,6 +672,9 @@ CXCursor cxcursor::MakeCXCursor(const Stmt *S, const Decl *Parent,
case Stmt::OMPTileDirectiveClass:
K = CXCursor_OMPTileDirective;
break;
+ case Stmt::OMPStripeDirectiveClass:
+ K = CXCursor_OMPStripeDirective;
+ break;
case Stmt::OMPUnrollDirectiveClass:
K = CXCursor_OMPUnrollDirective;
break;
@@ -679,7 +682,7 @@ CXCursor cxcursor::MakeCXCursor(const Stmt *S, const Decl *Parent,
K = CXCursor_OMPReverseDirective;
break;
case Stmt::OMPInterchangeDirectiveClass:
- K = CXCursor_OMPTileDirective;
+ K = CXCursor_OMPInterchangeDirective;
break;
case Stmt::OMPForDirectiveClass:
K = CXCursor_OMPForDirective;
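
The CIndex/CXCursor changes expose the new statement class through its own cursor kind and spelling. A minimal libclang client that would observe it; the file name, the -fopenmp flag, and the main() harness here are illustrative, only the libclang entry points are the existing public API:

#include <clang-c/Index.h>
#include <cstdio>

static enum CXChildVisitResult printKind(CXCursor C, CXCursor, CXClientData) {
  CXString S = clang_getCursorKindSpelling(clang_getCursorKind(C));
  // Prints "OMPStripeDirective" when visiting '#pragma omp stripe' after this patch.
  std::printf("%s\n", clang_getCString(S));
  clang_disposeString(S);
  return CXChildVisit_Recurse;
}

int main() {
  const char *Args[] = {"-fopenmp"};
  CXIndex Idx = clang_createIndex(/*excludeDeclarationsFromPCH=*/0, /*displayDiagnostics=*/1);
  CXTranslationUnit TU = clang_parseTranslationUnit(Idx, "stripe.cpp", Args, 1,
                                                    nullptr, 0, CXTranslationUnit_None);
  if (TU) {
    clang_visitChildren(clang_getTranslationUnitCursor(TU), printKind, nullptr);
    clang_disposeTranslationUnit(TU);
  }
  clang_disposeIndex(Idx);
  return 0;
}
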
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td
index 772f60343c6348..f163fbd104eb1e 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMP.td
+++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td
@@ -1179,6 +1179,13 @@ def OMP_Tile : Directive<"tile"> {
let association = AS_Loop;
let category = CA_Executable;
}
+def OMP_Stripe : Directive<"stripe"> {
+ let allowedOnceClauses = [
+ VersionedClause<OMPC_Sizes, 51>,
+ ];
+ let association = AS_Loop;
+ let category = CA_Executable;
+}
def OMP_Unknown : Directive<"unknown"> {
let isDefault = true;
let association = AS_None;