Thanks - already reverted and I found the problem. Will push a new version soon.

-- Carlo

From: Nico Weber <thakis@google.com>
To: Carlo Bertolli/Watson/IBM@IBMUS
Cc: cfe-commits <cfe-commits@lists.llvm.org>
Date: 03/04/2016 06:09 PM
Subject: Re: r262741 - [OPENMP] Codegen for distribute directive

Looks like the new tests don't pass on Windows: http://lab.llvm.org:8011/builders/clang-x64-ninja-win7/builds/10365

On Mar 4, 2016 12:29 PM, "Carlo Bertolli via cfe-commits" <cfe-commits@lists.llvm.org> wrote:

Author: cbertol
Date: Fri Mar 4 14:24:58 2016
New Revision: 262741

URL: http://llvm.org/viewvc/llvm-project?rev=262741&view=rev
Log:
[OPENMP] Codegen for distribute directive

This patch provides a basic implementation of codegen for the distribute directive, excluding all clauses except dist_schedule.
It also fixes parts of AST reader/writer to enable correct pre-compiled header handling.

http://reviews.llvm.org/D17170

Added:
    cfe/trunk/test/OpenMP/distribute_codegen.cpp
Modified:
    cfe/trunk/include/clang/AST/StmtOpenMP.h
    cfe/trunk/lib/CodeGen/CGOpenMPRuntime.cpp
    cfe/trunk/lib/CodeGen/CGOpenMPRuntime.h
    cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp
    cfe/trunk/lib/CodeGen/CodeGenFunction.h
    cfe/trunk/lib/Serialization/ASTReaderStmt.cpp
    cfe/trunk/lib/Serialization/ASTWriterStmt.cpp

Modified: cfe/trunk/include/clang/AST/StmtOpenMP.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/AST/StmtOpenMP.h?rev=262741&r1=262740&r2=262741&view=diff
==============================================================================
--- cfe/trunk/include/clang/AST/StmtOpenMP.h (original)
+++ cfe/trunk/include/clang/AST/StmtOpenMP.h Fri Mar 4 14:24:58 2016
@@ -595,49 +595,56 @@ public:
   }
   Expr *getIsLastIterVariable() const {
     assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
-            isOpenMPTaskLoopDirective(getDirectiveKind())) &&
+            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
+            isOpenMPDistributeDirective(getDirectiveKind())) &&
            "expected worksharing loop directive");
     return const_cast<Expr *>(reinterpret_cast<const Expr *>(
         *std::next(child_begin(), IsLastIterVariableOffset)));
   }
   Expr *getLowerBoundVariable() const {
     assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
-            isOpenMPTaskLoopDirective(getDirectiveKind())) &&
+            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
+            isOpenMPDistributeDirective(getDirectiveKind())) &&
            "expected worksharing loop directive");
     return const_cast<Expr *>(reinterpret_cast<const Expr *>(
         *std::next(child_begin(), LowerBoundVariableOffset)));
   }
   Expr *getUpperBoundVariable() const {
     assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
-            isOpenMPTaskLoopDirective(getDirectiveKind())) &&
+            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
+            isOpenMPDistributeDirective(getDirectiveKind())) &&
            "expected worksharing loop directive");
     return const_cast<Expr *>(reinterpret_cast<const Expr *>(
         *std::next(child_begin(), UpperBoundVariableOffset)));
   }
   Expr *getStrideVariable() const {
     assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
-            isOpenMPTaskLoopDirective(getDirectiveKind())) &&
+            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
+            isOpenMPDistributeDirective(getDirectiveKind())) &&
            "expected worksharing loop directive");
     return const_cast<Expr *>(reinterpret_cast<const Expr *>(
         *std::next(child_begin(), StrideVariableOffset)));
   }
   Expr *getEnsureUpperBound() const {
     assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
-            isOpenMPTaskLoopDirective(getDirectiveKind())) &&
+            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
+            isOpenMPDistributeDirective(getDirectiveKind())) &&
            "expected worksharing loop directive");
     return const_cast<Expr *>(reinterpret_cast<const Expr *>(
         *std::next(child_begin(), EnsureUpperBoundOffset)));
}<br> Expr *getNextLowerBound() const {<br> assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||<br>- isOpenMPTaskLoopDirective(getDirectiveKind())) &&<br>+ isOpenMPTaskLoopDirective(getDirectiveKind()) ||<br>+ isOpenMPDistributeDirective(getDirectiveKind())) &&<br> "expected worksharing loop directive");<br> return const_cast<Expr *>(reinterpret_cast<const Expr *>(<br> *std::next(child_begin(), NextLowerBoundOffset)));<br> }<br> Expr *getNextUpperBound() const {<br> assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||<br>- isOpenMPTaskLoopDirective(getDirectiveKind())) &&<br>+ isOpenMPTaskLoopDirective(getDirectiveKind()) ||<br>+ isOpenMPDistributeDirective(getDirectiveKind())) &&<br> "expected worksharing loop directive");<br> return const_cast<Expr *>(reinterpret_cast<const Expr *>(<br> *std::next(child_begin(), NextUpperBoundOffset)));<br><br>Modified: cfe/trunk/lib/CodeGen/CGOpenMPRuntime.cpp<br>URL: </font><a href="http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGOpenMPRuntime.cpp?rev=262741&r1=262740&r2=262741&view=diff" target="_blank"><u><font size="4" color="#0000FF">http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGOpenMPRuntime.cpp?rev=262741&r1=262740&r2=262741&view=diff</font></u></a><font size="4"><br>==============================================================================<br>--- cfe/trunk/lib/CodeGen/CGOpenMPRuntime.cpp (original)<br>+++ cfe/trunk/lib/CodeGen/CGOpenMPRuntime.cpp Fri Mar 4 14:24:58 2016<br>@@ -425,6 +425,9 @@ enum OpenMPSchedType {<br> OMP_ord_runtime = 69,<br> OMP_ord_auto = 70,<br> OMP_sch_default = OMP_sch_static,<br>+ /// \brief dist_schedule types<br>+ OMP_dist_sch_static_chunked = 91,<br>+ OMP_dist_sch_static = 92,<br> };<br><br> enum OpenMPRTLFunction {<br>@@ -2148,12 +2151,26 @@ static OpenMPSchedType getRuntimeSchedul<br> llvm_unreachable("Unexpected runtime schedule");<br> }<br><br>+/// \brief Map the OpenMP distribute schedule to the runtime enumeration.<br>+static OpenMPSchedType<br>+getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {<br>+ // only static is allowed for dist_schedule<br>+ return Chunked ? 
OMP_dist_sch_static_chunked : OMP_dist_sch_static;<br>+}<br>+<br> bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,<br> bool Chunked) const {<br> auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);<br> return Schedule == OMP_sch_static;<br> }<br><br>+bool CGOpenMPRuntime::isStaticNonchunked(<br>+ OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {<br>+ auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked);<br>+ return Schedule == OMP_dist_sch_static;<br>+}<br>+<br>+<br> bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {<br> auto Schedule =<br> getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);<br>@@ -2194,6 +2211,55 @@ void CGOpenMPRuntime::emitForDispatchIni<br> CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);<br> }<br><br>+static void emitForStaticInitCall(CodeGenFunction &CGF,<br>+ SourceLocation Loc,<br>+ llvm::Value * UpdateLocation,<br>+ llvm::Value * ThreadId,<br>+ llvm::Constant * ForStaticInitFunction,<br>+ OpenMPSchedType Schedule,<br>+ unsigned IVSize, bool IVSigned, bool Ordered,<br>+ Address IL, Address LB, Address UB,<br>+ Address ST, llvm::Value *Chunk) {<br>+ if (!CGF.HaveInsertPoint())<br>+ return;<br>+<br>+ assert(!Ordered);<br>+ assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||<br>+ Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||<br>+ Schedule == OMP_dist_sch_static ||<br>+ Schedule == OMP_dist_sch_static_chunked);<br>+<br>+ // Call __kmpc_for_static_init(<br>+ // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,<br>+ // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,<br>+ // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,<br>+ // kmp_int[32|64] incr, kmp_int[32|64] chunk);<br>+ if (Chunk == nullptr) {<br>+ assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||<br>+ Schedule == OMP_dist_sch_static) &&<br>+ "expected static non-chunked schedule");<br>+ // If the Chunk was not specified in the clause - use default value 1.<br>+ Chunk = CGF.Builder.getIntN(IVSize, 1);<br>+ } else {<br>+ assert((Schedule == OMP_sch_static_chunked ||<br>+ Schedule == OMP_ord_static_chunked ||<br>+ Schedule == OMP_dist_sch_static_chunked) &&<br>+ "expected static chunked schedule");<br>+ }<br>+ llvm::Value *Args[] = {<br>+ UpdateLocation,<br>+ ThreadId,<br>+ CGF.Builder.getInt32(Schedule), // Schedule type<br>+ IL.getPointer(), // &isLastIter<br>+ LB.getPointer(), // &LB<br>+ UB.getPointer(), // &UB<br>+ ST.getPointer(), // &Stride<br>+ CGF.Builder.getIntN(IVSize, 1), // Incr<br>+ Chunk // Chunk<br>+ };<br>+ CGF.EmitRuntimeCall(ForStaticInitFunction, Args);<br>+}<br>+<br> void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,<br> SourceLocation Loc,<br> OpenMPScheduleClauseKind ScheduleKind,<br>@@ -2201,41 +2267,27 @@ void CGOpenMPRuntime::emitForStaticInit(<br> bool Ordered, Address IL, Address LB,<br> Address UB, Address ST,<br> llvm::Value *Chunk) {<br>- if (!CGF.HaveInsertPoint())<br>- return;<br>- OpenMPSchedType Schedule =<br>- getRuntimeSchedule(ScheduleKind, Chunk != nullptr, Ordered);<br>- assert(!Ordered);<br>- assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||<br>- Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked);<br>-<br>- // Call __kmpc_for_static_init(<br>- // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,<br>- // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,<br>- // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,<br>- // 
kmp_int[32|64] incr, kmp_int[32|64] chunk);<br>- if (Chunk == nullptr) {<br>- assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static) &&<br>- "expected static non-chunked schedule");<br>- // If the Chunk was not specified in the clause - use default value 1.<br>- Chunk = CGF.Builder.getIntN(IVSize, 1);<br>- } else {<br>- assert((Schedule == OMP_sch_static_chunked ||<br>- Schedule == OMP_ord_static_chunked) &&<br>- "expected static chunked schedule");<br>- }<br>- llvm::Value *Args[] = {<br>- emitUpdateLocation(CGF, Loc),<br>- getThreadID(CGF, Loc),<br>- CGF.Builder.getInt32(Schedule), // Schedule type<br>- IL.getPointer(), // &isLastIter<br>- LB.getPointer(), // &LB<br>- UB.getPointer(), // &UB<br>- ST.getPointer(), // &Stride<br>- CGF.Builder.getIntN(IVSize, 1), // Incr<br>- Chunk // Chunk<br>- };<br>- CGF.EmitRuntimeCall(createForStaticInitFunction(IVSize, IVSigned), Args);<br>+ OpenMPSchedType ScheduleNum = getRuntimeSchedule(ScheduleKind, Chunk != nullptr,<br>+ Ordered);<br>+ auto *UpdatedLocation = emitUpdateLocation(CGF, Loc);<br>+ auto *ThreadId = getThreadID(CGF, Loc);<br>+ auto *StaticInitFunction = createForStaticInitFunction(IVSize, IVSigned);<br>+ emitForStaticInitCall(CGF, Loc, UpdatedLocation, ThreadId, StaticInitFunction,<br>+ ScheduleNum, IVSize, IVSigned, Ordered, IL, LB, UB, ST, Chunk);<br>+}<br>+<br>+void CGOpenMPRuntime::emitDistributeStaticInit(CodeGenFunction &CGF,<br>+ SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind,<br>+ unsigned IVSize, bool IVSigned,<br>+ bool Ordered, Address IL, Address LB,<br>+ Address UB, Address ST,<br>+ llvm::Value *Chunk) {<br>+ OpenMPSchedType ScheduleNum = getRuntimeSchedule(SchedKind, Chunk != nullptr);<br>+ auto *UpdatedLocation = emitUpdateLocation(CGF, Loc);<br>+ auto *ThreadId = getThreadID(CGF, Loc);<br>+ auto *StaticInitFunction = createForStaticInitFunction(IVSize, IVSigned);<br>+ emitForStaticInitCall(CGF, Loc, UpdatedLocation, ThreadId, StaticInitFunction,<br>+ ScheduleNum, IVSize, IVSigned, Ordered, IL, LB, UB, ST, Chunk);<br> }<br><br> void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,<br><br>Modified: cfe/trunk/lib/CodeGen/CGOpenMPRuntime.h<br>URL: </font><a href="http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGOpenMPRuntime.h?rev=262741&r1=262740&r2=262741&view=diff" target="_blank"><u><font size="4" color="#0000FF">http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGOpenMPRuntime.h?rev=262741&r1=262740&r2=262741&view=diff</font></u></a><font size="4"><br>==============================================================================<br>--- cfe/trunk/lib/CodeGen/CGOpenMPRuntime.h (original)<br>+++ cfe/trunk/lib/CodeGen/CGOpenMPRuntime.h Fri Mar 4 14:24:58 2016<br>@@ -493,6 +493,14 @@ public:<br> virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,<br> bool Chunked) const;<br><br>+ /// \brief Check if the specified \a ScheduleKind is static non-chunked.<br>+ /// This kind of distribute directive is emitted without outer loop.<br>+ /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.<br>+ /// \param Chunked True if chunk is specified in the clause.<br>+ ///<br>+ virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,<br>+ bool Chunked) const;<br>+<br> /// \brief Check if the specified \a ScheduleKind is dynamic.<br> /// This kind of worksharing directive is emitted without outer loop.<br> /// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.<br>@@ -536,6 +544,31 @@ public:<br> Address UB, 
Address ST,<br> llvm::Value *Chunk = nullptr);<br><br>+ ///<br>+ /// \param CGF Reference to current CodeGenFunction.<br>+ /// \param Loc Clang source location.<br>+ /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.<br>+ /// \param IVSize Size of the iteration variable in bits.<br>+ /// \param IVSigned Sign of the interation variable.<br>+ /// \param Ordered true if loop is ordered, false otherwise.<br>+ /// \param IL Address of the output variable in which the flag of the<br>+ /// last iteration is returned.<br>+ /// \param LB Address of the output variable in which the lower iteration<br>+ /// number is returned.<br>+ /// \param UB Address of the output variable in which the upper iteration<br>+ /// number is returned.<br>+ /// \param ST Address of the output variable in which the stride value is<br>+ /// returned nesessary to generated the static_chunked scheduled loop.<br>+ /// \param Chunk Value of the chunk for the static_chunked scheduled loop.<br>+ /// For the default (nullptr) value, the chunk 1 will be used.<br>+ ///<br>+ virtual void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,<br>+ OpenMPDistScheduleClauseKind SchedKind,<br>+ unsigned IVSize, bool IVSigned,<br>+ bool Ordered, Address IL, Address LB,<br>+ Address UB, Address ST,<br>+ llvm::Value *Chunk = nullptr);<br>+<br> /// \brief Call the appropriate runtime routine to notify that we finished<br> /// iteration of the ordered loop with the dynamic scheduling.<br> ///<br><br>Modified: cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp<br>URL: </font><a href="http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp?rev=262741&r1=262740&r2=262741&view=diff" target="_blank"><u><font size="4" color="#0000FF">http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp?rev=262741&r1=262740&r2=262741&view=diff</font></u></a><font size="4"><br>==============================================================================<br>--- cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp (original)<br>+++ cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp Fri Mar 4 14:24:58 2016<br>@@ -1410,82 +1410,15 @@ void CodeGenFunction::EmitOMPSimdDirecti<br> CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);<br> }<br><br>-void CodeGenFunction::EmitOMPForOuterLoop(<br>- OpenMPScheduleClauseKind ScheduleKind, bool IsMonotonic,<br>+void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,<br> const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,<br> Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {<br> auto &RT = CGM.getOpenMPRuntime();<br><br>- // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).<br>- const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind);<br>-<br>- assert((Ordered ||<br>- !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) &&<br>- "static non-chunked schedule does not need outer loop");<br>-<br>- // Emit outer loop.<br>- //<br>- // OpenMP [2.7.1, Loop Construct, Description, table 2-1]<br>- // When schedule(dynamic,chunk_size) is specified, the iterations are<br>- // distributed to threads in the team in chunks as the threads request them.<br>- // Each thread executes a chunk of iterations, then requests another chunk,<br>- // until no chunks remain to be distributed. Each chunk contains chunk_size<br>- // iterations, except for the last chunk to be distributed, which may have<br>- // fewer iterations. 
When no chunk_size is specified, it defaults to 1.<br>- //<br>- // When schedule(guided,chunk_size) is specified, the iterations are assigned<br>- // to threads in the team in chunks as the executing threads request them.<br>- // Each thread executes a chunk of iterations, then requests another chunk,<br>- // until no chunks remain to be assigned. For a chunk_size of 1, the size of<br>- // each chunk is proportional to the number of unassigned iterations divided<br>- // by the number of threads in the team, decreasing to 1. For a chunk_size<br>- // with value k (greater than 1), the size of each chunk is determined in the<br>- // same way, with the restriction that the chunks do not contain fewer than k<br>- // iterations (except for the last chunk to be assigned, which may have fewer<br>- // than k iterations).<br>- //<br>- // When schedule(auto) is specified, the decision regarding scheduling is<br>- // delegated to the compiler and/or runtime system. The programmer gives the<br>- // implementation the freedom to choose any possible mapping of iterations to<br>- // threads in the team.<br>- //<br>- // When schedule(runtime) is specified, the decision regarding scheduling is<br>- // deferred until run time, and the schedule and chunk size are taken from the<br>- // run-sched-var ICV. If the ICV is set to auto, the schedule is<br>- // implementation defined<br>- //<br>- // while(__kmpc_dispatch_next(&LB, &UB)) {<br>- // idx = LB;<br>- // while (idx <= UB) { BODY; ++idx;<br>- // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.<br>- // } // inner loop<br>- // }<br>- //<br>- // OpenMP [2.7.1, Loop Construct, Description, table 2-1]<br>- // When schedule(static, chunk_size) is specified, iterations are divided into<br>- // chunks of size chunk_size, and the chunks are assigned to the threads in<br>- // the team in a round-robin fashion in the order of the thread number.<br>- //<br>- // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {<br>- // while (idx <= UB) { BODY; ++idx; } // inner loop<br>- // LB = LB + ST;<br>- // UB = UB + ST;<br>- // }<br>- //<br>-<br> const Expr *IVExpr = S.getIterationVariable();<br> const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());<br> const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();<br><br>- if (DynamicOrOrdered) {<br>- llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());<br>- RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind,<br>- IVSize, IVSigned, Ordered, UBVal, Chunk);<br>- } else {<br>- RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,<br>- IVSize, IVSigned, Ordered, IL, LB, UB, ST, Chunk);<br>- }<br>-<br> auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");<br><br> // Start the loop with a block that tests the condition.<br>@@ -1565,6 +1498,111 @@ void CodeGenFunction::EmitOMPForOuterLoo<br> // Tell the runtime we are done.<br> if (!DynamicOrOrdered)<br> RT.emitForStaticFinish(*this, S.getLocEnd());<br>+<br>+}<br>+<br>+void CodeGenFunction::EmitOMPForOuterLoop(<br>+ OpenMPScheduleClauseKind ScheduleKind, bool IsMonotonic,<br>+ const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,<br>+ Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {<br>+ auto &RT = CGM.getOpenMPRuntime();<br>+<br>+ // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).<br>+ const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind);<br>+<br>+ assert((Ordered ||<br>+ !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) 
&&<br>+ "static non-chunked schedule does not need outer loop");<br>+<br>+ // Emit outer loop.<br>+ //<br>+ // OpenMP [2.7.1, Loop Construct, Description, table 2-1]<br>+ // When schedule(dynamic,chunk_size) is specified, the iterations are<br>+ // distributed to threads in the team in chunks as the threads request them.<br>+ // Each thread executes a chunk of iterations, then requests another chunk,<br>+ // until no chunks remain to be distributed. Each chunk contains chunk_size<br>+ // iterations, except for the last chunk to be distributed, which may have<br>+ // fewer iterations. When no chunk_size is specified, it defaults to 1.<br>+ //<br>+ // When schedule(guided,chunk_size) is specified, the iterations are assigned<br>+ // to threads in the team in chunks as the executing threads request them.<br>+ // Each thread executes a chunk of iterations, then requests another chunk,<br>+ // until no chunks remain to be assigned. For a chunk_size of 1, the size of<br>+ // each chunk is proportional to the number of unassigned iterations divided<br>+ // by the number of threads in the team, decreasing to 1. For a chunk_size<br>+ // with value k (greater than 1), the size of each chunk is determined in the<br>+ // same way, with the restriction that the chunks do not contain fewer than k<br>+ // iterations (except for the last chunk to be assigned, which may have fewer<br>+ // than k iterations).<br>+ //<br>+ // When schedule(auto) is specified, the decision regarding scheduling is<br>+ // delegated to the compiler and/or runtime system. The programmer gives the<br>+ // implementation the freedom to choose any possible mapping of iterations to<br>+ // threads in the team.<br>+ //<br>+ // When schedule(runtime) is specified, the decision regarding scheduling is<br>+ // deferred until run time, and the schedule and chunk size are taken from the<br>+ // run-sched-var ICV. 
If the ICV is set to auto, the schedule is<br>+ // implementation defined<br>+ //<br>+ // while(__kmpc_dispatch_next(&LB, &UB)) {<br>+ // idx = LB;<br>+ // while (idx <= UB) { BODY; ++idx;<br>+ // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.<br>+ // } // inner loop<br>+ // }<br>+ //<br>+ // OpenMP [2.7.1, Loop Construct, Description, table 2-1]<br>+ // When schedule(static, chunk_size) is specified, iterations are divided into<br>+ // chunks of size chunk_size, and the chunks are assigned to the threads in<br>+ // the team in a round-robin fashion in the order of the thread number.<br>+ //<br>+ // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {<br>+ // while (idx <= UB) { BODY; ++idx; } // inner loop<br>+ // LB = LB + ST;<br>+ // UB = UB + ST;<br>+ // }<br>+ //<br>+<br>+ const Expr *IVExpr = S.getIterationVariable();<br>+ const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());<br>+ const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();<br>+<br>+ if (DynamicOrOrdered) {<br>+ llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());<br>+ RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind,<br>+ IVSize, IVSigned, Ordered, UBVal, Chunk);<br>+ } else {<br>+ RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,<br>+ Ordered, IL, LB, UB, ST, Chunk);<br>+ }<br>+<br>+ EmitOMPOuterLoop(IsMonotonic, DynamicOrOrdered, S, LoopScope, Ordered, LB, UB,<br>+ ST, IL, Chunk);<br>+}<br>+<br>+void CodeGenFunction::EmitOMPDistributeOuterLoop(<br>+ OpenMPDistScheduleClauseKind ScheduleKind,<br>+ const OMPDistributeDirective &S, OMPPrivateScope &LoopScope,<br>+ Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {<br>+<br>+ auto &RT = CGM.getOpenMPRuntime();<br>+<br>+ // Emit outer loop.<br>+ // Same behavior as a OMPForOuterLoop, except that schedule cannot be<br>+ // dynamic<br>+ //<br>+<br>+ const Expr *IVExpr = S.getIterationVariable();<br>+ const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());<br>+ const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();<br>+<br>+ RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,<br>+ IVSize, IVSigned, /* Ordered = */ false,<br>+ IL, LB, UB, ST, Chunk);<br>+<br>+ EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false,<br>+ S, LoopScope, /* Ordered = */ false, LB, UB, ST, IL, Chunk);<br> }<br><br> /// \brief Emit a helper variable and return corresponding lvalue.<br>@@ -2191,9 +2229,130 @@ void CodeGenFunction::EmitOMPFlushDirect<br> }(), S.getLocStart());<br> }<br><br>+void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {<br>+ // Emit the loop iteration variable.<br>+ auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());<br>+ auto IVDecl = cast<VarDecl>(IVExpr->getDecl());<br>+ EmitVarDecl(*IVDecl);<br>+<br>+ // Emit the iterations count variable.<br>+ // If it is not a variable, Sema decided to calculate iterations count on each<br>+ // iteration (e.g., it is foldable into a constant).<br>+ if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {<br>+ EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));<br>+ // Emit calculation of the iterations count.<br>+ EmitIgnoredExpr(S.getCalcLastIteration());<br>+ }<br>+<br>+ auto &RT = CGM.getOpenMPRuntime();<br>+<br>+ // Check pre-condition.<br>+ {<br>+ // Skip the entire loop if we don't meet the precondition.<br>+ // If the condition constant folds and can be elided, avoid emitting the<br>+ // whole loop.<br>+ bool 
CondConstant;<br>+ llvm::BasicBlock *ContBlock = nullptr;<br>+ if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {<br>+ if (!CondConstant)<br>+ return;<br>+ } else {<br>+ auto *ThenBlock = createBasicBlock("omp.precond.then");<br>+ ContBlock = createBasicBlock("omp.precond.end");<br>+ emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,<br>+ getProfileCount(&S));<br>+ EmitBlock(ThenBlock);<br>+ incrementProfileCounter(&S);<br>+ }<br>+<br>+ // Emit 'then' code.<br>+ {<br>+ // Emit helper vars inits.<br>+ LValue LB =<br>+ EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));<br>+ LValue UB =<br>+ EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));<br>+ LValue ST =<br>+ EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));<br>+ LValue IL =<br>+ EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));<br>+<br>+ OMPPrivateScope LoopScope(*this);<br>+ emitPrivateLoopCounters(*this, LoopScope, S.counters(),<br>+ S.private_counters());<br>+ (void)LoopScope.Privatize();<br>+<br>+ // Detect the distribute schedule kind and chunk.<br>+ llvm::Value *Chunk = nullptr;<br>+ OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;<br>+ if (auto *C = S.getSingleClause<OMPDistScheduleClause>()) {<br>+ ScheduleKind = C->getDistScheduleKind();<br>+ if (const auto *Ch = C->getChunkSize()) {<br>+ Chunk = EmitScalarExpr(Ch);<br>+ Chunk = EmitScalarConversion(Chunk, Ch->getType(),<br>+ S.getIterationVariable()->getType(),<br>+ S.getLocStart());<br>+ }<br>+ }<br>+ const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());<br>+ const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();<br>+<br>+ // OpenMP [2.10.8, distribute Construct, Description]<br>+ // If dist_schedule is specified, kind must be static. If specified,<br>+ // iterations are divided into chunks of size chunk_size, chunks are<br>+ // assigned to the teams of the league in a round-robin fashion in the<br>+ // order of the team number. When no chunk_size is specified, the<br>+ // iteration space is divided into chunks that are approximately equal<br>+ // in size, and at most one chunk is distributed to each team of the<br>+ // league. 
The size of the chunks is unspecified in this case.<br>+ if (RT.isStaticNonchunked(ScheduleKind,<br>+ /* Chunked */ Chunk != nullptr)) {<br>+ RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,<br>+ IVSize, IVSigned, /* Ordered = */ false,<br>+ IL.getAddress(), LB.getAddress(),<br>+ UB.getAddress(), ST.getAddress());<br>+ auto LoopExit =<br>+ getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));<br>+ // UB = min(UB, GlobalUB);<br>+ EmitIgnoredExpr(S.getEnsureUpperBound());</font><br><font size="4">+ // IV = LB;<br>+ EmitIgnoredExpr(S.getInit());<br>+ // while (idx <= UB) { BODY; ++idx; }<br>+ EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),<br>+ S.getInc(),<br>+ [&S, LoopExit](CodeGenFunction &CGF) {<br>+ CGF.EmitOMPLoopBody(S, LoopExit);<br>+ CGF.EmitStopPoint(&S);<br>+ },<br>+ [](CodeGenFunction &) {});<br>+ EmitBlock(LoopExit.getBlock());<br>+ // Tell the runtime we are done.<br>+ RT.emitForStaticFinish(*this, S.getLocStart());<br>+ } else {<br>+ // Emit the outer loop, which requests its work chunk [LB..UB] from<br>+ // runtime and runs the inner loop to process it.<br>+ EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope,<br>+ LB.getAddress(), UB.getAddress(), ST.getAddress(),<br>+ IL.getAddress(), Chunk);<br>+ }<br>+ }<br>+<br>+ // We're now done with the loop, so jump to the continuation block.<br>+ if (ContBlock) {<br>+ EmitBranch(ContBlock);<br>+ EmitBlock(ContBlock, true);<br>+ }<br>+ }<br>+}<br>+<br> void CodeGenFunction::EmitOMPDistributeDirective(<br> const OMPDistributeDirective &S) {<br>- llvm_unreachable("CodeGen for 'omp distribute' is not supported yet.");<br>+ LexicalScope Scope(*this, S.getSourceRange());<br>+ auto &&CodeGen = [&S](CodeGenFunction &CGF) {<br>+ CGF.EmitOMPDistributeLoop(S);<br>+ };<br>+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen,<br>+ false);<br> }<br><br> static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,<br><br>Modified: cfe/trunk/lib/CodeGen/CodeGenFunction.h<br>URL: </font><a href="http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CodeGenFunction.h?rev=262741&r1=262740&r2=262741&view=diff" target="_blank"><u><font size="4" color="#0000FF">http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CodeGenFunction.h?rev=262741&r1=262740&r2=262741&view=diff</font></u></a><font size="4"><br>==============================================================================<br>--- cfe/trunk/lib/CodeGen/CodeGenFunction.h (original)<br>+++ cfe/trunk/lib/CodeGen/CodeGenFunction.h Fri Mar 4 14:24:58 2016<br>@@ -2364,6 +2364,7 @@ public:<br> void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);<br> void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);<br> void EmitOMPDistributeDirective(const OMPDistributeDirective &S);<br>+ void EmitOMPDistributeLoop(const OMPDistributeDirective &S);<br><br> /// \brief Emit inner loop of the worksharing/simd construct.<br> ///<br>@@ -2393,11 +2394,18 @@ private:<br> /// \return true, if this construct has any lastprivate clause, false -<br> /// otherwise.<br> bool EmitOMPWorksharingLoop(const OMPLoopDirective &S);<br>+ void EmitOMPOuterLoop(bool IsMonotonic, bool DynamicOrOrdered,<br>+ const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,<br>+ Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk);<br> void EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,<br> bool IsMonotonic, const OMPLoopDirective &S,<br> OMPPrivateScope &LoopScope, bool Ordered, Address LB,<br> Address 
UB, Address ST, Address IL,<br> llvm::Value *Chunk);<br>+ void EmitOMPDistributeOuterLoop(<br>+ OpenMPDistScheduleClauseKind ScheduleKind,<br>+ const OMPDistributeDirective &S, OMPPrivateScope &LoopScope,<br>+ Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk);<br> /// \brief Emit code for sections directive.<br> void EmitSections(const OMPExecutableDirective &S);<br><br><br>Modified: cfe/trunk/lib/Serialization/ASTReaderStmt.cpp<br>URL: </font><a href="http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Serialization/ASTReaderStmt.cpp?rev=262741&r1=262740&r2=262741&view=diff" target="_blank"><u><font size="4" color="#0000FF">http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Serialization/ASTReaderStmt.cpp?rev=262741&r1=262740&r2=262741&view=diff</font></u></a><font size="4"><br>==============================================================================<br>--- cfe/trunk/lib/Serialization/ASTReaderStmt.cpp (original)<br>+++ cfe/trunk/lib/Serialization/ASTReaderStmt.cpp Fri Mar 4 14:24:58 2016<br>@@ -2307,7 +2307,8 @@ void ASTStmtReader::VisitOMPLoopDirectiv<br> D->setInit(Reader.ReadSubExpr());<br> D->setInc(Reader.ReadSubExpr());<br> if (isOpenMPWorksharingDirective(D->getDirectiveKind()) ||<br>- isOpenMPTaskLoopDirective(D->getDirectiveKind())) {<br>+ isOpenMPTaskLoopDirective(D->getDirectiveKind()) ||<br>+ isOpenMPDistributeDirective(D->getDirectiveKind())) {<br> D->setIsLastIterVariable(Reader.ReadSubExpr());<br> D->setLowerBoundVariable(Reader.ReadSubExpr());<br> D->setUpperBoundVariable(Reader.ReadSubExpr());<br><br>Modified: cfe/trunk/lib/Serialization/ASTWriterStmt.cpp<br>URL: </font><a href="http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Serialization/ASTWriterStmt.cpp?rev=262741&r1=262740&r2=262741&view=diff" target="_blank"><u><font size="4" color="#0000FF">http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Serialization/ASTWriterStmt.cpp?rev=262741&r1=262740&r2=262741&view=diff</font></u></a><font size="4"><br>==============================================================================<br>--- cfe/trunk/lib/Serialization/ASTWriterStmt.cpp (original)<br>+++ cfe/trunk/lib/Serialization/ASTWriterStmt.cpp Fri Mar 4 14:24:58 2016<br>@@ -2095,7 +2095,8 @@ void ASTStmtWriter::VisitOMPLoopDirectiv<br> Writer.AddStmt(D->getInit());<br> Writer.AddStmt(D->getInc());<br> if (isOpenMPWorksharingDirective(D->getDirectiveKind()) ||<br>- isOpenMPTaskLoopDirective(D->getDirectiveKind())) {<br>+ isOpenMPTaskLoopDirective(D->getDirectiveKind()) ||<br>+ isOpenMPDistributeDirective(D->getDirectiveKind())) {<br> Writer.AddStmt(D->getIsLastIterVariable());<br> Writer.AddStmt(D->getLowerBoundVariable());<br> Writer.AddStmt(D->getUpperBoundVariable());<br><br>Added: cfe/trunk/test/OpenMP/distribute_codegen.cpp<br>URL: </font><a href="http://llvm.org/viewvc/llvm-project/cfe/trunk/test/OpenMP/distribute_codegen.cpp?rev=262741&view=auto" target="_blank"><u><font size="4" color="#0000FF">http://llvm.org/viewvc/llvm-project/cfe/trunk/test/OpenMP/distribute_codegen.cpp?rev=262741&view=auto</font></u></a><font size="4"><br>==============================================================================<br>--- cfe/trunk/test/OpenMP/distribute_codegen.cpp (added)<br>+++ cfe/trunk/test/OpenMP/distribute_codegen.cpp Fri Mar 4 14:24:58 2016<br>@@ -0,0 +1,239 @@<br>+// Test host codegen.<br>+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -omptargets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64<br>+// RUN: 
%clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -omptargets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s<br>+// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -omptargets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64 --check-prefix HCHECK<br>+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -omptargets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32 --check-prefix HCHECK<br>+// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -omptargets=i386-pc-linux-gnu -emit-pch -o %t %s<br>+// RUN: %clang_cc1 -fopenmp -x c++ -triple i386-unknown-unknown -omptargets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32 --check-prefix HCHECK<br>+<br>+// Test target codegen - host bc file ha
s to be created first. (no significant differences with host version of target region)<br>+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -omptargets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc<br>+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -omptargets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -omp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s<br>+// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -omptargets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -omp-host-ir-file-path %t-ppc-host.bc -o %t %s<br>+// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -omptargets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -omp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s<br>+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -omptargets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc<br>+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -omptargets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -omp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s<br>+// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -omptargets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -omp-host-ir-file-path %t-x86-host.bc -o %t %s<br>+// RUN: %clang_cc1 -fopenmp -x c++ -triple i386-unknown-unknown -omptargets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -omp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s<br>+<br>+// expected-no-diagnostics<br>+#ifndef HEADER<br>+#define HEADER<br>+<br>+// CHECK-DAG: %ident_t = type { i32, i32, i32, i32, i8* }<br>+// CHECK-DAG: [[STR:@.+]] = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00"<br>+// CHECK-DAG: [[DEF_LOC_0:@.+]] = private unnamed_addr constant %ident_t { i32 0, i32 2, i32 0, i32 0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* [[STR]], i32 0, i32 0) }<br>+<br>+// CHECK-LABEL: define {{.*void}} @{{.*}}without_schedule_clause{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})<br>+void without_schedule_clause(float *a, float *b, float *c, float *d) {<br>+ #pragma omp target<br>+ #pragma omp teams<br>+ #pragma omp distribute<br>+ for (int i = 33; i < 32000000; i += 7) {<br>+ a[i] = b[i] * c[i] * d[i];<br>+ }<br>+}<br>+<br>+// CHECK: define {{.*}}void @.omp_outlined.(i32* noalias [[GBL_TIDP:%.+]], i32* noalias [[BND_TID:%.+]], float** dereferenceable({{[0-9]+}}) [[APTR:%.+]], float** dereferenceable({{[0-9]+}}) [[BPTR:%.+]], float** dereferenceable({{[0-9]+}}) [[CPTR:%.+]], float** dereferenceable({{[0-9]+}}) [[DPTR:%.+]])<br>+// CHECK: [[TID_ADDR:%.+]] = alloca i32*<br>+// CHECK: [[IV:%.+iv]] = alloca i32<br>+// CHECK: [[LB:%.+lb]] = alloca i32<br>+// CHECK: [[UB:%.+ub]] = alloca i32<br>+// CHECK: [[ST:%.+stride]] = alloca i32<br>+// CHECK: [[LAST:%.+last]] = alloca i32<br>+// CHECK-DAG: store i32* [[GBL_TIDP]], i32** [[TID_ADDR]]<br>+// CHECK-DAG: store i32 0, i32* [[LB]]<br>+// CHECK-DAG: store i32 4571423, i32* [[UB]]<br>+// CHECK-DAG: store i32 1, i32* [[ST]]<br>+// CHECK-DAG: store i32 0, i32* [[LAST]]<br>+// CHECK-DAG: [[GBL_TID:%.+]] = load i32*, i32** [[TID_ADDR]]<br>+// CHECK-DAG: [[GBL_TIDV:%.+]] = load i32, i32* [[GBL_TID]]<br>+// CHECK: call void @__kmpc_for_static_init_{{.+}}(%ident_t* [[DEF_LOC_0]], i32 [[GBL_TIDV]], i32 92, i32* %.omp.is_last, i32* %.</font><a href="http://omp.lb/" 
target="_blank"><u><font size="4" color="#0000FF">omp.lb</font></u></a><font size="4">, i32* %.omp.ub, i32* %.omp.stride, i32 1, i32 1)<br>+// CHECK-DAG: [[UBV0:%.+]] = load i32, i32* [[UB]]<br>+// CHECK-DAG: [[USWITCH:%.+]] = icmp sgt i32 [[UBV0]], 4571423<br>+// CHECK: br i1 [[USWITCH]], label %[[BBCT:.+]], label %[[BBCF:.+]]<br>+// CHECK-DAG: [[BBCT]]:<br>+// CHECK-DAG: br label %[[BBCE:.+]]<br>+// CHECK-DAG: [[BBCF]]:<br>+// CHECK-DAG: [[UBV1:%.+]] = load i32, i32* [[UB]]<br>+// CHECK-DAG: br label %[[BBCE]]<br>+// CHECK: [[BBCE]]:<br>+// CHECK: [[SELUB:%.+]] = phi i32 [ 4571423, %[[BBCT]] ], [ [[UBV1]], %[[BBCF]] ]<br>+// CHECK: store i32 [[SELUB]], i32* [[UB]]<br>+// CHECK: [[LBV0:%.+]] = load i32, i32* [[LB]]<br>+// CHECK: store i32 [[LBV0]], i32* [[IV]]<br>+// CHECK: br label %[[BBINNFOR:.+]]<br>+// CHECK: [[BBINNFOR]]:<br>+// CHECK: [[IVVAL0:%.+]] = load i32, i32* [[IV]]<br>+// CHECK: [[UBV2:%.+]] = load i32, i32* [[UB]]<br>+// CHECK: [[IVLEUB:%.+]] = icmp sle i32 [[IVVAL0]], [[UBV2]]<br>+// CHECK: br i1 [[IVLEUB]], label %[[BBINNBODY:.+]], label %[[BBINNEND:.+]]<br>+// CHECK: [[BBINNBODY]]:<br>+// CHECK: {{.+}} = load i32, i32* [[IV]]<br>+// ... loop body ...<br>+// CHECK: br label %[[BBBODYCONT:.+]]<br>+// CHECK: [[BBBODYCONT]]:<br>+// CHECK: br label %[[BBINNINC:.+]]<br>+// CHECK: [[BBINNINC]]:<br>+// CHECK: [[IVVAL1:%.+]] = load i32, i32* [[IV]]<br>+// CHECK: [[IVINC:%.+]] = add nsw i32 [[IVVAL1]], 1<br>+// CHECK: store i32 [[IVINC]], i32* [[IV]]<br>+// CHECK: br label %[[BBINNFOR]]<br>+// CHECK: [[BBINNEND]]:<br>+// CHECK: br label %[[LPEXIT:.+]]<br>+// CHECK: [[LPEXIT]]:<br>+// CHECK: call void @__kmpc_for_static_fini(%ident_t* [[DEF_LOC_0]], i32 [[GBL_TIDV]])<br>+// CHECK: ret void<br>+<br>+<br>+// CHECK-LABEL: define {{.*void}} @{{.*}}static_not_chunked{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})<br>+void static_not_chunked(float *a, float *b, float *c, float *d) {<br>+ #pragma omp target<br>+ #pragma omp teams<br>+ #pragma omp distribute dist_schedule(static)<br>+ for (int i = 32000000; i > 33; i += -7) {<br>+ a[i] = b[i] * c[i] * d[i];<br>+ }<br>+}<br>+<br>+// CHECK: define {{.*}}void @.omp_outlined.{{.*}}(i32* noalias [[GBL_TIDP:%.+]], i32* noalias [[BND_TID:%.+]], float** dereferenceable({{[0-9]+}}) [[APTR:%.+]], float** dereferenceable({{[0-9]+}}) [[BPTR:%.+]], float** dereferenceable({{[0-9]+}}) [[CPTR:%.+]], float** dereferenceable({{[0-9]+}}) [[DPTR:%.+]])<br>+// CHECK: [[TID_ADDR:%.+]] = alloca i32*<br>+// CHECK: [[IV:%.+iv]] = alloca i32<br>+// CHECK: [[LB:%.+lb]] = alloca i32<br>+// CHECK: [[UB:%.+ub]] = alloca i32<br>+// CHECK: [[ST:%.+stride]] = alloca i32<br>+// CHECK: [[LAST:%.+last]] = alloca i32<br>+// CHECK-DAG: store i32* [[GBL_TIDP]], i32** [[TID_ADDR]]<br>+// CHECK-DAG: store i32 0, i32* [[LB]]<br>+// CHECK-DAG: store i32 4571423, i32* [[UB]]<br>+// CHECK-DAG: store i32 1, i32* [[ST]]<br>+// CHECK-DAG: store i32 0, i32* [[LAST]]<br>+// CHECK-DAG: [[GBL_TID:%.+]] = load i32*, i32** [[TID_ADDR]]<br>+// CHECK-DAG: [[GBL_TIDV:%.+]] = load i32, i32* [[GBL_TID]]<br>+// CHECK: call void @__kmpc_for_static_init_{{.+}}(%ident_t* [[DEF_LOC_0]], i32 [[GBL_TIDV]], i32 92, i32* %.omp.is_last, i32* %.</font><a href="http://omp.lb/" target="_blank"><u><font size="4" color="#0000FF">omp.lb</font></u></a><font size="4">, i32* %.omp.ub, i32* %.omp.stride, i32 1, i32 1)<br>+// CHECK-DAG: [[UBV0:%.+]] = load i32, i32* [[UB]]<br>+// CHECK-DAG: [[USWITCH:%.+]] = icmp sgt i32 [[UBV0]], 4571423<br>+// CHECK: br i1 [[USWITCH]], label %[[BBCT:.+]], label 
%[[BBCF:.+]]<br>+// CHECK-DAG: [[BBCT]]:<br>+// CHECK-DAG: br label %[[BBCE:.+]]<br>+// CHECK-DAG: [[BBCF]]:<br>+// CHECK-DAG: [[UBV1:%.+]] = load i32, i32* [[UB]]<br>+// CHECK-DAG: br label %[[BBCE]]<br>+// CHECK: [[BBCE]]:<br>+// CHECK: [[SELUB:%.+]] = phi i32 [ 4571423, %[[BBCT]] ], [ [[UBV1]], %[[BBCF]] ]<br>+// CHECK: store i32 [[SELUB]], i32* [[UB]]<br>+// CHECK: [[LBV0:%.+]] = load i32, i32* [[LB]]<br>+// CHECK: store i32 [[LBV0]], i32* [[IV]]<br>+// CHECK: br label %[[BBINNFOR:.+]]<br>+// CHECK: [[BBINNFOR]]:<br>+// CHECK: [[IVVAL0:%.+]] = load i32, i32* [[IV]]<br>+// CHECK: [[UBV2:%.+]] = load i32, i32* [[UB]]<br>+// CHECK: [[IVLEUB:%.+]] = icmp sle i32 [[IVVAL0]], [[UBV2]]<br>+// CHECK: br i1 [[IVLEUB]], label %[[BBINNBODY:.+]], label %[[BBINNEND:.+]]<br>+// CHECK: [[BBINNBODY]]:<br>+// CHECK: {{.+}} = load i32, i32* [[IV]]<br>+// ... loop body ...<br>+// CHECK: br label %[[BBBODYCONT:.+]]<br>+// CHECK: [[BBBODYCONT]]:<br>+// CHECK: br label %[[BBINNINC:.+]]<br>+// CHECK: [[BBINNINC]]:<br>+// CHECK: [[IVVAL1:%.+]] = load i32, i32* [[IV]]<br>+// CHECK: [[IVINC:%.+]] = add nsw i32 [[IVVAL1]], 1<br>+// CHECK: store i32 [[IVINC]], i32* [[IV]]<br>+// CHECK: br label %[[BBINNFOR]]<br>+// CHECK: [[BBINNEND]]:<br>+// CHECK: br label %[[LPEXIT:.+]]<br>+// CHECK: [[LPEXIT]]:<br>+// CHECK: call void @__kmpc_for_static_fini(%ident_t* [[DEF_LOC_0]], i32 [[GBL_TIDV]])<br>+// CHECK: ret void<br>+<br>+<br>+// CHECK-LABEL: define {{.*void}} @{{.*}}static_chunked{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})<br>+void static_chunked(float *a, float *b, float *c, float *d) {<br>+ #pragma omp target<br>+ #pragma omp teams<br>+#pragma omp distribute dist_schedule(static, 5)<br>+ for (unsigned i = 131071; i <= 2147483647; i += 127) {<br>+ a[i] = b[i] * c[i] * d[i];<br>+ }<br>+}<br>+<br>+// CHECK: define {{.*}}void @.omp_outlined.{{.*}}(i32* noalias [[GBL_TIDP:%.+]], i32* noalias [[BND_TID:%.+]], float** dereferenceable({{[0-9]+}}) [[APTR:%.+]], float** dereferenceable({{[0-9]+}}) [[BPTR:%.+]], float** dereferenceable({{[0-9]+}}) [[CPTR:%.+]], float** dereferenceable({{[0-9]+}}) [[DPTR:%.+]])<br>+// CHECK: [[TID_ADDR:%.+]] = alloca i32*<br>+// CHECK: [[IV:%.+iv]] = alloca i32<br>+// CHECK: [[LB:%.+lb]] = alloca i32<br>+// CHECK: [[UB:%.+ub]] = alloca i32<br>+// CHECK: [[ST:%.+stride]] = alloca i32<br>+// CHECK: [[LAST:%.+last]] = alloca i32<br>+// CHECK-DAG: store i32* [[GBL_TIDP]], i32** [[TID_ADDR]]<br>+// CHECK-DAG: store i32 0, i32* [[LB]]<br>+// CHECK-DAG: store i32 16908288, i32* [[UB]]<br>+// CHECK-DAG: store i32 1, i32* [[ST]]<br>+// CHECK-DAG: store i32 0, i32* [[LAST]]<br>+// CHECK-DAG: [[GBL_TID:%.+]] = load i32*, i32** [[TID_ADDR]]<br>+// CHECK-DAG: [[GBL_TIDV:%.+]] = load i32, i32* [[GBL_TID]]<br>+// CHECK: call void @__kmpc_for_static_init_{{.+}}(%ident_t* [[DEF_LOC_0]], i32 [[GBL_TIDV]], i32 91, i32* %.omp.is_last, i32* %.</font><a href="http://omp.lb/" target="_blank"><u><font size="4" color="#0000FF">omp.lb</font></u></a><font size="4">, i32* %.omp.ub, i32* %.omp.stride, i32 1, i32 5)<br>+// CHECK-DAG: [[UBV0:%.+]] = load i32, i32* [[UB]]<br>+// CHECK-DAG: [[USWITCH:%.+]] = icmp ugt i32 [[UBV0]], 16908288<br>+// CHECK: br i1 [[USWITCH]], label %[[BBCT:.+]], label %[[BBCF:.+]]<br>+// CHECK-DAG: [[BBCT]]:<br>+// CHECK-DAG: br label %[[BBCE:.+]]<br>+// CHECK-DAG: [[BBCF]]:<br>+// CHECK-DAG: [[UBV1:%.+]] = load i32, i32* [[UB]]<br>+// CHECK-DAG: br label %[[BBCE]]<br>+// CHECK: [[BBCE]]:<br>+// CHECK: [[SELUB:%.+]] = phi i32 [ 16908288, %[[BBCT]] ], [ [[UBV1]], %[[BBCF]] 
]<br>+// CHECK: store i32 [[SELUB]], i32* [[UB]]<br>+// CHECK: [[LBV0:%.+]] = load i32, i32* [[LB]]<br>+// CHECK: store i32 [[LBV0]], i32* [[IV]]<br>+// CHECK: br label %[[BBINNFOR:.+]]<br>+// CHECK: [[BBINNFOR]]:<br>+// CHECK: [[IVVAL0:%.+]] = load i32, i32* [[IV]]<br>+// CHECK: [[UBV2:%.+]] = load i32, i32* [[UB]]<br>+// CHECK: [[IVLEUB:%.+]] = icmp ule i32 [[IVVAL0]], [[UBV2]]<br>+// CHECK: br i1 [[IVLEUB]], label %[[BBINNBODY:.+]], label %[[BBINNEND:.+]]<br>+// CHECK: [[BBINNBODY]]:<br>+// CHECK: {{.+}} = load i32, i32* [[IV]]<br>+// ... loop body ...<br>+// CHECK: br label %[[BBBODYCONT:.+]]<br>+// CHECK: [[BBBODYCONT]]:<br>+// CHECK: br label %[[BBINNINC:.+]]<br>+// CHECK: [[BBINNINC]]:<br>+// CHECK: [[IVVAL1:%.+]] = load i32, i32* [[IV]]<br>+// CHECK: [[IVINC:%.+]] = add i32 [[IVVAL1]], 1<br>+// CHECK: store i32 [[IVINC]], i32* [[IV]]<br>+// CHECK: br label %[[BBINNFOR]]<br>+// CHECK: [[BBINNEND]]:<br>+// CHECK: br label %[[LPEXIT:.+]]<br>+// CHECK: [[LPEXIT]]:<br>+// CHECK: call void @__kmpc_for_static_fini(%ident_t* [[DEF_LOC_0]], i32 [[GBL_TIDV]])<br>+// CHECK: ret void<br>+<br>+// CHECK-LABEL: test_precond<br>+void test_precond() {<br>+ char a = 0;<br>+ #pragma omp target<br>+ #pragma omp teams<br>+ #pragma omp distribute<br>+ for(char i = a; i < 10; ++i);<br>+}<br>+<br>+// a is passed as a parameter to the outlined functions<br>+// CHECK: define {{.*}}void @.omp_outlined.{{.*}}(i32* noalias [[GBL_TIDP:%.+]], i32* noalias [[BND_TID:%.+]], i8* dereferenceable({{[0-9]+}}) [[APARM:%.+]])<br>+// CHECK: store i8* [[APARM]], i8** [[APTRADDR:%.+]]<br>+// ..many loads of %0..<br>+// CHECK: [[A2:%.+]] = load i8*, i8** [[APTRADDR]]<br>+// CHECK: [[AVAL0:%.+]] = load i8, i8* [[A2]]<br>+// CHECK: [[AVAL1:%.+]] = load i8, i8* [[A2]]<br>+// CHECK: [[AVAL2:%.+]] = load i8, i8* [[A2]]<br>+// CHECK: [[ACONV:%.+]] = sext i8 [[AVAL2]] to i32<br>+// CHECK: [[ACMP:%.+]] = icmp slt i32 [[ACONV]], 10<br>+// CHECK: br i1 [[ACMP]], label %[[PRECOND_THEN:.+]], label %[[PRECOND_END:.+]]<br>+// CHECK: [[PRECOND_THEN]]<br>+// CHECK: call void @__kmpc_for_static_init_4<br>+// CHECK: call void @__kmpc_for_static_fini<br>+// CHECK: [[PRECOND_END]]<br>+<br>+// no templates for now, as these require special handling in target regions and/or declare target<br>+<br>+#endif<br><br><br>_______________________________________________<br>cfe-commits mailing list</font><u><font size="4" color="#0000FF"><br></font></u><a href="mailto:cfe-commits@lists.llvm.org"><u><font size="4" color="#0000FF">cfe-commits@lists.llvm.org</font></u></a><u><font size="4" color="#0000FF"><br></font></u><a href="http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits" target="_blank"><u><font size="4" color="#0000FF">http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits</font></u></a><br><br></ul><BR>
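For anyone skimming the thread, the construct whose lowering this patch implements looks like the snippet below. It is lifted from the static_chunked() function in the added test/OpenMP/distribute_codegen.cpp; the comment only paraphrases the dist_schedule wording quoted in CGStmtOpenMP.cpp and the CHECK lines above, and does not describe anything beyond this commit.

// Lifted from test/OpenMP/distribute_codegen.cpp (this revision). With
// dist_schedule(static, 5), chunks of 5 iterations are assigned to the teams
// of the league in round-robin order; the generated code calls a 32-bit
// __kmpc_for_static_init_* entry point with schedule type 91
// (OMP_dist_sch_static_chunked) and chunk 5 before the loop, and
// __kmpc_for_static_fini once the loop is done.
void static_chunked(float *a, float *b, float *c, float *d) {
#pragma omp target
#pragma omp teams
#pragma omp distribute dist_schedule(static, 5)
  for (unsigned i = 131071; i <= 2147483647; i += 127) {
    a[i] = b[i] * c[i] * d[i];
  }
}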