[clang] Fix OMPT ident flag in combined distribute parallel for pragma (PR #80987)
via cfe-commits
cfe-commits at lists.llvm.org
Wed Feb 7 04:45:10 PST 2024
llvmbot wrote:
@llvm/pr-subscribers-clang
Author: None (mikaoP)
<details>
<summary>Changes</summary>
---
Patch is 1.89 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/80987.diff
88 Files Affected:
- (modified) clang/lib/CodeGen/CGOpenMPRuntime.cpp (+1)
- (modified) clang/lib/CodeGen/CGStmtOpenMP.cpp (+9-6)
- (modified) clang/lib/CodeGen/CodeGenFunction.h (+2)
- (modified) clang/test/OpenMP/bug60602.cpp (+1-1)
- (modified) clang/test/OpenMP/distribute_parallel_for_codegen.cpp (+32-32)
- (modified) clang/test/OpenMP/distribute_parallel_for_firstprivate_codegen.cpp (+10-10)
- (modified) clang/test/OpenMP/distribute_parallel_for_if_codegen.cpp (+8-8)
- (modified) clang/test/OpenMP/distribute_parallel_for_lastprivate_codegen.cpp (+10-10)
- (modified) clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp (+280-280)
- (modified) clang/test/OpenMP/distribute_parallel_for_private_codegen.cpp (+10-10)
- (modified) clang/test/OpenMP/distribute_parallel_for_proc_bind_codegen.cpp (+3-3)
- (modified) clang/test/OpenMP/distribute_parallel_for_reduction_task_codegen.cpp (+17-17)
- (modified) clang/test/OpenMP/distribute_parallel_for_simd_codegen.cpp (+30-30)
- (modified) clang/test/OpenMP/distribute_parallel_for_simd_firstprivate_codegen.cpp (+24-24)
- (modified) clang/test/OpenMP/distribute_parallel_for_simd_if_codegen.cpp (+34-34)
- (modified) clang/test/OpenMP/distribute_parallel_for_simd_lastprivate_codegen.cpp (+32-32)
- (modified) clang/test/OpenMP/distribute_parallel_for_simd_num_threads_codegen.cpp (+336-336)
- (modified) clang/test/OpenMP/distribute_parallel_for_simd_private_codegen.cpp (+6-6)
- (modified) clang/test/OpenMP/distribute_parallel_for_simd_proc_bind_codegen.cpp (+3-3)
- (modified) clang/test/OpenMP/distribute_simd_codegen.cpp (+18-18)
- (modified) clang/test/OpenMP/nvptx_SPMD_codegen.cpp (+48-48)
- (modified) clang/test/OpenMP/nvptx_distribute_parallel_generic_mode_codegen.cpp (+2-2)
- (modified) clang/test/OpenMP/nvptx_target_teams_distribute_parallel_for_codegen.cpp (+18-18)
- (modified) clang/test/OpenMP/nvptx_target_teams_distribute_parallel_for_generic_mode_codegen.cpp (+2-2)
- (modified) clang/test/OpenMP/nvptx_target_teams_distribute_parallel_for_simd_codegen.cpp (+8-8)
- (modified) clang/test/OpenMP/nvptx_target_teams_generic_loop_codegen.cpp (+18-18)
- (modified) clang/test/OpenMP/nvptx_target_teams_generic_loop_generic_mode_codegen.cpp (+4-4)
- (modified) clang/test/OpenMP/reduction_implicit_map.cpp (+16-16)
- (modified) clang/test/OpenMP/target_ompx_dyn_cgroup_mem_codegen.cpp (+58-58)
- (modified) clang/test/OpenMP/target_teams_distribute_codegen.cpp (+64-64)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_codegen.cpp (+15-15)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_collapse_codegen.cpp (+6-6)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_dist_schedule_codegen.cpp (+18-18)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_firstprivate_codegen.cpp (+15-15)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_if_codegen.cpp (+8-8)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_lastprivate_codegen.cpp (+10-10)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_private_codegen.cpp (+15-15)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_proc_bind_codegen.cpp (+3-3)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_reduction_codegen.cpp (+25-25)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_reduction_task_codegen.cpp (+19-19)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_schedule_codegen.cpp (+36-36)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp (+12-12)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_simd_collapse_codegen.cpp (+6-6)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp (+18-18)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_simd_firstprivate_codegen.cpp (+17-17)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_simd_if_codegen.cpp (+40-40)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_simd_lastprivate_codegen.cpp (+32-32)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_simd_private_codegen.cpp (+10-10)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_simd_proc_bind_codegen.cpp (+3-3)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_simd_reduction_codegen.cpp (+25-25)
- (modified) clang/test/OpenMP/target_teams_distribute_parallel_for_simd_schedule_codegen.cpp (+36-36)
- (modified) clang/test/OpenMP/target_teams_generic_loop_codegen-1.cpp (+15-15)
- (modified) clang/test/OpenMP/target_teams_generic_loop_collapse_codegen.cpp (+10-10)
- (modified) clang/test/OpenMP/target_teams_generic_loop_if_codegen.cpp (+9-9)
- (modified) clang/test/OpenMP/target_teams_generic_loop_private_codegen.cpp (+21-21)
- (modified) clang/test/OpenMP/target_teams_generic_loop_reduction_codegen.cpp (+25-25)
- (modified) clang/test/OpenMP/target_teams_generic_loop_uses_allocators_codegen.cpp (+2-2)
- (modified) clang/test/OpenMP/teams_distribute_dist_schedule_codegen.cpp (+42-42)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_codegen.cpp (+52-52)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_collapse_codegen.cpp (+30-30)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_copyin_codegen.cpp (+27-27)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_dist_schedule_codegen.cpp (+78-78)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_firstprivate_codegen.cpp (+18-18)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_if_codegen.cpp (+39-39)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_lastprivate_codegen.cpp (+56-56)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_num_threads_codegen.cpp (+138-138)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_private_codegen.cpp (+18-18)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_proc_bind_codegen.cpp (+14-14)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_reduction_codegen.cpp (+54-54)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_reduction_task_codegen.cpp (+19-19)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_schedule_codegen.cpp (+228-228)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_simd_codegen.cpp (+50-50)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_simd_collapse_codegen.cpp (+30-30)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp (+78-78)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_simd_firstprivate_codegen.cpp (+20-20)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_simd_if_codegen.cpp (+162-162)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_simd_lastprivate_codegen.cpp (+78-78)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_simd_num_threads_codegen.cpp (+164-164)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_simd_private_codegen.cpp (+5-5)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_simd_proc_bind_codegen.cpp (+14-14)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_simd_reduction_codegen.cpp (+54-54)
- (modified) clang/test/OpenMP/teams_distribute_parallel_for_simd_schedule_codegen.cpp (+228-228)
- (modified) clang/test/OpenMP/teams_distribute_simd_dist_schedule_codegen.cpp (+42-42)
- (modified) clang/test/OpenMP/teams_generic_loop_codegen-1.cpp (+58-58)
- (modified) clang/test/OpenMP/teams_generic_loop_codegen.cpp (+12-12)
- (modified) clang/test/OpenMP/teams_generic_loop_collapse_codegen.cpp (+34-34)
- (modified) clang/test/OpenMP/teams_generic_loop_private_codegen.cpp (+21-21)
- (modified) clang/test/OpenMP/teams_generic_loop_reduction_codegen.cpp (+57-57)
``````````diff
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 4855e7410a015a..452f24f9149027 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -2647,6 +2647,7 @@ void CGOpenMPRuntime::emitDistributeStaticInit(
void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind DKind) {
+ assert(DKind == OMPD_distribute || DKind == OMPD_for || DKind == OMPD_sections);
if (!CGF.HaveInsertPoint())
return;
// Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 8fd74697de3c0f..d5e6ecc6325998 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -2910,10 +2910,10 @@ void CodeGenFunction::EmitOMPOuterLoop(
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
- auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
+ auto &&CodeGen = [DynamicOrOrdered, &S, &LoopArgs](CodeGenFunction &CGF) {
if (!DynamicOrOrdered)
CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
- S.getDirectiveKind());
+ LoopArgs.DKind);
};
OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
}
@@ -3019,6 +3019,7 @@ void CodeGenFunction::EmitOMPForOuterLoop(
OuterLoopArgs.Cond = S.getCond();
OuterLoopArgs.NextLB = S.getNextLowerBound();
OuterLoopArgs.NextUB = S.getNextUpperBound();
+ OuterLoopArgs.DKind = LoopArgs.DKind;
EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
}
@@ -3080,6 +3081,7 @@ void CodeGenFunction::EmitOMPDistributeOuterLoop(
OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
? S.getCombinedNextUpperBound()
: S.getNextUpperBound();
+ OuterLoopArgs.DKind = OMPD_distribute;
EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
LoopScope, OuterLoopArgs, CodeGenLoopContent,
@@ -3452,15 +3454,16 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
// Tell the runtime we are done.
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
- S.getDirectiveKind());
+ OMPD_for);
};
OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
} else {
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
- const OMPLoopArguments LoopArguments(
+ OMPLoopArguments LoopArguments(
LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
IL.getAddress(*this), Chunk, EUB);
+ LoopArguments.DKind = OMPD_for;
EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
LoopArguments, CGDispatchBounds);
}
@@ -4082,7 +4085,7 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
// Tell the runtime we are done.
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
- S.getDirectiveKind());
+ OMPD_sections);
};
CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
@@ -5782,7 +5785,7 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
});
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
- RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind());
+ RT.emitForStaticFinish(*this, S.getEndLoc(), OMPD_distribute);
} else {
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 143ad64e8816b1..6aa3be047d8870 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -3808,6 +3808,8 @@ class CodeGenFunction : public CodeGenTypeCache {
Expr *NextLB = nullptr;
/// Update of UB after a whole chunk has been executed
Expr *NextUB = nullptr;
+  /// Distinguish between the 'for', 'distribute' and 'sections' directives.
+ OpenMPDirectiveKind DKind = llvm::omp::OMPD_unknown;
OMPLoopArguments() = default;
OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
diff --git a/clang/test/OpenMP/bug60602.cpp b/clang/test/OpenMP/bug60602.cpp
index 2fbfdfde07a0cc..613af513c3d1f7 100644
--- a/clang/test/OpenMP/bug60602.cpp
+++ b/clang/test/OpenMP/bug60602.cpp
@@ -564,7 +564,7 @@ int kernel_within_loop(int *a, int *b, int N, int num_iters) {
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
-// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP23]])
+// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
// CHECK-NEXT: br label [[OMP_PRECOND_END]]
// CHECK: omp.precond.end:
// CHECK-NEXT: ret void
diff --git a/clang/test/OpenMP/distribute_parallel_for_codegen.cpp b/clang/test/OpenMP/distribute_parallel_for_codegen.cpp
index 7bdc4c5ab21a7b..43efd281590faa 100644
--- a/clang/test/OpenMP/distribute_parallel_for_codegen.cpp
+++ b/clang/test/OpenMP/distribute_parallel_for_codegen.cpp
@@ -1027,7 +1027,7 @@ int main() {
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4
-// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP34]])
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP34]])
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
// CHECK1-NEXT: ret void
@@ -1266,7 +1266,7 @@ int main() {
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4
-// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP34]])
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP34]])
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
// CHECK1-NEXT: ret void
@@ -1535,7 +1535,7 @@ int main() {
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4
-// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP34]])
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP34]])
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
// CHECK1-NEXT: ret void
@@ -1774,7 +1774,7 @@ int main() {
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4
-// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP34]])
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP34]])
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
// CHECK1-NEXT: ret void
@@ -2047,7 +2047,7 @@ int main() {
// CHECK1: omp.dispatch.end:
// CHECK1-NEXT: [[TMP40:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP40]], align 4
-// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP41]])
+// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP41]])
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
// CHECK1-NEXT: ret void
@@ -2798,7 +2798,7 @@ int main() {
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4
-// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP34]])
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP34]])
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.end:
// CHECK3-NEXT: ret void
@@ -3030,7 +3030,7 @@ int main() {
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4
-// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP34]])
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP34]])
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.end:
// CHECK3-NEXT: ret void
@@ -3292,7 +3292,7 @@ int main() {
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4
-// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP34]])
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP34]])
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.end:
// CHECK3-NEXT: ret void
@@ -3524,7 +3524,7 @@ int main() {
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4
-// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP34]])
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP34]])
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.end:
// CHECK3-NEXT: ret void
@@ -3788,7 +3788,7 @@ int main() {
// CHECK3: omp.dispatch.end:
// CHECK3-NEXT: [[TMP40:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP40]], align 4
-// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP41]])
+// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP41]])
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.end:
// CHECK3-NEXT: ret void
@@ -5122,7 +5122,7 @@ int main() {
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
-// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]])
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP30]])
// CHECK9-NEXT: br label [[OMP_PRECOND_END]]
// CHECK9: omp.precond.end:
// CHECK9-NEXT: ret void
@@ -5351,7 +5351,7 @@ int main() {
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
-// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]])
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP30]])
// CHECK9-NEXT: br label [[OMP_PRECOND_END]]
// CHECK9: omp.precond.end:
// CHECK9-NEXT: ret void
@@ -5610,7 +5610,7 @@ int main() {
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
-// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]])
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP30]])
// CHECK9-NEXT: br label [[OMP_PRECOND_END]]
// CHECK9: omp.precond.end:
// CHECK9-NEXT: ret void
@@ -5839,7 +5839,7 @@ int main() {
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
-// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]])
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP30]])
// CHECK9-NEXT: br label [[OMP_PRECOND_END]]
// CHECK9: omp.precond.end:
// CHECK9-NEXT: ret void
@@ -6102,7 +6102,7 @@ int main() {
// CHECK9: omp.dispatch.end:
// CHECK9-NEXT: [[TMP36:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP36]], align 4
-// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP37]])
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP37]])
// CHECK9-NEXT: br label [[OMP_PRECOND_END]]
// CHECK9: omp.precond.end:
// CHECK9-NEXT: ret void
@@ -7428,12 +7428,12 @@ int main() {
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4
-// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP34]])
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP34]])
// CHECK9-NEXT: br label [[OMP_PRECOND_END]]
// CHECK9: cancel.exit:
// CHECK9-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
-// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP36]])
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP36]])
// CHECK9-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK9: omp.precond.end:
// CHECK9-NEXT: br label [[CANCEL_CONT]]
@@ -7664,7 +7664,7 @@ int main() {
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
-// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]])
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP30]])
// CHECK9-NEXT: br label [[OMP_PRECOND_END]]
// CHECK9: omp.precond.end:
// CHECK9-NEXT: ret void
@@ -7923,7 +7923,7 @@ int main() {
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
-// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]])
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP30]])
// CHECK9-NEXT: br label [[OMP_PRECOND_END]]
// CHECK9: omp.precond.end:
// CHECK9-NEXT: ret void
@@ -8152,7 +8152,7 @@ int main() {
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
-// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]])
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP30]])
// CHECK9-NEXT: br label [[OMP_PRECOND_END]]
// CHECK9: omp.precond.end:
// CHECK9-NEXT: ret void
@@ -8415,7 +8415,7 @@ int main() {
// CHECK9: omp.dispatch.end:
// CHECK9-NEXT: [[TMP36:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP36]], align 4
-// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP37]])
+// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP37]])
// CHECK9-NEXT: br label [[OMP_PRECOND_END]]
// CHECK9: omp.precond.end:
// CHECK9-NEXT: ret void
@@ -9736,7 +9736,7 @@ int main() {
// CHECK11: omp.loop.exit:
// CHECK11-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
-// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]])
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP30]])
// CHECK11-NEXT: br label [[OMP_PRECOND_END]]
// CHECK11: omp.precond.end:
// CHECK11-NEXT: ret void
@@ -9958,7 +9958,7 @@ int main() {
// CHECK11: omp.loop.exit:
// CHECK11-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
-// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]])
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP30]])
// CHECK11-NEXT: br label [[OMP_PRECOND_END]]
// CHECK11: omp.precond.end:
// CHECK11-NEXT: ret void
@@ -10210,7 +10210,7 @@ int main() {
// CHECK11: omp.loop.exit:
// CHECK11-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
-// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]])
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP30]])
// CHECK11-NEXT: br label [[OMP_PRECOND_END]]
// CHECK11: omp.precond.end:
// CHECK11-NEXT: ret void
@@ -10432,7 +10432,7 @@ int main() {
// CHECK11: omp.loop.exit:
// CHECK11-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
-// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]])
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP30]])
// CHECK11-NEXT: br label [[OMP_PRECOND_END]]
// CHECK11: omp.precond.end:
// CHECK11-NEXT: ret void
@@ -10686,7 +10686,7 @@ int main() {
// CHECK11: omp.dispatch.end:
// CHECK11-NEXT: [[TMP36:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP36]], align 4
-// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP37]])
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP37]])
// CHECK11-NEXT: br label [[OMP_PRECOND_END]]
// CHECK11: omp.precond.end:
// CHECK11-NEXT: ret void
@@ -11991,12 +11991,12 @@ int main() {
// CHECK11: omp.loop.exit:
// CHECK11-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4
-// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP34]])
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP34]])
// CHECK11-NEXT: br label [[OMP_PRECOND_END]]
// CHECK11: cancel.exit:
// CHECK11-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP35]], align 4
-// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP36]])
+// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP36]])
// CHECK11-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK11: omp.precond.end:
// CHECK11-NEXT: br label [[CANCEL_CONT]]
@@ -12220,7 +12220,7 @@ int main() {
// CHECK11: omp.loop.exit:
// CHECK11-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
-// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]])
+// ...
[truncated]
``````````
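
For context, here is a minimal OpenMP program (a hypothetical illustration, not taken from the patch's test files) using the combined construct this patch targets. With static scheduling, codegen emits separate `__kmpc_for_static_fini` calls for the `distribute` half and the inner worksharing `for` half; the change makes each call use the ident matching its own directive kind instead of the combined directive's kind.

```c++
// Hypothetical example of the combined 'distribute parallel for' construct.
// Under this patch, the distribute loop and the inner 'for' loop are each
// finalized with an ident appropriate to their own directive kind.
void saxpy(int n, float a, const float *x, float *y) {
#pragma omp target teams distribute parallel for map(to : x[0 : n]) map(tofrom : y[0 : n])
  for (int i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
}
```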
</details>
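Below is a self-contained sketch of the plumbing pattern the patch introduces, under stated assumptions: the names mirror the diff, but the types are stand-ins, not the actual clang classes. The idea is that the loop-arguments struct now records which worksharing directive it belongs to, and the static-finish emission consults that specific kind (for, distribute, or sections) rather than the combined directive's kind.

```c++
#include <cassert>
#include <iostream>

// Stand-in for OpenMPDirectiveKind; only the kinds the patch distinguishes.
enum class DirKind { Unknown, For, Distribute, Sections };

// Stand-in for OMPLoopArguments: it now carries the directive kind of the
// worksharing construct it describes (defaulting to Unknown, as in the diff).
struct LoopArgs {
  DirKind DKind = DirKind::Unknown;
  // Bounds, stride, and chunk would live here in the real struct.
};

// Stand-in for emitForStaticFinish: callers now pass a specific kind,
// never a combined one, so the matching ident flags can be selected.
void emitForStaticFinish(DirKind DKind) {
  assert(DKind == DirKind::For || DKind == DirKind::Distribute ||
         DKind == DirKind::Sections);
  std::cout << "__kmpc_for_static_fini with "
            << (DKind == DirKind::Distribute ? "distribute" : "for/sections")
            << " ident\n";
}

int main() {
  // Combined 'distribute parallel for': the distribute half is finalized
  // with the distribute ident, the inner 'for' half with the for ident.
  LoopArgs DistributeArgs{DirKind::Distribute};
  LoopArgs ForArgs{DirKind::For};
  emitForStaticFinish(DistributeArgs.DKind);
  emitForStaticFinish(ForArgs.DKind);
}
```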
https://github.com/llvm/llvm-project/pull/80987