[Openmp-commits] [openmp] 787eb0c - [OpenMP] libomp cleanup: add check of input global tid parameter
via Openmp-commits
openmp-commits@lists.llvm.org
Mon Jul 20 13:50:19 PDT 2020
Author: AndreyChurbanov
Date: 2020-07-20T23:49:58+03:00
New Revision: 787eb0c637b26ce88e91403584b016a42ab5d59c
URL: https://github.com/llvm/llvm-project/commit/787eb0c637b26ce88e91403584b016a42ab5d59c
DIFF: https://github.com/llvm/llvm-project/commit/787eb0c637b26ce88e91403584b016a42ab5d59c.diff
LOG: [OpenMP] libomp cleanup: add check of input global tid parameter
Add check of negative gtid before indexing __kmp_threads.
This makes static analyzers happier.
This is the first part of the patch split in two parts.
Differential Revision: https://reviews.llvm.org/D84062
Added:
Modified:
openmp/runtime/src/kmp.h
openmp/runtime/src/kmp_csupport.cpp
openmp/runtime/src/kmp_dispatch.cpp
openmp/runtime/src/kmp_error.cpp
openmp/runtime/src/kmp_sched.cpp
openmp/runtime/src/kmp_taskdeps.cpp
openmp/runtime/src/kmp_tasking.cpp
Removed:
################################################################################
diff --git a/openmp/runtime/src/kmp.h b/openmp/runtime/src/kmp.h
index 5f9b7c895619..f266d0fb73fb 100644
--- a/openmp/runtime/src/kmp.h
+++ b/openmp/runtime/src/kmp.h
@@ -3078,6 +3078,11 @@ static inline kmp_team_t *__kmp_team_from_gtid(int gtid) {
return __kmp_threads[gtid]->th.th_team;
}
+static inline void __kmp_assert_valid_gtid(kmp_int32 gtid) {
+ if (UNLIKELY(gtid < 0 || gtid >= __kmp_threads_capacity))
+ KMP_FATAL(ThreadIdentInvalid);
+}
+
/* ------------------------------------------------------------------------- */
extern kmp_global_t __kmp_global; /* global status */
diff --git a/openmp/runtime/src/kmp_csupport.cpp b/openmp/runtime/src/kmp_csupport.cpp
index 9cfa64d6ff9e..b5c641cc7273 100644
--- a/openmp/runtime/src/kmp_csupport.cpp
+++ b/openmp/runtime/src/kmp_csupport.cpp
@@ -231,13 +231,12 @@ void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
kmp_int32 num_threads) {
KA_TRACE(20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n",
global_tid, num_threads));
-
+ __kmp_assert_valid_gtid(global_tid);
__kmp_push_num_threads(loc, global_tid, num_threads);
}
void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid) {
KA_TRACE(20, ("__kmpc_pop_num_threads: enter\n"));
-
/* the num_threads are automatically popped */
}
@@ -245,7 +244,7 @@ void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
kmp_int32 proc_bind) {
KA_TRACE(20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n", global_tid,
proc_bind));
-
+ __kmp_assert_valid_gtid(global_tid);
__kmp_push_proc_bind(loc, global_tid, (kmp_proc_bind_t)proc_bind);
}
@@ -353,7 +352,7 @@ void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
KA_TRACE(20,
("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n",
global_tid, num_teams, num_threads));
-
+ __kmp_assert_valid_gtid(global_tid);
__kmp_push_num_teams(loc, global_tid, num_teams, num_threads);
}
@@ -474,9 +473,10 @@ conditional parallel region, like this,
when the condition is false.
*/
void __kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
-// The implementation is now in kmp_runtime.cpp so that it can share static
-// functions with kmp_fork_call since the tasks to be done are similar in
-// each case.
+ // The implementation is now in kmp_runtime.cpp so that it can share static
+ // functions with kmp_fork_call since the tasks to be done are similar in
+ // each case.
+ __kmp_assert_valid_gtid(global_tid);
#if OMPT_SUPPORT
OMPT_STORE_RETURN_ADDRESS(global_tid);
#endif
@@ -504,6 +504,7 @@ void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
return;
// Not autopar code
+ __kmp_assert_valid_gtid(global_tid);
if (!TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
@@ -713,6 +714,7 @@ Execute a barrier.
void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid) {
KMP_COUNT_BLOCK(OMP_BARRIER);
KC_TRACE(10, ("__kmpc_barrier: called T#%d\n", global_tid));
+ __kmp_assert_valid_gtid(global_tid);
if (!TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
@@ -762,6 +764,7 @@ kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid) {
int status = 0;
KC_TRACE(10, ("__kmpc_master: called T#%d\n", global_tid));
+ __kmp_assert_valid_gtid(global_tid);
if (!TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
@@ -816,7 +819,7 @@ thread that executes the <tt>master</tt> region.
*/
void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid) {
KC_TRACE(10, ("__kmpc_end_master: called T#%d\n", global_tid));
-
+ __kmp_assert_valid_gtid(global_tid);
KMP_DEBUG_ASSERT(KMP_MASTER_GTID(global_tid));
KMP_POP_PARTITIONED_TIMER();
@@ -833,9 +836,6 @@ void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid) {
#endif
if (__kmp_env_consistency_check) {
- if (global_tid < 0)
- KMP_WARNING(ThreadIdentInvalid);
-
if (KMP_MASTER_GTID(global_tid))
__kmp_pop_sync(global_tid, ct_master, loc);
}
@@ -854,6 +854,7 @@ void __kmpc_ordered(ident_t *loc, kmp_int32 gtid) {
KMP_DEBUG_ASSERT(__kmp_init_serial);
KC_TRACE(10, ("__kmpc_ordered: called T#%d\n", gtid));
+ __kmp_assert_valid_gtid(gtid);
if (!TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
@@ -925,6 +926,7 @@ void __kmpc_end_ordered(ident_t *loc, kmp_int32 gtid) {
kmp_info_t *th;
KC_TRACE(10, ("__kmpc_end_ordered: called T#%d\n", gtid));
+ __kmp_assert_valid_gtid(gtid);
#if USE_ITT_BUILD
__kmp_itt_ordered_end(gtid);
@@ -1147,7 +1149,7 @@ static kmp_user_lock_p __kmp_get_critical_section_ptr(kmp_critical_name *crit,
/*!
@ingroup WORK_SHARING
@param loc source location information.
-@param global_tid global thread number .
+@param global_tid global thread number.
@param crit identity of the critical section. This could be a pointer to a lock
associated with the critical section, or some other suitably unique value.
@@ -1170,6 +1172,7 @@ void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
kmp_user_lock_p lck;
KC_TRACE(10, ("__kmpc_critical: called T#%d\n", global_tid));
+ __kmp_assert_valid_gtid(global_tid);
// TODO: add THR_OVHD_STATE
@@ -1392,6 +1395,7 @@ void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
#endif
KC_TRACE(10, ("__kmpc_critical: called T#%d\n", global_tid));
+ __kmp_assert_valid_gtid(global_tid);
kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
// Check if it is initialized.
@@ -1607,8 +1611,8 @@ this function.
*/
kmp_int32 __kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid) {
int status;
-
KC_TRACE(10, ("__kmpc_barrier_master: called T#%d\n", global_tid));
+ __kmp_assert_valid_gtid(global_tid);
if (!TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
@@ -1651,7 +1655,7 @@ still be waiting at the barrier and this call releases them.
*/
void __kmpc_end_barrier_master(ident_t *loc, kmp_int32 global_tid) {
KC_TRACE(10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid));
-
+ __kmp_assert_valid_gtid(global_tid);
__kmp_end_split_barrier(bs_plain_barrier, global_tid);
}
@@ -1667,8 +1671,8 @@ There is no equivalent "end" function, since the
*/
kmp_int32 __kmpc_barrier_master_nowait(ident_t *loc, kmp_int32 global_tid) {
kmp_int32 ret;
-
KC_TRACE(10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid));
+ __kmp_assert_valid_gtid(global_tid);
if (!TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
@@ -1706,14 +1710,9 @@ kmp_int32 __kmpc_barrier_master_nowait(ident_t *loc, kmp_int32 global_tid) {
if (__kmp_env_consistency_check) {
/* there's no __kmpc_end_master called; so the (stats) */
/* actions of __kmpc_end_master are done here */
-
- if (global_tid < 0) {
- KMP_WARNING(ThreadIdentInvalid);
- }
if (ret) {
/* only one thread should do the pop since only */
/* one did the push (see __kmpc_master()) */
-
__kmp_pop_sync(global_tid, ct_master, loc);
}
}
@@ -1734,6 +1733,7 @@ should introduce an explicit barrier if it is required.
*/
kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid) {
+ __kmp_assert_valid_gtid(global_tid);
kmp_int32 rc = __kmp_enter_single(global_tid, loc, TRUE);
if (rc) {
@@ -1786,6 +1786,7 @@ only be called by the thread that executed the block of code protected
by the `single` construct.
*/
void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid) {
+ __kmp_assert_valid_gtid(global_tid);
__kmp_exit_single(global_tid);
KMP_POP_PARTITIONED_TIMER();
@@ -2065,8 +2066,8 @@ void __kmpc_copyprivate(ident_t *loc, kmp_int32 gtid, size_t cpy_size,
void *cpy_data, void (*cpy_func)(void *, void *),
kmp_int32 didit) {
void **data_ptr;
-
KC_TRACE(10, ("__kmpc_copyprivate: called T#%d\n", gtid));
+ __kmp_assert_valid_gtid(gtid);
KMP_MB();
@@ -3382,6 +3383,7 @@ __kmpc_reduce_nowait(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars,
kmp_team_t *team;
int teams_swapped = 0, task_state;
KA_TRACE(10, ("__kmpc_reduce_nowait() enter: called T#%d\n", global_tid));
+ __kmp_assert_valid_gtid(global_tid);
// why do we need this initialization here at all?
// Reduction clause can not be used as a stand-alone directive.
@@ -3535,6 +3537,7 @@ void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
PACKED_REDUCTION_METHOD_T packed_reduction_method;
KA_TRACE(10, ("__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid));
+ __kmp_assert_valid_gtid(global_tid);
packed_reduction_method = __KMP_GET_REDUCTION_METHOD(global_tid);
@@ -3609,6 +3612,7 @@ kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars,
int teams_swapped = 0, task_state;
KA_TRACE(10, ("__kmpc_reduce() enter: called T#%d\n", global_tid));
+ __kmp_assert_valid_gtid(global_tid);
// why do we need this initialization here at all?
// Reduction clause can not be a stand-alone directive.
@@ -3727,6 +3731,7 @@ void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
int teams_swapped = 0, task_state;
KA_TRACE(10, ("__kmpc_end_reduce() enter: called T#%d\n", global_tid));
+ __kmp_assert_valid_gtid(global_tid);
th = __kmp_thread_from_gtid(global_tid);
teams_swapped = __kmp_swap_teams_for_teams_reduction(th, &team, &task_state);
@@ -3883,6 +3888,7 @@ e.g. for(i=2;i<9;i+=2) lo=2, up=8, st=2.
*/
void __kmpc_doacross_init(ident_t *loc, int gtid, int num_dims,
const struct kmp_dim *dims) {
+ __kmp_assert_valid_gtid(gtid);
int j, idx;
kmp_int64 last, trace_count;
kmp_info_t *th = __kmp_threads[gtid];
@@ -4002,6 +4008,7 @@ void __kmpc_doacross_init(ident_t *loc, int gtid, int num_dims,
}
void __kmpc_doacross_wait(ident_t *loc, int gtid, const kmp_int64 *vec) {
+ __kmp_assert_valid_gtid(gtid);
kmp_int32 shft, num_dims, i;
kmp_uint32 flag;
kmp_int64 iter_number; // iteration number of "collapsed" loop nest
@@ -4112,6 +4119,7 @@ void __kmpc_doacross_wait(ident_t *loc, int gtid, const kmp_int64 *vec) {
}
void __kmpc_doacross_post(ident_t *loc, int gtid, const kmp_int64 *vec) {
+ __kmp_assert_valid_gtid(gtid);
kmp_int32 shft, num_dims, i;
kmp_uint32 flag;
kmp_int64 iter_number; // iteration number of "collapsed" loop nest
@@ -4183,6 +4191,7 @@ void __kmpc_doacross_post(ident_t *loc, int gtid, const kmp_int64 *vec) {
}
void __kmpc_doacross_fini(ident_t *loc, int gtid) {
+ __kmp_assert_valid_gtid(gtid);
kmp_int32 num_done;
kmp_info_t *th = __kmp_threads[gtid];
kmp_team_t *team = th->th.th_team;
diff --git a/openmp/runtime/src/kmp_dispatch.cpp b/openmp/runtime/src/kmp_dispatch.cpp
index 9d7b81733eba..ca14bbc954af 100644
--- a/openmp/runtime/src/kmp_dispatch.cpp
+++ b/openmp/runtime/src/kmp_dispatch.cpp
@@ -773,6 +773,7 @@ __kmp_dispatch_init(ident_t *loc, int gtid, enum sched_type schedule, T lb,
sizeof(dispatch_private_info));
KMP_BUILD_ASSERT(sizeof(dispatch_shared_info_template<UT>) ==
sizeof(dispatch_shared_info));
+ __kmp_assert_valid_gtid(gtid);
if (!TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
@@ -997,6 +998,7 @@ __kmp_dispatch_init(ident_t *loc, int gtid, enum sched_type schedule, T lb,
template <typename UT>
static void __kmp_dispatch_finish(int gtid, ident_t *loc) {
typedef typename traits_t<UT>::signed_t ST;
+ __kmp_assert_valid_gtid(gtid);
kmp_info_t *th = __kmp_threads[gtid];
KD_TRACE(100, ("__kmp_dispatch_finish: T#%d called\n", gtid));
@@ -1060,6 +1062,7 @@ static void __kmp_dispatch_finish(int gtid, ident_t *loc) {
template <typename UT>
static void __kmp_dispatch_finish_chunk(int gtid, ident_t *loc) {
typedef typename traits_t<UT>::signed_t ST;
+ __kmp_assert_valid_gtid(gtid);
kmp_info_t *th = __kmp_threads[gtid];
KD_TRACE(100, ("__kmp_dispatch_finish_chunk: T#%d called\n", gtid));
@@ -1900,6 +1903,7 @@ static int __kmp_dispatch_next(ident_t *loc, int gtid, kmp_int32 *p_last,
int status;
dispatch_private_info_template<T> *pr;
+ __kmp_assert_valid_gtid(gtid);
kmp_info_t *th = __kmp_threads[gtid];
kmp_team_t *team = th->th.th_team;
@@ -2192,6 +2196,7 @@ static void __kmp_dist_get_bounds(ident_t *loc, kmp_int32 gtid,
__kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
}
}
+ __kmp_assert_valid_gtid(gtid);
th = __kmp_threads[gtid];
team = th->th.th_team;
KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
diff --git a/openmp/runtime/src/kmp_error.cpp b/openmp/runtime/src/kmp_error.cpp
index b30b26e3ab2b..7fc0ce17a05c 100644
--- a/openmp/runtime/src/kmp_error.cpp
+++ b/openmp/runtime/src/kmp_error.cpp
@@ -415,9 +415,6 @@ void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
__kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
&p->stack_data[tos]);
}
- if (gtid < 0) {
- __kmp_check_null_func();
- }
KE_TRACE(100, (POP_MSG(p)));
p->s_top = p->stack_data[tos].prev;
p->stack_data[tos].type = ct_none;
diff --git a/openmp/runtime/src/kmp_sched.cpp b/openmp/runtime/src/kmp_sched.cpp
index 28d0ffe0fb9d..129dc1946b5a 100644
--- a/openmp/runtime/src/kmp_sched.cpp
+++ b/openmp/runtime/src/kmp_sched.cpp
@@ -85,6 +85,7 @@ static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
kmp_uint32 nth;
UT trip_count;
kmp_team_t *team;
+ __kmp_assert_valid_gtid(gtid);
kmp_info_t *th = __kmp_threads[gtid];
#if OMPT_SUPPORT && OMPT_OPTIONAL
@@ -438,6 +439,7 @@ static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
+ __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
{
char *buff;
@@ -681,6 +683,7 @@ static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
KE_TRACE(10, ("__kmp_team_static_init called (%d)\n", gtid));
+ __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
{
char *buff;
diff --git a/openmp/runtime/src/kmp_taskdeps.cpp b/openmp/runtime/src/kmp_taskdeps.cpp
index a654951f5b3b..9a81196879a8 100644
--- a/openmp/runtime/src/kmp_taskdeps.cpp
+++ b/openmp/runtime/src/kmp_taskdeps.cpp
@@ -514,7 +514,7 @@ kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
loc_ref, new_taskdata));
-
+ __kmp_assert_valid_gtid(gtid);
kmp_info_t *thread = __kmp_threads[gtid];
kmp_taskdata_t *current_task = thread->th.th_current_task;
@@ -677,7 +677,7 @@ void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
gtid, loc_ref));
return;
}
-
+ __kmp_assert_valid_gtid(gtid);
kmp_info_t *thread = __kmp_threads[gtid];
kmp_taskdata_t *current_task = thread->th.th_current_task;
diff --git a/openmp/runtime/src/kmp_tasking.cpp b/openmp/runtime/src/kmp_tasking.cpp
index c5a3744ad27b..3dfc3c4030d4 100644
--- a/openmp/runtime/src/kmp_tasking.cpp
+++ b/openmp/runtime/src/kmp_tasking.cpp
@@ -984,6 +984,7 @@ static void __kmpc_omp_task_complete_if0_template(ident_t *loc_ref,
kmp_task_t *task) {
KA_TRACE(10, ("__kmpc_omp_task_complete_if0(enter): T#%d loc=%p task=%p\n",
gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
+ __kmp_assert_valid_gtid(gtid);
// this routine will provide task to resume
__kmp_task_finish<ompt>(gtid, task, NULL);
@@ -1381,10 +1382,9 @@ kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
kmp_routine_entry_t task_entry) {
kmp_task_t *retval;
kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
-
+ __kmp_assert_valid_gtid(gtid);
input_flags->native = FALSE;
// __kmp_task_alloc() sets up all other runtime flags
-
KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s %s) "
"sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
gtid, loc_ref, input_flags->tiedness ? "tied " : "untied",
@@ -1716,6 +1716,7 @@ kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
#endif
KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
new_taskdata));
+ __kmp_assert_valid_gtid(gtid);
#if OMPT_SUPPORT
kmp_taskdata_t *parent = NULL;
@@ -1824,6 +1825,7 @@ static kmp_int32 __kmpc_omp_taskwait_template(ident_t *loc_ref, kmp_int32 gtid,
KMP_SET_THREAD_STATE_BLOCK(TASKWAIT);
KA_TRACE(10, ("__kmpc_omp_taskwait(enter): T#%d loc=%p\n", gtid, loc_ref));
+ __kmp_assert_valid_gtid(gtid);
if (__kmp_tasking_mode != tskm_immediate_exec) {
thread = __kmp_threads[gtid];
@@ -1953,6 +1955,7 @@ kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part) {
KA_TRACE(10, ("__kmpc_omp_taskyield(enter): T#%d loc=%p end_part = %d\n",
gtid, loc_ref, end_part));
+ __kmp_assert_valid_gtid(gtid);
if (__kmp_tasking_mode != tskm_immediate_exec && __kmp_init_parallel) {
thread = __kmp_threads[gtid];
@@ -2108,6 +2111,7 @@ void __kmp_call_init<kmp_taskred_input_t>(kmp_taskred_data_t &item,
template <typename T>
void *__kmp_task_reduction_init(int gtid, int num, T *data) {
+ __kmp_assert_valid_gtid(gtid);
kmp_info_t *thread = __kmp_threads[gtid];
kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup;
kmp_int32 nth = thread->th.th_team_nproc;
@@ -2223,6 +2227,7 @@ void __kmp_task_reduction_init_copy(kmp_info_t *thr, int num, T *data,
Get thread-specific location of data item
*/
void *__kmpc_task_reduction_get_th_data(int gtid, void *tskgrp, void *data) {
+ __kmp_assert_valid_gtid(gtid);
kmp_info_t *thread = __kmp_threads[gtid];
kmp_int32 nth = thread->th.th_team_nproc;
if (nth == 1)
@@ -2328,6 +2333,7 @@ static void __kmp_task_reduction_clean(kmp_info_t *th, kmp_taskgroup_t *tg) {
template <typename T>
void *__kmp_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws,
int num, T *data) {
+ __kmp_assert_valid_gtid(gtid);
kmp_info_t *thr = __kmp_threads[gtid];
kmp_int32 nth = thr->th.th_team_nproc;
__kmpc_taskgroup(loc, gtid); // form new taskgroup first
@@ -2423,6 +2429,7 @@ void __kmpc_task_reduction_modifier_fini(ident_t *loc, int gtid, int is_ws) {
// __kmpc_taskgroup: Start a new taskgroup
void __kmpc_taskgroup(ident_t *loc, int gtid) {
+ __kmp_assert_valid_gtid(gtid);
kmp_info_t *thread = __kmp_threads[gtid];
kmp_taskdata_t *taskdata = thread->th.th_current_task;
kmp_taskgroup_t *tg_new =
@@ -2455,6 +2462,7 @@ void __kmpc_taskgroup(ident_t *loc, int gtid) {
// __kmpc_end_taskgroup: Wait until all tasks generated by the current task
// and its descendants are complete
void __kmpc_end_taskgroup(ident_t *loc, int gtid) {
+ __kmp_assert_valid_gtid(gtid);
kmp_info_t *thread = __kmp_threads[gtid];
kmp_taskdata_t *taskdata = thread->th.th_current_task;
kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
@@ -3807,7 +3815,7 @@ void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask) {
KA_TRACE(
10, ("__kmp_proxy_task_completed(enter): T#%d proxy task %p completing\n",
gtid, taskdata));
-
+ __kmp_assert_valid_gtid(gtid);
KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
__kmp_first_top_half_finish_proxy(taskdata);
@@ -4439,7 +4447,7 @@ void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
int sched, kmp_uint64 grainsize, void *task_dup) {
kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
KMP_DEBUG_ASSERT(task != NULL);
-
+ __kmp_assert_valid_gtid(gtid);
if (nogroup == 0) {
#if OMPT_SUPPORT && OMPT_OPTIONAL
OMPT_STORE_RETURN_ADDRESS(gtid);
More information about the Openmp-commits
mailing list