[Openmp-commits] [openmp] 681055e - [OpenMP] Remove TSAN annotations from libomp
Joachim Protze via Openmp-commits
openmp-commits at lists.llvm.org
Mon Jul 12 09:50:28 PDT 2021
Author: Joachim Protze
Date: 2021-07-12T18:49:11+02:00
New Revision: 681055ea694b1de21a52b319329e5d4f9b1b807d
URL: https://github.com/llvm/llvm-project/commit/681055ea694b1de21a52b319329e5d4f9b1b807d
DIFF: https://github.com/llvm/llvm-project/commit/681055ea694b1de21a52b319329e5d4f9b1b807d.diff
LOG: [OpenMP] Remove TSAN annotations from libomp
The annotations in libomp were never built by default. They are also
superseded by the annotations provided by the OMPT tool libarcher.so.
With respect to libarcher, libomp behaves as if libarcher were the last
element of OMP_TOOL_LIBRARIES, i.e., if no other OMPT tool becomes active,
libarcher checks whether the OpenMP application was built with TSan.
Since libarcher is loaded by default, enabling LIBOMP_TSAN_SUPPORT would
result in redundant annotations for TSan, which differ slightly in detail
and coverage (e.g., task dependencies are not handled well by the
annotations in libomp).
This patch removes all TSan annotations from the OpenMP runtime code.
Differential Revision: https://reviews.llvm.org/D103767
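
For context, the annotations being removed worked through weak no-op stubs
that ThreadSanitizer's runtime overrides when the final link includes
-fsanitize=thread. The following is a minimal sketch of that pattern,
condensed from the removed tsan_annotations.{h,cpp}; release_flag is a
hypothetical caller for illustration, not libomp code:

    /* Sketch of the weak-symbol annotation pattern removed by this patch:
     * the runtime calls Annotate* unconditionally, the weak definition is a
     * no-op when TSan is absent, and TSan's runtime provides the strong
     * definition when the application is built with -fsanitize=thread. */
    typedef unsigned long uptr;

    // Weak fallback: does nothing unless TSan supplies a strong definition.
    extern "C" __attribute__((weak)) void
    AnnotateHappensBefore(const char *f, int l, uptr addr) {}

    #define ANNOTATE_HAPPENS_BEFORE(addr)                                    \
      AnnotateHappensBefore(__FILE__, __LINE__, (uptr)(addr))

    void release_flag(void *flag) {
      ANNOTATE_HAPPENS_BEFORE(flag); // no-op without TSan, HB edge with it
      // ... actual release of the flag ...
    }
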
Added:
Modified:
openmp/runtime/CMakeLists.txt
openmp/runtime/src/CMakeLists.txt
openmp/runtime/src/exports_so.txt
openmp/runtime/src/kmp_barrier.cpp
openmp/runtime/src/kmp_config.h.cmake
openmp/runtime/src/kmp_lock.cpp
openmp/runtime/src/kmp_runtime.cpp
openmp/runtime/src/kmp_tasking.cpp
openmp/runtime/src/z_Linux_util.cpp
Removed:
openmp/runtime/src/tsan_annotations.cpp
openmp/runtime/src/tsan_annotations.h
################################################################################
diff --git a/openmp/runtime/CMakeLists.txt b/openmp/runtime/CMakeLists.txt
index 90aab6da3f3f3..b3dbad4ae8946 100644
--- a/openmp/runtime/CMakeLists.txt
+++ b/openmp/runtime/CMakeLists.txt
@@ -322,13 +322,6 @@ if(LIBOMP_OMPT_SUPPORT AND (NOT LIBOMP_HAVE_OMPT_SUPPORT))
libomp_error_say("OpenMP Tools Interface requested but not available in this implementation")
endif()
-# TSAN-support
-set(LIBOMP_TSAN_SUPPORT FALSE CACHE BOOL
- "TSAN-support?")
-if(LIBOMP_TSAN_SUPPORT AND (NOT LIBOMP_HAVE_TSAN_SUPPORT))
- libomp_error_say("TSAN functionality requested but not available")
-endif()
-
# OMPD-support
# Enable if OMPT SUPPORT is ON
set(OMPD_DEFAULT FALSE)
@@ -406,7 +399,6 @@ if(${OPENMP_STANDALONE_BUILD})
libomp_say("Use OMPD-support -- ${LIBOMP_OMPD_SUPPORT}")
libomp_say("Use Adaptive locks -- ${LIBOMP_USE_ADAPTIVE_LOCKS}")
libomp_say("Use quad precision -- ${LIBOMP_USE_QUAD_PRECISION}")
- libomp_say("Use TSAN-support -- ${LIBOMP_TSAN_SUPPORT}")
libomp_say("Use Hwloc library -- ${LIBOMP_USE_HWLOC}")
endif()
diff --git a/openmp/runtime/src/CMakeLists.txt b/openmp/runtime/src/CMakeLists.txt
index fd2cd3cb11f9d..bdb867e352f78 100644
--- a/openmp/runtime/src/CMakeLists.txt
+++ b/openmp/runtime/src/CMakeLists.txt
@@ -114,7 +114,6 @@ libomp_append(LIBOMP_CXXFILES kmp_ftn_cdecl.cpp)
libomp_append(LIBOMP_CXXFILES kmp_ftn_extra.cpp)
libomp_append(LIBOMP_CXXFILES kmp_version.cpp)
libomp_append(LIBOMP_CXXFILES ompt-general.cpp IF_TRUE LIBOMP_OMPT_SUPPORT)
-libomp_append(LIBOMP_CXXFILES tsan_annotations.cpp IF_TRUE LIBOMP_TSAN_SUPPORT)
libomp_append(LIBOMP_CXXFILES ompd-specific.cpp IF_TRUE LIBOMP_OMPD_SUPPORT)
set(LIBOMP_SOURCE_FILES ${LIBOMP_CXXFILES} ${LIBOMP_ASMFILES})
diff --git a/openmp/runtime/src/exports_so.txt b/openmp/runtime/src/exports_so.txt
index 524bf117be0c6..cb79ae72e67b9 100644
--- a/openmp/runtime/src/exports_so.txt
+++ b/openmp/runtime/src/exports_so.txt
@@ -27,9 +27,6 @@ VERSION {
#
ompt_start_tool; # OMPT start interface
- # icc drops weak attribute at linking step without the following line:
- Annotate*; # TSAN annotation
-
ompc_*; # omp.h renames some standard functions to ompc_*.
kmp_*; # Intel extensions.
kmpc_*; # Intel extensions.
diff --git a/openmp/runtime/src/kmp_barrier.cpp b/openmp/runtime/src/kmp_barrier.cpp
index 237d18a73dcd6..93112156a1efa 100644
--- a/openmp/runtime/src/kmp_barrier.cpp
+++ b/openmp/runtime/src/kmp_barrier.cpp
@@ -22,8 +22,6 @@
#define USE_NGO_STORES 1
#endif // KMP_MIC
-#include "tsan_annotations.h"
-
#if KMP_MIC && USE_NGO_STORES
// ICV copying
#define ngo_load(src) __m512d Vt = _mm512_load_pd((void *)(src))
@@ -77,7 +75,6 @@ static bool __kmp_linear_barrier_gather_template(
/* After performing this write, a worker thread may not assume that the team
is valid any more - it could be deallocated by the primary thread at any
time. */
- ANNOTATE_BARRIER_BEGIN(this_thr);
kmp_flag_64<> flag(&thr_bar->b_arrived, other_threads[0]);
flag.release();
} else {
@@ -111,7 +108,6 @@ static bool __kmp_linear_barrier_gather_template(
new_state);
flag.wait(this_thr, FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
}
- ANNOTATE_BARRIER_END(other_threads[i]);
#if USE_ITT_BUILD && USE_ITT_NOTIFY
// Barrier imbalance - write min of the thread time and the other thread
// time to the thread.
@@ -125,14 +121,11 @@ static bool __kmp_linear_barrier_gather_template(
("__kmp_linear_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
gtid, team->t.t_id, tid, __kmp_gtid_from_tid(i, team),
team->t.t_id, i));
- ANNOTATE_REDUCE_AFTER(reduce);
OMPT_REDUCTION_DECL(this_thr, gtid);
OMPT_REDUCTION_BEGIN;
(*reduce)(this_thr->th.th_local.reduce_data,
other_threads[i]->th.th_local.reduce_data);
OMPT_REDUCTION_END;
- ANNOTATE_REDUCE_BEFORE(reduce);
- ANNOTATE_REDUCE_BEFORE(&team->t.t_bar);
}
}
// Don't have to worry about sleep bit here or atomic since team setting
@@ -202,7 +195,6 @@ static bool __kmp_linear_barrier_release_template(
team->t.t_id, i, &other_threads[i]->th.th_bar[bt].bb.b_go,
other_threads[i]->th.th_bar[bt].bb.b_go,
other_threads[i]->th.th_bar[bt].bb.b_go + KMP_BARRIER_STATE_BUMP));
- ANNOTATE_BARRIER_BEGIN(other_threads[i]);
kmp_flag_64<> flag(&other_threads[i]->th.th_bar[bt].bb.b_go,
other_threads[i]);
flag.release();
@@ -219,7 +211,6 @@ static bool __kmp_linear_barrier_release_template(
kmp_flag_64<> flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
}
- ANNOTATE_BARRIER_END(this_thr);
#if USE_ITT_BUILD && USE_ITT_NOTIFY
if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
// In a fork barrier; cannot get the object reliably (or ITTNOTIFY is
@@ -338,7 +329,6 @@ static void __kmp_tree_barrier_gather(
// Wait for child to arrive
kmp_flag_64<> flag(&child_bar->b_arrived, new_state);
flag.wait(this_thr, FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
- ANNOTATE_BARRIER_END(child_thr);
#if USE_ITT_BUILD && USE_ITT_NOTIFY
// Barrier imbalance - write min of the thread time and a child time to
// the thread.
@@ -352,14 +342,11 @@ static void __kmp_tree_barrier_gather(
("__kmp_tree_barrier_gather: T#%d(%d:%d) += T#%d(%d:%u)\n",
gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
team->t.t_id, child_tid));
- ANNOTATE_REDUCE_AFTER(reduce);
OMPT_REDUCTION_DECL(this_thr, gtid);
OMPT_REDUCTION_BEGIN;
(*reduce)(this_thr->th.th_local.reduce_data,
child_thr->th.th_local.reduce_data);
OMPT_REDUCTION_END;
- ANNOTATE_REDUCE_BEFORE(reduce);
- ANNOTATE_REDUCE_BEFORE(&team->t.t_bar);
}
child++;
child_tid++;
@@ -380,7 +367,6 @@ static void __kmp_tree_barrier_gather(
/* After performing this write, a worker thread may not assume that the team
is valid any more - it could be deallocated by the primary thread at any
time. */
- ANNOTATE_BARRIER_BEGIN(this_thr);
kmp_flag_64<> flag(&thr_bar->b_arrived, other_threads[parent_tid]);
flag.release();
} else {
@@ -419,7 +405,6 @@ static void __kmp_tree_barrier_release(
// Wait for parent thread to release us
kmp_flag_64<> flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
- ANNOTATE_BARRIER_END(this_thr);
#if USE_ITT_BUILD && USE_ITT_NOTIFY
if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
// In fork barrier where we could not get the object reliably (or
@@ -494,7 +479,6 @@ static void __kmp_tree_barrier_release(
team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
child_bar->b_go + KMP_BARRIER_STATE_BUMP));
// Release child from barrier
- ANNOTATE_BARRIER_BEGIN(child_thr);
kmp_flag_64<> flag(&child_bar->b_go, child_thr);
flag.release();
child++;
@@ -557,7 +541,6 @@ static void __kmp_hyper_barrier_gather(
/* After performing this write (in the last iteration of the enclosing for
loop), a worker thread may not assume that the team is valid any more
- it could be deallocated by the primary thread at any time. */
- ANNOTATE_BARRIER_BEGIN(this_thr);
p_flag.set_waiter(other_threads[parent_tid]);
p_flag.release();
break;
@@ -586,7 +569,6 @@ static void __kmp_hyper_barrier_gather(
// Wait for child to arrive
kmp_flag_64<> c_flag(&child_bar->b_arrived, new_state);
c_flag.wait(this_thr, FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
- ANNOTATE_BARRIER_END(child_thr);
KMP_MB(); // Synchronize parent and child threads.
#if USE_ITT_BUILD && USE_ITT_NOTIFY
// Barrier imbalance - write min of the thread time and a child time to
@@ -601,14 +583,11 @@ static void __kmp_hyper_barrier_gather(
("__kmp_hyper_barrier_gather: T#%d(%d:%d) += T#%d(%d:%u)\n",
gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
team->t.t_id, child_tid));
- ANNOTATE_REDUCE_AFTER(reduce);
OMPT_REDUCTION_DECL(this_thr, gtid);
OMPT_REDUCTION_BEGIN;
(*reduce)(this_thr->th.th_local.reduce_data,
child_thr->th.th_local.reduce_data);
OMPT_REDUCTION_END;
- ANNOTATE_REDUCE_BEFORE(reduce);
- ANNOTATE_REDUCE_BEFORE(&team->t.t_bar);
}
}
}
@@ -668,7 +647,6 @@ static void __kmp_hyper_barrier_release(
// Wait for parent thread to release us
kmp_flag_64<> flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
- ANNOTATE_BARRIER_END(this_thr);
#if USE_ITT_BUILD && USE_ITT_NOTIFY
if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
// In fork barrier where we could not get the object reliably
@@ -767,7 +745,6 @@ static void __kmp_hyper_barrier_release(
team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
child_bar->b_go + KMP_BARRIER_STATE_BUMP));
// Release child from barrier
- ANNOTATE_BARRIER_BEGIN(child_thr);
kmp_flag_64<> flag(&child_bar->b_go, child_thr);
flag.release();
}
@@ -918,7 +895,6 @@ static void __kmp_hierarchical_barrier_gather(
kmp_flag_64<> flag(&thr_bar->b_arrived, leaf_state);
flag.wait(this_thr, FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
if (reduce) {
- ANNOTATE_REDUCE_AFTER(reduce);
OMPT_REDUCTION_DECL(this_thr, gtid);
OMPT_REDUCTION_BEGIN;
for (child_tid = tid + 1; child_tid <= tid + thr_bar->leaf_kids;
@@ -928,13 +904,10 @@ static void __kmp_hierarchical_barrier_gather(
gtid, team->t.t_id, tid,
__kmp_gtid_from_tid(child_tid, team), team->t.t_id,
child_tid));
- ANNOTATE_BARRIER_END(other_threads[child_tid]);
(*reduce)(this_thr->th.th_local.reduce_data,
other_threads[child_tid]->th.th_local.reduce_data);
}
OMPT_REDUCTION_END;
- ANNOTATE_REDUCE_BEFORE(reduce);
- ANNOTATE_REDUCE_BEFORE(&team->t.t_bar);
}
// clear leaf_state bits
KMP_TEST_THEN_AND64(&thr_bar->b_arrived, ~(thr_bar->leaf_state));
@@ -957,18 +930,14 @@ static void __kmp_hierarchical_barrier_gather(
child_tid, &child_bar->b_arrived, new_state));
kmp_flag_64<> flag(&child_bar->b_arrived, new_state);
flag.wait(this_thr, FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
- ANNOTATE_BARRIER_END(child_thr);
if (reduce) {
KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += "
"T#%d(%d:%d)\n",
gtid, team->t.t_id, tid,
__kmp_gtid_from_tid(child_tid, team), team->t.t_id,
child_tid));
- ANNOTATE_REDUCE_AFTER(reduce);
(*reduce)(this_thr->th.th_local.reduce_data,
child_thr->th.th_local.reduce_data);
- ANNOTATE_REDUCE_BEFORE(reduce);
- ANNOTATE_REDUCE_BEFORE(&team->t.t_bar);
}
}
}
@@ -990,18 +959,14 @@ static void __kmp_hierarchical_barrier_gather(
child_tid, &child_bar->b_arrived, new_state));
kmp_flag_64<> flag(&child_bar->b_arrived, new_state);
flag.wait(this_thr, FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
- ANNOTATE_BARRIER_END(child_thr);
if (reduce) {
KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += "
"T#%d(%d:%d)\n",
gtid, team->t.t_id, tid,
__kmp_gtid_from_tid(child_tid, team), team->t.t_id,
child_tid));
- ANNOTATE_REDUCE_AFTER(reduce);
(*reduce)(this_thr->th.th_local.reduce_data,
child_thr->th.th_local.reduce_data);
- ANNOTATE_REDUCE_BEFORE(reduce);
- ANNOTATE_REDUCE_BEFORE(&team->t.t_bar);
}
}
}
@@ -1022,7 +987,6 @@ static void __kmp_hierarchical_barrier_gather(
if (thr_bar->my_level || __kmp_dflt_blocktime != KMP_MAX_BLOCKTIME ||
!thr_bar->use_oncore_barrier) { // Parent is waiting on my b_arrived
// flag; release it
- ANNOTATE_BARRIER_BEGIN(this_thr);
kmp_flag_64<> flag(&thr_bar->b_arrived,
other_threads[thr_bar->parent_tid]);
flag.release();
@@ -1071,7 +1035,6 @@ static void __kmp_hierarchical_barrier_release(
thr_bar->wait_flag = KMP_BARRIER_OWN_FLAG;
kmp_flag_64<> flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
- ANNOTATE_BARRIER_END(this_thr);
TCW_8(thr_bar->b_go,
KMP_INIT_BARRIER_STATE); // Reset my b_go flag for next time
} else { // Thread barrier data is initialized, this is a leaf, blocktime is
@@ -1217,7 +1180,6 @@ static void __kmp_hierarchical_barrier_release(
team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
child_bar->b_go + KMP_BARRIER_STATE_BUMP));
// Release child using child's b_go flag
- ANNOTATE_BARRIER_BEGIN(child_thr);
kmp_flag_64<> flag(&child_bar->b_go, child_thr);
flag.release();
}
@@ -1243,7 +1205,6 @@ static void __kmp_hierarchical_barrier_release(
child_tid, &child_bar->b_go, child_bar->b_go,
child_bar->b_go + KMP_BARRIER_STATE_BUMP));
// Release child using child's b_go flag
- ANNOTATE_BARRIER_BEGIN(child_thr);
kmp_flag_64<> flag(&child_bar->b_go, child_thr);
flag.release();
}
@@ -1311,7 +1272,6 @@ static int __kmp_barrier_template(enum barrier_type bt, int gtid, int is_split,
KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) has arrived\n", gtid,
__kmp_team_from_gtid(gtid)->t.t_id, __kmp_tid_from_gtid(gtid)));
- ANNOTATE_BARRIER_BEGIN(&team->t.t_bar);
#if OMPT_SUPPORT
if (ompt_enabled.enabled) {
#if OMPT_OPTIONAL
@@ -1587,7 +1547,6 @@ static int __kmp_barrier_template(enum barrier_type bt, int gtid, int is_split,
this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
}
#endif
- ANNOTATE_BARRIER_END(&team->t.t_bar);
if (cancellable)
return (int)cancelled;
@@ -1634,7 +1593,6 @@ void __kmp_end_split_barrier(enum barrier_type bt, int gtid) {
kmp_info_t *this_thr = __kmp_threads[gtid];
kmp_team_t *team = this_thr->th.th_team;
- ANNOTATE_BARRIER_BEGIN(&team->t.t_bar);
if (!team->t.t_serialized) {
if (KMP_MASTER_GTID(gtid)) {
switch (__kmp_barrier_release_pattern[bt]) {
@@ -1665,7 +1623,6 @@ void __kmp_end_split_barrier(enum barrier_type bt, int gtid) {
} // if
}
}
- ANNOTATE_BARRIER_END(&team->t.t_bar);
}
void __kmp_join_barrier(int gtid) {
@@ -1716,7 +1673,6 @@ void __kmp_join_barrier(int gtid) {
KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) arrived at join barrier\n",
gtid, team_id, tid));
- ANNOTATE_BARRIER_BEGIN(&team->t.t_bar);
#if OMPT_SUPPORT
if (ompt_enabled.enabled) {
#if OMPT_OPTIONAL
@@ -1904,7 +1860,6 @@ void __kmp_join_barrier(int gtid) {
KA_TRACE(10,
("__kmp_join_barrier: T#%d(%d:%d) leaving\n", gtid, team_id, tid));
- ANNOTATE_BARRIER_END(&team->t.t_bar);
}
// TODO release worker threads' fork barriers as we are ready instead of all at
@@ -1918,7 +1873,6 @@ void __kmp_fork_barrier(int gtid, int tid) {
void *itt_sync_obj = NULL;
#endif /* USE_ITT_BUILD */
if (team)
- ANNOTATE_BARRIER_END(&team->t.t_bar);
KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) has arrived\n", gtid,
(team != NULL) ? team->t.t_id : -1, tid));
@@ -2130,7 +2084,6 @@ void __kmp_fork_barrier(int gtid, int tid) {
} // (prepare called inside barrier_release)
}
#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
- ANNOTATE_BARRIER_END(&team->t.t_bar);
KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) is leaving\n", gtid,
team->t.t_id, tid));
}
diff --git a/openmp/runtime/src/kmp_config.h.cmake b/openmp/runtime/src/kmp_config.h.cmake
index f092efe67ad74..0b07d115ff7b9 100644
--- a/openmp/runtime/src/kmp_config.h.cmake
+++ b/openmp/runtime/src/kmp_config.h.cmake
@@ -68,10 +68,6 @@
#define KMP_LIBRARY_FILE "@LIBOMP_LIB_FILE@"
#define KMP_VERSION_MAJOR @LIBOMP_VERSION_MAJOR@
#define KMP_VERSION_MINOR @LIBOMP_VERSION_MINOR@
-#cmakedefine01 LIBOMP_TSAN_SUPPORT
-#if LIBOMP_TSAN_SUPPORT
-#define TSAN_SUPPORT
-#endif
#cmakedefine01 MSVC
#define KMP_MSVC_COMPAT MSVC
#cmakedefine01 LIBOMP_HAVE_WAITPKG_INTRINSICS
diff --git a/openmp/runtime/src/kmp_lock.cpp b/openmp/runtime/src/kmp_lock.cpp
index 23d180ad50ff4..59726f2b9f21c 100644
--- a/openmp/runtime/src/kmp_lock.cpp
+++ b/openmp/runtime/src/kmp_lock.cpp
@@ -21,8 +21,6 @@
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"
-#include "tsan_annotations.h"
-
#if KMP_USE_FUTEX
#include <sys/syscall.h>
#include <unistd.h>
@@ -112,7 +110,6 @@ __kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t *lck, kmp_int32 gtid) {
int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
int retval = __kmp_acquire_tas_lock_timed_template(lck, gtid);
- ANNOTATE_TAS_ACQUIRED(lck);
return retval;
}
@@ -154,7 +151,6 @@ int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
KMP_MB(); /* Flush all pending memory write invalidates. */
KMP_FSYNC_RELEASING(lck);
- ANNOTATE_TAS_RELEASED(lck);
KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(tas));
KMP_MB(); /* Flush all pending memory write invalidates. */
@@ -208,7 +204,6 @@ int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
return KMP_LOCK_ACQUIRED_NEXT;
} else {
__kmp_acquire_tas_lock_timed_template(lck, gtid);
- ANNOTATE_TAS_ACQUIRED(lck);
lck->lk.depth_locked = 1;
return KMP_LOCK_ACQUIRED_FIRST;
}
@@ -398,7 +393,6 @@ __kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t *lck, kmp_int32 gtid) {
int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
int retval = __kmp_acquire_futex_lock_timed_template(lck, gtid);
- ANNOTATE_FUTEX_ACQUIRED(lck);
return retval;
}
@@ -441,7 +435,6 @@ int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
lck, lck->lk.poll, gtid));
KMP_FSYNC_RELEASING(lck);
- ANNOTATE_FUTEX_RELEASED(lck);
kmp_int32 poll_val = KMP_XCHG_FIXED32(&(lck->lk.poll), KMP_LOCK_FREE(futex));
@@ -512,7 +505,6 @@ int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
return KMP_LOCK_ACQUIRED_NEXT;
} else {
__kmp_acquire_futex_lock_timed_template(lck, gtid);
- ANNOTATE_FUTEX_ACQUIRED(lck);
lck->lk.depth_locked = 1;
return KMP_LOCK_ACQUIRED_FIRST;
}
@@ -644,7 +636,6 @@ __kmp_acquire_ticket_lock_timed_template(kmp_ticket_lock_t *lck,
int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
int retval = __kmp_acquire_ticket_lock_timed_template(lck, gtid);
- ANNOTATE_TICKET_ACQUIRED(lck);
return retval;
}
@@ -719,7 +710,6 @@ int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
std::atomic_load_explicit(&lck->lk.now_serving,
std::memory_order_relaxed);
- ANNOTATE_TICKET_RELEASED(lck);
std::atomic_fetch_add_explicit(&lck->lk.now_serving, 1U,
std::memory_order_release);
@@ -814,7 +804,6 @@ int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
return KMP_LOCK_ACQUIRED_NEXT;
} else {
__kmp_acquire_ticket_lock_timed_template(lck, gtid);
- ANNOTATE_TICKET_ACQUIRED(lck);
std::atomic_store_explicit(&lck->lk.depth_locked, 1,
std::memory_order_relaxed);
std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
@@ -1282,7 +1271,6 @@ int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
KMP_DEBUG_ASSERT(gtid >= 0);
int retval = __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
- ANNOTATE_QUEUING_ACQUIRED(lck);
return retval;
}
@@ -1328,7 +1316,6 @@ int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
KA_TRACE(1000,
("__kmp_test_queuing_lock: T#%d exiting: holding lock\n", gtid));
KMP_FSYNC_ACQUIRED(lck);
- ANNOTATE_QUEUING_ACQUIRED(lck);
return TRUE;
}
}
@@ -1378,7 +1365,6 @@ int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
KMP_FSYNC_RELEASING(lck);
- ANNOTATE_QUEUING_RELEASED(lck);
while (1) {
kmp_int32 dequeued;
@@ -1567,7 +1553,6 @@ int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
return KMP_LOCK_ACQUIRED_NEXT;
} else {
__kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
- ANNOTATE_QUEUING_ACQUIRED(lck);
KMP_MB();
lck->lk.depth_locked = 1;
KMP_MB();
@@ -2124,7 +2109,6 @@ static void __kmp_acquire_adaptive_lock(kmp_adaptive_lock_t *lck,
__kmp_acquire_queuing_lock_timed_template<FALSE>(GET_QLK_PTR(lck), gtid);
// We have acquired the base lock, so count that.
KMP_INC_STAT(lck, nonSpeculativeAcquires);
- ANNOTATE_QUEUING_ACQUIRED(lck);
}
static void __kmp_acquire_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
@@ -2357,7 +2341,6 @@ __kmp_acquire_drdpa_lock_timed_template(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
int retval = __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
- ANNOTATE_DRDPA_ACQUIRED(lck);
return retval;
}
@@ -2434,7 +2417,6 @@ int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
KA_TRACE(1000, ("__kmp_release_drdpa_lock: ticket #%lld released lock %p\n",
ticket - 1, lck));
KMP_FSYNC_RELEASING(lck);
- ANNOTATE_DRDPA_RELEASED(lck);
polls[ticket & mask] = ticket; // atomic store
return KMP_LOCK_RELEASED;
}
@@ -2521,7 +2503,6 @@ int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
return KMP_LOCK_ACQUIRED_NEXT;
} else {
__kmp_acquire_drdpa_lock_timed_template(lck, gtid);
- ANNOTATE_DRDPA_ACQUIRED(lck);
KMP_MB();
lck->lk.depth_locked = 1;
KMP_MB();
@@ -3844,15 +3825,11 @@ kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock, kmp_int32 gtid,
if (__kmp_lock_pool == NULL) {
// Lock pool is empty. Allocate new memory.
- // ANNOTATION: Found no good way to express the syncronisation
- // between allocation and usage, so ignore the allocation
- ANNOTATE_IGNORE_WRITES_BEGIN();
if (__kmp_num_locks_in_block <= 1) { // Tune this cutoff point.
lck = (kmp_user_lock_p)__kmp_allocate(__kmp_user_lock_size);
} else {
lck = __kmp_lock_block_allocate();
}
- ANNOTATE_IGNORE_WRITES_END();
// Insert lock in the table so that it can be freed in __kmp_cleanup,
// and debugger has info on all allocated locks.
diff --git a/openmp/runtime/src/kmp_runtime.cpp b/openmp/runtime/src/kmp_runtime.cpp
index f6a53825f2d10..1f1025a72e8c6 100644
--- a/openmp/runtime/src/kmp_runtime.cpp
+++ b/openmp/runtime/src/kmp_runtime.cpp
@@ -47,8 +47,6 @@ static char *ProfileTraceFile = nullptr;
#include <process.h>
#endif
-#include "tsan_annotations.h"
-
#if KMP_OS_WINDOWS
// windows does not need include files as it doesn't use shared memory
#else
@@ -5959,7 +5957,6 @@ static void __kmp_reap_thread(kmp_info_t *thread, int is_root) {
gtid));
/* Need release fence here to prevent seg faults for tree forkjoin barrier
* (GEH) */
- ANNOTATE_HAPPENS_BEFORE(thread);
kmp_flag_64<> flag(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go,
thread);
__kmp_release_64(&flag);
diff --git a/openmp/runtime/src/kmp_tasking.cpp b/openmp/runtime/src/kmp_tasking.cpp
index 7dfd256801b5a..fe15cb3dd9cfb 100644
--- a/openmp/runtime/src/kmp_tasking.cpp
+++ b/openmp/runtime/src/kmp_tasking.cpp
@@ -21,8 +21,6 @@
#include "ompt-specific.h"
#endif
-#include "tsan_annotations.h"
-
/* forward declaration */
static void __kmp_enable_tasking(kmp_task_team_t *task_team,
kmp_info_t *this_thr);
@@ -734,7 +732,6 @@ static void __kmp_free_task(kmp_int32 gtid, kmp_taskdata_t *taskdata,
KMP_DEBUG_ASSERT(taskdata->td_incomplete_child_tasks == 0);
taskdata->td_flags.freed = 1;
- ANNOTATE_HAPPENS_BEFORE(taskdata);
// deallocate the taskdata and shared variable blocks associated with this task
#if USE_FAST_MEMORY
__kmp_fast_free(thread, taskdata);
@@ -1305,7 +1302,6 @@ kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(
encountering_thread, shareds_offset + sizeof_shareds);
#endif /* USE_FAST_MEMORY */
- ANNOTATE_HAPPENS_AFTER(taskdata);
task = KMP_TASKDATA_TO_TASK(taskdata);
@@ -1416,7 +1412,6 @@ kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n",
gtid, taskdata, taskdata->td_parent));
- ANNOTATE_HAPPENS_BEFORE(task);
return task;
}
@@ -1537,7 +1532,6 @@ static void __kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task,
// Proxy tasks are not handled by the runtime
if (taskdata->td_flags.proxy != TASK_PROXY) {
- ANNOTATE_HAPPENS_AFTER(task);
__kmp_task_start(gtid, task, current_task); // OMPT only if not discarded
}
@@ -1653,7 +1647,6 @@ static void __kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task,
// Proxy tasks are not handled by the runtime
if (taskdata->td_flags.proxy != TASK_PROXY) {
- ANNOTATE_HAPPENS_BEFORE(taskdata->td_parent);
#if OMPT_SUPPORT
if (UNLIKELY(ompt_enabled.enabled)) {
thread->th.ompt_thread_info = oldInfo;
@@ -1719,7 +1712,6 @@ kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
"loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
gtid, loc_ref, new_taskdata));
- ANNOTATE_HAPPENS_BEFORE(new_task);
#if OMPT_SUPPORT
if (UNLIKELY(ompt_enabled.enabled)) {
parent->ompt_task_info.frame.enter_frame = ompt_data_none;
@@ -1754,7 +1746,6 @@ kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
__kmp_invoke_task(gtid, new_task, current_task);
}
- ANNOTATE_HAPPENS_BEFORE(new_task);
return TASK_CURRENT_NOT_QUEUED;
}
@@ -1980,7 +1971,6 @@ static kmp_int32 __kmpc_omp_taskwait_template(ident_t *loc_ref, kmp_int32 gtid,
}
#endif // OMPT_SUPPORT && OMPT_OPTIONAL
- ANNOTATE_HAPPENS_AFTER(taskdata);
}
KA_TRACE(10, ("__kmpc_omp_taskwait(exit): T#%d task %p finished waiting, "
@@ -2663,7 +2653,6 @@ void __kmpc_end_taskgroup(ident_t *loc, int gtid) {
KA_TRACE(10, ("__kmpc_end_taskgroup(exit): T#%d task %p finished waiting\n",
gtid, taskdata));
- ANNOTATE_HAPPENS_AFTER(taskdata);
#if OMPT_SUPPORT && OMPT_OPTIONAL
if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
@@ -3365,10 +3354,8 @@ static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
// Make the initial allocate for threads_data array, and zero entries
// Cannot use __kmp_thread_calloc() because threads not around for
// kmp_reap_task_team( ).
- ANNOTATE_IGNORE_WRITES_BEGIN();
*threads_data_p = (kmp_thread_data_t *)__kmp_allocate(
nthreads * sizeof(kmp_thread_data_t));
- ANNOTATE_IGNORE_WRITES_END();
#ifdef BUILD_TIED_TASK_STACK
// GEH: Figure out if this is the right thing to do
for (i = 0; i < nthreads; i++) {
diff --git a/openmp/runtime/src/tsan_annotations.cpp b/openmp/runtime/src/tsan_annotations.cpp
deleted file mode 100644
index 5be17f8337ce2..0000000000000
--- a/openmp/runtime/src/tsan_annotations.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * tsan_annotations.cpp -- ThreadSanitizer annotations to support data
- * race detection in OpenMP programs.
- */
-
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "tsan_annotations.h"
-
-#include <stdio.h>
-
-typedef unsigned long uptr;
-typedef signed long sptr;
-
-extern "C" __attribute__((weak)) void AnnotateHappensBefore(const char *f,
- int l, uptr addr) {}
-extern "C" __attribute__((weak)) void AnnotateHappensAfter(const char *f, int l,
- uptr addr) {}
-extern "C" __attribute__((weak)) void AnnotateCondVarSignal(const char *f,
- int l, uptr cv) {}
-extern "C" __attribute__((weak)) void AnnotateCondVarSignalAll(const char *f,
- int l, uptr cv) {
-}
-extern "C" __attribute__((weak)) void AnnotateMutexIsNotPHB(const char *f,
- int l, uptr mu) {}
-extern "C" __attribute__((weak)) void AnnotateCondVarWait(const char *f, int l,
- uptr cv, uptr lock) {}
-extern "C" __attribute__((weak)) void AnnotateRWLockCreate(const char *f, int l,
- uptr m) {}
-extern "C" __attribute__((weak)) void
-AnnotateRWLockCreateStatic(const char *f, int l, uptr m) {}
-extern "C" __attribute__((weak)) void AnnotateRWLockDestroy(const char *f,
- int l, uptr m) {}
-extern "C" __attribute__((weak)) void
-AnnotateRWLockAcquired(const char *f, int l, uptr m, uptr is_w) {}
-extern "C" __attribute__((weak)) void
-AnnotateRWLockReleased(const char *f, int l, uptr m, uptr is_w) {}
-extern "C" __attribute__((weak)) void AnnotateTraceMemory(const char *f, int l,
- uptr mem) {}
-extern "C" __attribute__((weak)) void AnnotateFlushState(const char *f, int l) {
-}
-extern "C" __attribute__((weak)) void AnnotateNewMemory(const char *f, int l,
- uptr mem, uptr size) {}
-extern "C" __attribute__((weak)) void AnnotateNoOp(const char *f, int l,
- uptr mem) {}
-extern "C" __attribute__((weak)) void AnnotateFlushExpectedRaces(const char *f,
- int l) {}
-extern "C" __attribute__((weak)) void
-AnnotateEnableRaceDetection(const char *f, int l, int enable) {}
-extern "C" __attribute__((weak)) void
-AnnotateMutexIsUsedAsCondVar(const char *f, int l, uptr mu) {}
-extern "C" __attribute__((weak)) void AnnotatePCQGet(const char *f, int l,
- uptr pcq) {}
-extern "C" __attribute__((weak)) void AnnotatePCQPut(const char *f, int l,
- uptr pcq) {}
-extern "C" __attribute__((weak)) void AnnotatePCQDestroy(const char *f, int l,
- uptr pcq) {}
-extern "C" __attribute__((weak)) void AnnotatePCQCreate(const char *f, int l,
- uptr pcq) {}
-extern "C" __attribute__((weak)) void AnnotateExpectRace(const char *f, int l,
- uptr mem, char *desc) {
-}
-extern "C" __attribute__((weak)) void
-AnnotateBenignRaceSized(const char *f, int l, uptr mem, uptr size, char *desc) {
-}
-extern "C" __attribute__((weak)) void AnnotateBenignRace(const char *f, int l,
- uptr mem, char *desc) {
-}
-extern "C" __attribute__((weak)) void AnnotateIgnoreReadsBegin(const char *f,
- int l) {}
-extern "C" __attribute__((weak)) void AnnotateIgnoreReadsEnd(const char *f,
- int l) {}
-extern "C" __attribute__((weak)) void AnnotateIgnoreWritesBegin(const char *f,
- int l) {}
-extern "C" __attribute__((weak)) void AnnotateIgnoreWritesEnd(const char *f,
- int l) {}
-extern "C" __attribute__((weak)) void AnnotateIgnoreSyncBegin(const char *f,
- int l) {}
-extern "C" __attribute__((weak)) void AnnotateIgnoreSyncEnd(const char *f,
- int l) {}
-extern "C" __attribute__((weak)) void
-AnnotatePublishMemoryRange(const char *f, int l, uptr addr, uptr size) {}
-extern "C" __attribute__((weak)) void
-AnnotateUnpublishMemoryRange(const char *f, int l, uptr addr, uptr size) {}
-extern "C" __attribute__((weak)) void AnnotateThreadName(const char *f, int l,
- char *name) {}
-extern "C" __attribute__((weak)) void
-WTFAnnotateHappensBefore(const char *f, int l, uptr addr) {}
-extern "C" __attribute__((weak)) void
-WTFAnnotateHappensAfter(const char *f, int l, uptr addr) {}
-extern "C" __attribute__((weak)) void
-WTFAnnotateBenignRaceSized(const char *f, int l, uptr mem, uptr sz,
- char *desc) {}
-extern "C" __attribute__((weak)) int RunningOnValgrind() { return 0; }
-extern "C" __attribute__((weak)) double ValgrindSlowdown(void) { return 0; }
-extern "C" __attribute__((weak)) const char __attribute__((weak)) *
- ThreadSanitizerQuery(const char *query) {
- return 0;
-}
-extern "C" __attribute__((weak)) void
-AnnotateMemoryIsInitialized(const char *f, int l, uptr mem, uptr sz) {}
diff --git a/openmp/runtime/src/tsan_annotations.h b/openmp/runtime/src/tsan_annotations.h
deleted file mode 100644
index 2b1debbcad4e9..0000000000000
--- a/openmp/runtime/src/tsan_annotations.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*! \file */
-/*
- * tsan_annotations.h -- ThreadSanitizer annotations to support data
- * race detection in OpenMP programs.
- */
-
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef TSAN_ANNOTATIONS_H
-#define TSAN_ANNOTATIONS_H
-
-#include "kmp_config.h"
-
-/* types as used in tsan/rtl/tsan_interface_ann.cc */
-typedef unsigned long uptr;
-typedef signed long sptr;
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Declaration of all annotation functions in tsan/rtl/tsan_interface_ann.cc */
-void AnnotateHappensBefore(const char *f, int l, uptr addr);
-void AnnotateHappensAfter(const char *f, int l, uptr addr);
-void AnnotateCondVarSignal(const char *f, int l, uptr cv);
-void AnnotateCondVarSignalAll(const char *f, int l, uptr cv);
-void AnnotateMutexIsNotPHB(const char *f, int l, uptr mu);
-void AnnotateCondVarWait(const char *f, int l, uptr cv, uptr lock);
-void AnnotateRWLockCreate(const char *f, int l, uptr m);
-void AnnotateRWLockCreateStatic(const char *f, int l, uptr m);
-void AnnotateRWLockDestroy(const char *f, int l, uptr m);
-void AnnotateRWLockAcquired(const char *f, int l, uptr m, uptr is_w);
-void AnnotateRWLockReleased(const char *f, int l, uptr m, uptr is_w);
-void AnnotateTraceMemory(const char *f, int l, uptr mem);
-void AnnotateFlushState(const char *f, int l);
-void AnnotateNewMemory(const char *f, int l, uptr mem, uptr size);
-void AnnotateNoOp(const char *f, int l, uptr mem);
-void AnnotateFlushExpectedRaces(const char *f, int l);
-void AnnotateEnableRaceDetection(const char *f, int l, int enable);
-void AnnotateMutexIsUsedAsCondVar(const char *f, int l, uptr mu);
-void AnnotatePCQGet(const char *f, int l, uptr pcq);
-void AnnotatePCQPut(const char *f, int l, uptr pcq);
-void AnnotatePCQDestroy(const char *f, int l, uptr pcq);
-void AnnotatePCQCreate(const char *f, int l, uptr pcq);
-void AnnotateExpectRace(const char *f, int l, uptr mem, char *desc);
-void AnnotateBenignRaceSized(const char *f, int l, uptr mem, uptr size,
- char *desc);
-void AnnotateBenignRace(const char *f, int l, uptr mem, char *desc);
-void AnnotateIgnoreReadsBegin(const char *f, int l);
-void AnnotateIgnoreReadsEnd(const char *f, int l);
-void AnnotateIgnoreWritesBegin(const char *f, int l);
-void AnnotateIgnoreWritesEnd(const char *f, int l);
-void AnnotateIgnoreSyncBegin(const char *f, int l);
-void AnnotateIgnoreSyncEnd(const char *f, int l);
-void AnnotatePublishMemoryRange(const char *f, int l, uptr addr, uptr size);
-void AnnotateUnpublishMemoryRange(const char *f, int l, uptr addr, uptr size);
-void AnnotateThreadName(const char *f, int l, char *name);
-void WTFAnnotateHappensBefore(const char *f, int l, uptr addr);
-void WTFAnnotateHappensAfter(const char *f, int l, uptr addr);
-void WTFAnnotateBenignRaceSized(const char *f, int l, uptr mem, uptr sz,
- char *desc);
-int RunningOnValgrind();
-double ValgrindSlowdown(void);
-const char *ThreadSanitizerQuery(const char *query);
-void AnnotateMemoryIsInitialized(const char *f, int l, uptr mem, uptr sz);
-
-#ifdef __cplusplus
-}
-#endif
-
-#ifdef TSAN_SUPPORT
-#define ANNOTATE_HAPPENS_AFTER(addr) \
- AnnotateHappensAfter(__FILE__, __LINE__, (uptr)addr)
-#define ANNOTATE_HAPPENS_BEFORE(addr) \
- AnnotateHappensBefore(__FILE__, __LINE__, (uptr)addr)
-#define ANNOTATE_IGNORE_WRITES_BEGIN() \
- AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
-#define ANNOTATE_IGNORE_WRITES_END() AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
-#define ANNOTATE_RWLOCK_CREATE(lck) \
- AnnotateRWLockCreate(__FILE__, __LINE__, (uptr)lck)
-#define ANNOTATE_RWLOCK_RELEASED(lck) \
- AnnotateRWLockAcquired(__FILE__, __LINE__, (uptr)lck, 1)
-#define ANNOTATE_RWLOCK_ACQUIRED(lck) \
- AnnotateRWLockReleased(__FILE__, __LINE__, (uptr)lck, 1)
-#define ANNOTATE_BARRIER_BEGIN(addr) \
- AnnotateHappensBefore(__FILE__, __LINE__, (uptr)addr)
-#define ANNOTATE_BARRIER_END(addr) \
- AnnotateHappensAfter(__FILE__, __LINE__, (uptr)addr)
-#define ANNOTATE_REDUCE_AFTER(addr) \
- AnnotateHappensAfter(__FILE__, __LINE__, (uptr)addr)
-#define ANNOTATE_REDUCE_BEFORE(addr) \
- AnnotateHappensBefore(__FILE__, __LINE__, (uptr)addr)
-#else
-#define ANNOTATE_HAPPENS_AFTER(addr)
-#define ANNOTATE_HAPPENS_BEFORE(addr)
-#define ANNOTATE_IGNORE_WRITES_BEGIN()
-#define ANNOTATE_IGNORE_WRITES_END()
-#define ANNOTATE_RWLOCK_CREATE(lck)
-#define ANNOTATE_RWLOCK_RELEASED(lck)
-#define ANNOTATE_RWLOCK_ACQUIRED(lck)
-#define ANNOTATE_BARRIER_BEGIN(addr)
-#define ANNOTATE_BARRIER_END(addr)
-#define ANNOTATE_REDUCE_AFTER(addr)
-#define ANNOTATE_REDUCE_BEFORE(addr)
-#endif
-
-#define ANNOTATE_QUEUING
-#define ANNOTATE_TICKET
-#define ANNOTATE_FUTEX
-#define ANNOTATE_TAS
-#define ANNOTATE_DRDPA
-
-#ifdef ANNOTATE_QUEUING
-#define ANNOTATE_QUEUING_CREATE(lck)
-#define ANNOTATE_QUEUING_RELEASED(lck) ANNOTATE_HAPPENS_BEFORE(lck)
-#define ANNOTATE_QUEUING_ACQUIRED(lck) ANNOTATE_HAPPENS_AFTER(lck)
-#else
-#define ANNOTATE_QUEUING_CREATE(lck)
-#define ANNOTATE_QUEUING_RELEASED(lck)
-#define ANNOTATE_QUEUING_ACQUIRED(lck)
-#endif
-
-#ifdef ANNOTATE_TICKET
-#define ANNOTATE_TICKET_CREATE(lck)
-#define ANNOTATE_TICKET_RELEASED(lck) ANNOTATE_HAPPENS_BEFORE(lck)
-#define ANNOTATE_TICKET_ACQUIRED(lck) ANNOTATE_HAPPENS_AFTER(lck)
-#else
-#define ANNOTATE_TICKET_CREATE(lck)
-#define ANNOTATE_TICKET_RELEASED(lck)
-#define ANNOTATE_TICKET_ACQUIRED(lck)
-#endif
-
-#ifdef ANNOTATE_FUTEX
-#define ANNOTATE_FUTEX_CREATE(lck)
-#define ANNOTATE_FUTEX_RELEASED(lck) ANNOTATE_HAPPENS_BEFORE(lck)
-#define ANNOTATE_FUTEX_ACQUIRED(lck) ANNOTATE_HAPPENS_AFTER(lck)
-#else
-#define ANNOTATE_FUTEX_CREATE(lck)
-#define ANNOTATE_FUTEX_RELEASED(lck)
-#define ANNOTATE_FUTEX_ACQUIRED(lck)
-#endif
-
-#ifdef ANNOTATE_TAS
-#define ANNOTATE_TAS_CREATE(lck)
-#define ANNOTATE_TAS_RELEASED(lck) ANNOTATE_HAPPENS_BEFORE(lck)
-#define ANNOTATE_TAS_ACQUIRED(lck) ANNOTATE_HAPPENS_AFTER(lck)
-#else
-#define ANNOTATE_TAS_CREATE(lck)
-#define ANNOTATE_TAS_RELEASED(lck)
-#define ANNOTATE_TAS_ACQUIRED(lck)
-#endif
-
-#ifdef ANNOTATE_DRDPA
-#define ANNOTATE_DRDPA_CREATE(lck)
-#define ANNOTATE_DRDPA_RELEASED(lck) ANNOTATE_HAPPENS_BEFORE(lck)
-#define ANNOTATE_DRDPA_ACQUIRED(lck) ANNOTATE_HAPPENS_AFTER(lck)
-#else
-#define ANNOTATE_DRDPA_CREATE(lck)
-#define ANNOTATE_DRDPA_RELEASED(lck)
-#define ANNOTATE_DRDPA_ACQUIRED(lck)
-#endif
-
-#endif
diff --git a/openmp/runtime/src/z_Linux_util.cpp b/openmp/runtime/src/z_Linux_util.cpp
index 5c2486904a76e..42ad1d56f9ec0 100644
--- a/openmp/runtime/src/z_Linux_util.cpp
+++ b/openmp/runtime/src/z_Linux_util.cpp
@@ -66,8 +66,6 @@
#include <dirent.h>
#include <fcntl.h>
-#include "tsan_annotations.h"
-
struct kmp_sys_timer {
struct timespec start;
};
@@ -1328,7 +1326,6 @@ void __kmp_suspend_initialize(void) {
}
void __kmp_suspend_initialize_thread(kmp_info_t *th) {
- ANNOTATE_HAPPENS_AFTER(&th->th.th_suspend_init_count);
int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
int new_value = __kmp_fork_count + 1;
// Return if already initialized
@@ -1350,7 +1347,6 @@ void __kmp_suspend_initialize_thread(kmp_info_t *th) {
&__kmp_suspend_mutex_attr);
KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
- ANNOTATE_HAPPENS_BEFORE(&th->th.th_suspend_init_count);
}
}