[Openmp-commits] [openmp] r295158 - [OpenMP] New Tsan annotations to remove false positives on reductions and barriers

Jonas Hahnfeld via Openmp-commits openmp-commits at lists.llvm.org
Wed Feb 15 00:14:23 PST 2017


Author: hahnfeld
Date: Wed Feb 15 02:14:22 2017
New Revision: 295158

URL: http://llvm.org/viewvc/llvm-project?rev=295158&view=rev
Log:
[OpenMP] New Tsan annotations to remove false positives on reductions and barriers

Added new ThreadSanitizer annotations to remove false positives with OpenMP reductions and barriers.
Removed unused annotations from the Tsan annotations header file.

Patch by Simone Atzeni!

Differential Revision: https://reviews.llvm.org/D29202
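
For context, the kind of code these annotations target is an ordinary OpenMP
reduction, as in the illustrative C example below (not part of this patch).
When such a program is compiled with -fopenmp -fsanitize=thread against a
runtime without these annotations, ThreadSanitizer cannot see the
synchronization libomp performs internally while combining the per-thread
partial results, so it may report a false race on the reduction variable;
the happens-before/happens-after edges added here are meant to suppress
exactly that kind of report.

    /* reduction_example.c -- illustrative only, not part of this commit.
       Build (assumed): clang -fopenmp -fsanitize=thread reduction_example.c
       Without annotations inside the OpenMP runtime, TSan may flag the
       combine step for "sum" even though the reduction is correctly
       synchronized by the runtime's barriers. */
    #include <stdio.h>

    int main(void) {
        long sum = 0;
        #pragma omp parallel for reduction(+ : sum)
        for (int i = 0; i < 1000000; i++) {
            sum += i;
        }
        printf("sum = %ld\n", sum);
        return 0;
    }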

Modified:
    openmp/trunk/runtime/src/kmp_barrier.cpp
    openmp/trunk/runtime/src/tsan_annotations.h

Modified: openmp/trunk/runtime/src/kmp_barrier.cpp
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp_barrier.cpp?rev=295158&r1=295157&r2=295158&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp_barrier.cpp (original)
+++ openmp/trunk/runtime/src/kmp_barrier.cpp Wed Feb 15 02:14:22 2017
@@ -74,6 +74,7 @@ __kmp_linear_barrier_gather(enum barrier
         // Mark arrival to master thread
         /* After performing this write, a worker thread may not assume that the team is valid
            any more - it could be deallocated by the master thread at any time. */
+        ANNOTATE_BARRIER_BEGIN(this_thr);
         kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[0]);
         flag.release();
     } else {
@@ -99,6 +100,7 @@ __kmp_linear_barrier_gather(enum barrier
             kmp_flag_64 flag(&other_threads[i]->th.th_bar[bt].bb.b_arrived, new_state);
             flag.wait(this_thr, FALSE
                       USE_ITT_BUILD_ARG(itt_sync_obj) );
+            ANNOTATE_BARRIER_END(other_threads[i]);
 #if USE_ITT_BUILD && USE_ITT_NOTIFY
             // Barrier imbalance - write min of the thread time and the other thread time to the thread.
             if (__kmp_forkjoin_frames_mode == 2) {
@@ -175,6 +177,7 @@ __kmp_linear_barrier_release(enum barrie
                               &other_threads[i]->th.th_bar[bt].bb.b_go,
                               other_threads[i]->th.th_bar[bt].bb.b_go,
                               other_threads[i]->th.th_bar[bt].bb.b_go + KMP_BARRIER_STATE_BUMP));
+                ANNOTATE_BARRIER_BEGIN(other_threads[i]);
                 kmp_flag_64 flag(&other_threads[i]->th.th_bar[bt].bb.b_go, other_threads[i]);
                 flag.release();
             }
@@ -185,6 +188,7 @@ __kmp_linear_barrier_release(enum barrie
         kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
         flag.wait(this_thr, TRUE
                   USE_ITT_BUILD_ARG(itt_sync_obj) );
+        ANNOTATE_BARRIER_END(this_thr);
 #if USE_ITT_BUILD && USE_ITT_NOTIFY
         if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
             // In a fork barrier; cannot get the object reliably (or ITTNOTIFY is disabled)
@@ -268,6 +272,7 @@ __kmp_tree_barrier_gather(enum barrier_t
             kmp_flag_64 flag(&child_bar->b_arrived, new_state);
             flag.wait(this_thr, FALSE
                       USE_ITT_BUILD_ARG(itt_sync_obj) );
+            ANNOTATE_BARRIER_END(child_thr);
 #if USE_ITT_BUILD && USE_ITT_NOTIFY
             // Barrier imbalance - write min of the thread time and a child time to the thread.
             if (__kmp_forkjoin_frames_mode == 2) {
@@ -302,6 +307,7 @@ __kmp_tree_barrier_gather(enum barrier_t
         // Mark arrival to parent thread
         /* After performing this write, a worker thread may not assume that the team is valid
            any more - it could be deallocated by the master thread at any time.  */
+        ANNOTATE_BARRIER_BEGIN(this_thr);
         kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[parent_tid]);
         flag.release();
     } else {
@@ -340,6 +346,7 @@ __kmp_tree_barrier_release(enum barrier_
         kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
         flag.wait(this_thr, TRUE
                   USE_ITT_BUILD_ARG(itt_sync_obj) );
+        ANNOTATE_BARRIER_END(this_thr);
 #if USE_ITT_BUILD && USE_ITT_NOTIFY
         if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
             // In fork barrier where we could not get the object reliably (or ITTNOTIFY is disabled)
@@ -408,6 +415,7 @@ __kmp_tree_barrier_release(enum barrier_
                           child_tid, &child_bar->b_go, child_bar->b_go,
                           child_bar->b_go + KMP_BARRIER_STATE_BUMP));
             // Release child from barrier
+            ANNOTATE_BARRIER_BEGIN(child_thr);
             kmp_flag_64 flag(&child_bar->b_go, child_thr);
             flag.release();
             child++;
@@ -468,6 +476,7 @@ __kmp_hyper_barrier_gather(enum barrier_
             /* After performing this write (in the last iteration of the enclosing for loop),
                a worker thread may not assume that the team is valid any more - it could be
                deallocated by the master thread at any time.  */
+            ANNOTATE_BARRIER_BEGIN(this_thr);
             p_flag.set_waiter(other_threads[parent_tid]);
             p_flag.release();
             break;
@@ -495,6 +504,7 @@ __kmp_hyper_barrier_gather(enum barrier_
             kmp_flag_64 c_flag(&child_bar->b_arrived, new_state);
             c_flag.wait(this_thr, FALSE
                         USE_ITT_BUILD_ARG(itt_sync_obj) );
+            ANNOTATE_BARRIER_END(child_thr);
 #if USE_ITT_BUILD && USE_ITT_NOTIFY
             // Barrier imbalance - write min of the thread time and a child time to the thread.
             if (__kmp_forkjoin_frames_mode == 2) {
@@ -568,6 +578,7 @@ __kmp_hyper_barrier_release(enum barrier
         kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
         flag.wait(this_thr, TRUE
                   USE_ITT_BUILD_ARG(itt_sync_obj) );
+        ANNOTATE_BARRIER_END(this_thr);
 #if USE_ITT_BUILD && USE_ITT_NOTIFY
         if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
             // In fork barrier where we could not get the object reliably
@@ -655,6 +666,7 @@ __kmp_hyper_barrier_release(enum barrier
                               child_tid, &child_bar->b_go, child_bar->b_go,
                               child_bar->b_go + KMP_BARRIER_STATE_BUMP));
                 // Release child from barrier
+                ANNOTATE_BARRIER_BEGIN(child_thr);
                 kmp_flag_64 flag(&child_bar->b_go, child_thr);
                 flag.release();
             }
@@ -788,6 +800,7 @@ __kmp_hierarchical_barrier_gather(enum b
                         KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
                                        gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                                        team->t.t_id, child_tid));
+                        ANNOTATE_BARRIER_END(other_threads[child_tid]);
                         (*reduce)(this_thr->th.th_local.reduce_data, other_threads[child_tid]->th.th_local.reduce_data);
                     }
                     ANNOTATE_REDUCE_BEFORE(reduce);
@@ -809,6 +822,7 @@ __kmp_hierarchical_barrier_gather(enum b
                     kmp_flag_64 flag(&child_bar->b_arrived, new_state);
                     flag.wait(this_thr, FALSE
                               USE_ITT_BUILD_ARG(itt_sync_obj) );
+                    ANNOTATE_BARRIER_END(child_thr);
                     if (reduce) {
                         KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
                                        gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
@@ -835,6 +849,7 @@ __kmp_hierarchical_barrier_gather(enum b
                     kmp_flag_64 flag(&child_bar->b_arrived, new_state);
                     flag.wait(this_thr, FALSE
                               USE_ITT_BUILD_ARG(itt_sync_obj) );
+                    ANNOTATE_BARRIER_END(child_thr);
                     if (reduce) {
                         KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
                                        gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
@@ -859,6 +874,7 @@ __kmp_hierarchical_barrier_gather(enum b
            the team is valid any more - it could be deallocated by the master thread at any time. */
         if (thr_bar->my_level || __kmp_dflt_blocktime != KMP_MAX_BLOCKTIME
             || !thr_bar->use_oncore_barrier) { // Parent is waiting on my b_arrived flag; release it
+            ANNOTATE_BARRIER_BEGIN(this_thr);
             kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[thr_bar->parent_tid]);
             flag.release();
         }
@@ -904,6 +920,7 @@ __kmp_hierarchical_barrier_release(enum
             kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
             flag.wait(this_thr, TRUE
                       USE_ITT_BUILD_ARG(itt_sync_obj) );
+            ANNOTATE_BARRIER_END(this_thr);
             TCW_8(thr_bar->b_go, KMP_INIT_BARRIER_STATE); // Reset my b_go flag for next time
         }
         else { // Thread barrier data is initialized, this is a leaf, blocktime is infinite, not nested
@@ -1020,6 +1037,7 @@ __kmp_hierarchical_barrier_release(enum
                                       team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
                                       child_bar->b_go + KMP_BARRIER_STATE_BUMP));
                         // Release child using child's b_go flag
+                        ANNOTATE_BARRIER_BEGIN(child_thr);
                         kmp_flag_64 flag(&child_bar->b_go, child_thr);
                         flag.release();
                     }
@@ -1043,6 +1061,7 @@ __kmp_hierarchical_barrier_release(enum
                                   team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
                                   child_bar->b_go + KMP_BARRIER_STATE_BUMP));
                     // Release child using child's b_go flag
+                    ANNOTATE_BARRIER_BEGIN(child_thr);
                     kmp_flag_64 flag(&child_bar->b_go, child_thr);
                     flag.release();
                 }
@@ -1082,7 +1101,7 @@ __kmp_barrier(enum barrier_type bt, int
     KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) has arrived\n",
                   gtid, __kmp_team_from_gtid(gtid)->t.t_id, __kmp_tid_from_gtid(gtid)));
 
-    ANNOTATE_NEW_BARRIER_BEGIN(&team->t.t_bar);
+    ANNOTATE_BARRIER_BEGIN(&team->t.t_bar);
 #if OMPT_SUPPORT
     if (ompt_enabled) {
 #if OMPT_BLAME
@@ -1325,7 +1344,7 @@ __kmp_barrier(enum barrier_type bt, int
         this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
     }
 #endif
-    ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+    ANNOTATE_BARRIER_END(&team->t.t_bar);
 
     return status;
 }
@@ -1340,7 +1359,7 @@ __kmp_end_split_barrier(enum barrier_typ
     kmp_info_t *this_thr = __kmp_threads[gtid];
     kmp_team_t *team = this_thr->th.th_team;
 
-    ANNOTATE_NEW_BARRIER_BEGIN(&team->t.t_bar);
+    ANNOTATE_BARRIER_BEGIN(&team->t.t_bar);
     if (!team->t.t_serialized) {
         if (KMP_MASTER_GTID(gtid)) {
             switch (__kmp_barrier_release_pattern[bt]) {
@@ -1371,7 +1390,7 @@ __kmp_end_split_barrier(enum barrier_typ
             } // if
         }
     }
-    ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+    ANNOTATE_BARRIER_END(&team->t.t_bar);
 }
 
 
@@ -1422,7 +1441,7 @@ __kmp_join_barrier(int gtid)
     KMP_DEBUG_ASSERT(this_thr == team->t.t_threads[tid]);
     KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) arrived at join barrier\n", gtid, team_id, tid));
 
-    ANNOTATE_NEW_BARRIER_BEGIN(&team->t.t_bar);
+    ANNOTATE_BARRIER_BEGIN(&team->t.t_bar);
 #if OMPT_SUPPORT
 #if OMPT_TRACE
     if (ompt_enabled &&
@@ -1587,7 +1606,7 @@ __kmp_join_barrier(int gtid)
         this_thr->th.ompt_thread_info.state = ompt_state_overhead;
     }
 #endif
-    ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+    ANNOTATE_BARRIER_END(&team->t.t_bar);
 }
 
 
@@ -1603,7 +1622,7 @@ __kmp_fork_barrier(int gtid, int tid)
     void * itt_sync_obj = NULL;
 #endif /* USE_ITT_BUILD */
     if (team)
-      ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+      ANNOTATE_BARRIER_END(&team->t.t_bar);
 
     KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) has arrived\n",
                   gtid, (team != NULL) ? team->t.t_id : -1, tid));
@@ -1758,7 +1777,7 @@ __kmp_fork_barrier(int gtid, int tid)
         } // (prepare called inside barrier_release)
     }
 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
-    ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+    ANNOTATE_BARRIER_END(&team->t.t_bar);
     KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) is leaving\n", gtid, team->t.t_id, tid));
 }
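
The pattern applied throughout kmp_barrier.cpp above is a single pairing:
the releasing thread calls ANNOTATE_BARRIER_BEGIN(addr) right before it
releases a flag, and the waiting thread calls ANNOTATE_BARRIER_END(addr)
right after its wait on that flag returns, giving ThreadSanitizer a
happens-before edge across the barrier. The standalone C sketch below
mirrors that pairing; the AnnotateHappensBefore/AnnotateHappensAfter
stand-ins are no-ops so the sketch compiles on its own, and their
prototypes are modeled loosely on tsan_annotations.h rather than being
the exact runtime interface.

    /* barrier_pairing_sketch.c -- minimal sketch, not part of this commit.
       Build (assumed): cc -pthread barrier_pairing_sketch.c */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-ins for the TSan dynamic annotations; under TSan the real
       functions would record the happens-before/after events. */
    static void AnnotateHappensBefore(const char *f, int l, void *a) { (void)f; (void)l; (void)a; }
    static void AnnotateHappensAfter(const char *f, int l, void *a)  { (void)f; (void)l; (void)a; }
    #define ANNOTATE_BARRIER_BEGIN(addr) AnnotateHappensBefore(__FILE__, __LINE__, (void *)(addr))
    #define ANNOTATE_BARRIER_END(addr)   AnnotateHappensAfter(__FILE__, __LINE__, (void *)(addr))

    static atomic_int b_go;   /* release flag, analogous to thr_bar->b_go */
    static int payload;       /* data handed from the releaser to the waiter */

    static void *waiter(void *arg) {
        while (atomic_load_explicit(&b_go, memory_order_acquire) == 0)
            ;                              /* spin until released */
        ANNOTATE_BARRIER_END(&b_go);       /* matches the releaser's BEGIN */
        printf("payload = %d\n", payload);
        return arg;
    }

    int main(void) {
        pthread_t t;
        pthread_create(&t, NULL, waiter, NULL);
        payload = 42;
        ANNOTATE_BARRIER_BEGIN(&b_go);     /* announce the release point */
        atomic_store_explicit(&b_go, 1, memory_order_release);
        pthread_join(t, NULL);
        return 0;
    }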
 

Modified: openmp/trunk/runtime/src/tsan_annotations.h
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/tsan_annotations.h?rev=295158&r1=295157&r2=295158&view=diff
==============================================================================
--- openmp/trunk/runtime/src/tsan_annotations.h (original)
+++ openmp/trunk/runtime/src/tsan_annotations.h Wed Feb 15 02:14:22 2017
@@ -82,19 +82,10 @@ void AnnotateMemoryIsInitialized(const c
 #define ANNOTATE_RWLOCK_CREATE(lck) AnnotateRWLockCreate(__FILE__, __LINE__, (uptr)lck)
 #define ANNOTATE_RWLOCK_RELEASED(lck) AnnotateRWLockAcquired(__FILE__, __LINE__, (uptr)lck, 1)
 #define ANNOTATE_RWLOCK_ACQUIRED(lck) AnnotateRWLockReleased(__FILE__, __LINE__, (uptr)lck, 1)
-
-/* new higher level barrier annotations */
-#define ANNOTATE_NEW_BARRIER_BEGIN(addr) AnnotateHappensBefore(__FILE__, __LINE__, (uptr)addr)
-#define ANNOTATE_NEW_BARRIER_END(addr) AnnotateHappensAfter(__FILE__, __LINE__, (uptr)addr)
-// #define ANNOTATE_NEW_BARRIER_BEGIN(addr)
-// #define ANNOTATE_NEW_BARRIER_END(addr)
-
-
+#define ANNOTATE_BARRIER_BEGIN(addr) AnnotateHappensBefore(__FILE__, __LINE__, (uptr)addr)
+#define ANNOTATE_BARRIER_END(addr) AnnotateHappensAfter(__FILE__, __LINE__, (uptr)addr)
 #define ANNOTATE_REDUCE_AFTER(addr) AnnotateHappensAfter(__FILE__, __LINE__, (uptr)addr)
 #define ANNOTATE_REDUCE_BEFORE(addr) AnnotateHappensBefore(__FILE__, __LINE__, (uptr)addr)
-// #define ANNOTATE_REDUCE_AFTER(addr)
-// #define ANNOTATE_REDUCE_BEFORE(addr)
-
 #else
 #define ANNOTATE_HAPPENS_AFTER(addr)
 #define ANNOTATE_HAPPENS_BEFORE(addr)
@@ -103,8 +94,8 @@ void AnnotateMemoryIsInitialized(const c
 #define ANNOTATE_RWLOCK_CREATE(lck)
 #define ANNOTATE_RWLOCK_RELEASED(lck)
 #define ANNOTATE_RWLOCK_ACQUIRED(lck)
-#define ANNOTATE_NEW_BARRIER_BEGIN(addr)
-#define ANNOTATE_NEW_BARRIER_END(addr)
+#define ANNOTATE_BARRIER_BEGIN(addr)
+#define ANNOTATE_BARRIER_END(addr)
 #define ANNOTATE_REDUCE_AFTER(addr)
 #define ANNOTATE_REDUCE_BEFORE(addr)
 #endif



