[Openmp-commits] [openmp] r293312 - Fix performance issue incurred by removing monitor thread.
Jonathan Peyton via Openmp-commits
openmp-commits at lists.llvm.org
Fri Jan 27 09:54:32 PST 2017
Author: jlpeyton
Date: Fri Jan 27 11:54:31 2017
New Revision: 293312
URL: http://llvm.org/viewvc/llvm-project?rev=293312&view=rev
Log:
Fix performance issue incurred by removing monitor thread.
When the monitor thread is used, most threads in the team go straight to
sleep after reading the cached copy of bt_intervals/bt_set, and that read
happens at least once per thread in the wait function; keeping it to a
single cached read makes the overall performance slightly better.
This change mimics that behavior by reusing the bt_intervals cache, which
now simply holds the blocktime interval in terms of the platform-dependent
ticks or nanoseconds.
Patch by Hansang Bae
Differential Revision: https://reviews.llvm.org/D28906
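
In outline, the fix moves the blocktime-interval computation out of the wait
loop: without the monitor thread the interval used to be recomputed from
globals on every wait, and it is now computed at the barrier and stashed in
the thread descriptor, much like bt_intervals/bt_set already were in the
monitor build. A minimal sketch of that idiom in C++ (illustrative names such
as blocktime_ms, ticks_per_nsec and thread_desc; not the runtime's code):

    #include <cstdint>

    // Illustrative stand-ins for __kmp_dflt_blocktime (milliseconds) and
    // __kmp_ticks_per_nsec; the real values live in runtime globals.
    static int    blocktime_ms   = 200;
    static double ticks_per_nsec = 1.0;

    struct thread_desc {
        // Counterpart of the new non-monitor th_team_bt_intervals field: the
        // blocktime already converted to platform ticks (or plain nanoseconds).
        std::uint64_t bt_interval;
    };

    // Barrier path: one conversion, touching the globals once per barrier.
    void cache_blocktime(thread_desc &th) {
        th.bt_interval = static_cast<std::uint64_t>(
            blocktime_ms * 1000000.0 * ticks_per_nsec);   // ms -> ns -> ticks
    }

    // Wait path: just add the cached value to the current timestamp.
    std::uint64_t hibernate_goal(const thread_desc &th, std::uint64_t now_ticks) {
        return now_ticks + th.bt_interval;
    }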
Modified:
openmp/trunk/runtime/src/kmp.h
openmp/trunk/runtime/src/kmp_barrier.cpp
openmp/trunk/runtime/src/kmp_wait_release.h
Modified: openmp/trunk/runtime/src/kmp.h
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp.h?rev=293312&r1=293311&r2=293312&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp.h (original)
+++ openmp/trunk/runtime/src/kmp.h Fri Jan 27 11:54:31 2017
@@ -889,6 +889,20 @@ extern int __kmp_place_num_threads_per_c
#define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
( ( (blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1 ) / \
(KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) )
+#else
+# if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
+ // HW TSC is used to reduce overhead (clock tick instead of nanosecond).
+ extern double __kmp_ticks_per_nsec;
+# define KMP_NOW() __kmp_hardware_timestamp()
+# define KMP_BLOCKTIME_INTERVAL() (__kmp_dflt_blocktime * KMP_USEC_PER_SEC * __kmp_ticks_per_nsec)
+# define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
+# else
+ // System time is retrieved sporadically while blocking.
+ extern kmp_uint64 __kmp_now_nsec();
+# define KMP_NOW() __kmp_now_nsec()
+# define KMP_BLOCKTIME_INTERVAL() (__kmp_dflt_blocktime * KMP_USEC_PER_SEC)
+# define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
+# endif
#endif // KMP_USE_MONITOR
#define KMP_MIN_STATSCOLS 40
@@ -2220,8 +2234,10 @@ typedef struct KMP_ALIGN_CACHE kmp_base_
/* to exist (from the POV of worker threads). */
#if KMP_USE_MONITOR
int th_team_bt_intervals;
-#endif
int th_team_bt_set;
+#else
+ kmp_uint64 th_team_bt_intervals;
+#endif
#if KMP_AFFINITY_SUPPORTED
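
The two hunks above work together: the first gives the non-monitor build its
own KMP_NOW()/KMP_BLOCKTIME_INTERVAL()/KMP_BLOCKING() definitions (previously
private to kmp_wait_release.h; see the removal further below), and the second
declares th_team_bt_intervals as a kmp_uint64 so it can hold that interval in
ticks or nanoseconds. On x86/x86_64 Linux, KMP_NOW() reads the hardware TSC
and KMP_BLOCKING checks the goal on every poll; elsewhere it falls back to a
system-clock read, and the (count) % 1000 != 0 term throttles that read to
every 1000th poll. A rough sketch of what such time sources can look like
(hedged: the runtime's actual __kmp_hardware_timestamp()/__kmp_now_nsec()
may be implemented differently):

    #include <cstdint>
    #include <time.h>                  // POSIX clock_gettime
    #if defined(__x86_64__) || defined(__i386__)
    # include <x86intrin.h>            // __rdtsc
    #endif

    #if defined(__x86_64__) || defined(__i386__)
    // Cheap per-poll timestamp: raw CPU tick counter.
    static inline std::uint64_t now_ticks() { return __rdtsc(); }
    #endif

    // Portable fallback: nanoseconds from the monotonic system clock.
    static inline std::uint64_t now_nsec() {
        timespec ts{};
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return static_cast<std::uint64_t>(ts.tv_sec) * 1000000000ull + ts.tv_nsec;
    }

    // Fallback blocking test: only consult the slower clock every 1000th poll.
    static inline bool keep_blocking(std::uint64_t goal, int count) {
        return (count % 1000 != 0) || goal > now_nsec();
    }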
Modified: openmp/trunk/runtime/src/kmp_barrier.cpp
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp_barrier.cpp?rev=293312&r1=293311&r2=293312&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp_barrier.cpp (original)
+++ openmp/trunk/runtime/src/kmp_barrier.cpp Fri Jan 27 11:54:31 2017
@@ -1130,8 +1130,10 @@ __kmp_barrier(enum barrier_type bt, int
if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
#if KMP_USE_MONITOR
this_thr->th.th_team_bt_intervals = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
-#endif
this_thr->th.th_team_bt_set = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
+#else
+ this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL();
+#endif
}
#if USE_ITT_BUILD
@@ -1453,8 +1455,10 @@ __kmp_join_barrier(int gtid)
if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
#if KMP_USE_MONITOR
this_thr->th.th_team_bt_intervals = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
-#endif
this_thr->th.th_team_bt_set = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
+#else
+ this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL();
+#endif
}
#if USE_ITT_BUILD
@@ -1644,8 +1648,10 @@ __kmp_fork_barrier(int gtid, int tid)
if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
#if KMP_USE_MONITOR
this_thr->th.th_team_bt_intervals = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
-#endif
this_thr->th.th_team_bt_set = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
+#else
+ this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL();
+#endif
}
} // master
Modified: openmp/trunk/runtime/src/kmp_wait_release.h
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp_wait_release.h?rev=293312&r1=293311&r2=293312&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp_wait_release.h (original)
+++ openmp/trunk/runtime/src/kmp_wait_release.h Fri Jan 27 11:54:31 2017
@@ -84,22 +84,6 @@ class kmp_flag {
*/
};
-#if ! KMP_USE_MONITOR
-# if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
- // HW TSC is used to reduce overhead (clock tick instead of nanosecond).
- extern double __kmp_ticks_per_nsec;
-# define KMP_NOW() __kmp_hardware_timestamp()
-# define KMP_BLOCKTIME_INTERVAL() (__kmp_dflt_blocktime * KMP_USEC_PER_SEC * __kmp_ticks_per_nsec)
-# define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
-# else
- // System time is retrieved sporadically while blocking.
- extern kmp_uint64 __kmp_now_nsec();
-# define KMP_NOW() __kmp_now_nsec()
-# define KMP_BLOCKTIME_INTERVAL() (__kmp_dflt_blocktime * KMP_USEC_PER_SEC)
-# define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
-# endif
-#endif
-
/* Spin wait loop that first does pause, then yield, then sleep. A thread that calls __kmp_wait_*
must make certain that another thread calls __kmp_release to wake it back up to prevent deadlocks! */
template <class C>
@@ -187,7 +171,7 @@ __kmp_wait_template(kmp_info_t *this_thr
th_gtid, __kmp_global.g.g_time.dt.t_value, hibernate,
hibernate - __kmp_global.g.g_time.dt.t_value));
#else
- hibernate_goal = KMP_NOW() + KMP_BLOCKTIME_INTERVAL();
+ hibernate_goal = KMP_NOW() + this_thr->th.th_team_bt_intervals;
poll_count = 0;
#endif // KMP_USE_MONITOR
}
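
With the cached field in place, each wait performs a single thread-local read
to set its sleep deadline. A simplified, self-contained sketch of that loop
structure (std::chrono/std::atomic stand in for the runtime's timers and flag
objects; this is not the actual __kmp_wait_template):

    #include <atomic>
    #include <chrono>
    #include <thread>

    static std::atomic<bool> released{false};   // stand-in for the release flag

    // Simplified spin-wait: the hibernate goal comes from the per-thread cached
    // interval (the th_team_bt_intervals analogue), so the loop never rereads
    // the globals that KMP_BLOCKTIME_INTERVAL() is built from.
    void wait_sketch(std::chrono::nanoseconds cached_bt_interval) {
        const auto hibernate_goal =
            std::chrono::steady_clock::now() + cached_bt_interval;
        while (!released.load(std::memory_order_acquire)) {
            if (std::chrono::steady_clock::now() >= hibernate_goal)
                break;                  // blocktime expired: real code would suspend
            std::this_thread::yield();  // spin politely until released or expired
        }
    }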