[Openmp-commits] [openmp] r302929 - Clang-format and whitespace cleanup of source code
Jonathan Peyton via Openmp-commits
openmp-commits at lists.llvm.org
Fri May 12 11:01:35 PDT 2017
Modified: openmp/trunk/runtime/src/kmp_lock.cpp
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp_lock.cpp?rev=302929&r1=302928&r2=302929&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp_lock.cpp (original)
+++ openmp/trunk/runtime/src/kmp_lock.cpp Fri May 12 13:01:32 2017
@@ -17,55 +17,51 @@
#include <atomic>
#include "kmp.h"
-#include "kmp_itt.h"
#include "kmp_i18n.h"
-#include "kmp_lock.h"
#include "kmp_io.h"
+#include "kmp_itt.h"
+#include "kmp_lock.h"
#include "tsan_annotations.h"
#if KMP_USE_FUTEX
-# include <unistd.h>
-# include <sys/syscall.h>
-// We should really include <futex.h>, but that causes compatibility problems on different
-// Linux* OS distributions that either require that you include (or break when you try to include)
-// <pci/types.h>.
-// Since all we need is the two macros below (which are part of the kernel ABI, so can't change)
-// we just define the constants here and don't include <futex.h>
-# ifndef FUTEX_WAIT
-# define FUTEX_WAIT 0
-# endif
-# ifndef FUTEX_WAKE
-# define FUTEX_WAKE 1
-# endif
+#include <sys/syscall.h>
+#include <unistd.h>
+// We should really include <futex.h>, but that causes compatibility problems on
+// different Linux* OS distributions that either require that you include (or
+// break when you try to include) <pci/types.h>. Since all we need is the two
+// macros below (which are part of the kernel ABI, so can't change) we just
+// define the constants here and don't include <futex.h>
+#ifndef FUTEX_WAIT
+#define FUTEX_WAIT 0
+#endif
+#ifndef FUTEX_WAKE
+#define FUTEX_WAKE 1
+#endif
#endif
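
For reference, those two constants are all the runtime needs to talk to the kernel futex interface directly through syscall(2). A minimal sketch of the wait/wake pattern the futex lock below is built on (the helper names here are illustrative, not taken from this file):

  #include <stddef.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  #ifndef FUTEX_WAIT
  #define FUTEX_WAIT 0
  #endif
  #ifndef FUTEX_WAKE
  #define FUTEX_WAKE 1
  #endif

  // Sleep while *addr still holds 'expected'; spurious returns are possible,
  // so callers re-check the value and loop.
  static void futex_wait(int *addr, int expected) {
    syscall(__NR_futex, addr, FUTEX_WAIT, expected, NULL, NULL, 0);
  }

  // Wake at most 'count' threads currently blocked on the same address.
  static void futex_wake(int *addr, int count) {
    syscall(__NR_futex, addr, FUTEX_WAKE, count, NULL, NULL, 0);
  }

The acquire/release code further down follows the same shape: a blocked waiter sets a "wake me" bit in lk.poll, calls FUTEX_WAIT with the value it last observed, and the releasing thread issues one FUTEX_WAKE whenever that bit is set.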
/* Implement spin locks for internal library use. */
/* The algorithm implemented is Lamport's bakery lock [1974]. */
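
As used here, the bakery scheme reduces to two counters: a thread draws the next ticket and spins until the "now serving" counter reaches it, which gives FIFO ordering. A compact std::atomic sketch of that idea (illustrative only; the real ticket locks further down add yielding, backoff and the consistency checks):

  #include <atomic>

  struct ticket_lock {
    std::atomic<unsigned> next_ticket{0};
    std::atomic<unsigned> now_serving{0};

    void lock() {
      // Drawing the ticket can be relaxed; the acquire happens on the
      // now_serving load that finally lets this thread through.
      unsigned my_ticket = next_ticket.fetch_add(1U, std::memory_order_relaxed);
      while (now_serving.load(std::memory_order_acquire) != my_ticket) {
        // spin (the runtime yields or backs off here instead of burning CPU)
      }
    }

    void unlock() {
      now_serving.fetch_add(1U, std::memory_order_release);
    }
  };

This is the same pattern __kmp_acquire_ticket_lock_timed_template and __kmp_release_ticket_lock implement below, minus the profiling and KMP_WAIT_YIELD_PTR plumbing.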
-void
-__kmp_validate_locks( void )
-{
- int i;
- kmp_uint32 x, y;
-
- /* Check to make sure unsigned arithmetic does wrap properly */
- x = ~((kmp_uint32) 0) - 2;
- y = x - 2;
-
- for (i = 0; i < 8; ++i, ++x, ++y) {
- kmp_uint32 z = (x - y);
- KMP_ASSERT( z == 2 );
- }
+void __kmp_validate_locks(void) {
+ int i;
+ kmp_uint32 x, y;
+
+ /* Check to make sure unsigned arithmetic does wrap properly */
+ x = ~((kmp_uint32)0) - 2;
+ y = x - 2;
+
+ for (i = 0; i < 8; ++i, ++x, ++y) {
+ kmp_uint32 z = (x - y);
+ KMP_ASSERT(z == 2);
+ }
- KMP_ASSERT( offsetof( kmp_base_queuing_lock, tail_id ) % 8 == 0 );
+ KMP_ASSERT(offsetof(kmp_base_queuing_lock, tail_id) % 8 == 0);
}
-
/* ------------------------------------------------------------------------ */
/* test and set locks */
-//
// For the non-nested locks, we can only assume that the first 4 bytes were
// allocated, since gcc only allocates 4 bytes for omp_lock_t, and the Intel
// compiler only allocates a 4 byte pointer on IA-32 architecture. On
@@ -73,302 +69,253 @@ __kmp_validate_locks( void )
//
// gcc reserves >= 8 bytes for nested locks, so we can assume that the
// entire 8 bytes were allocated for nested locks on all 64-bit platforms.
-//
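
Put differently, only the poll word is guaranteed to sit inside the storage the user's compiler set aside, which is why the checked entry points below test sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE before looking at the nesting depth. A rough picture of that layout assumption (field order shown here is illustrative):

  #include <cstdint>

  struct tas_lock_footprint {
    std::int32_t poll;          // bytes 0-3: usable even when only 4 bytes
                                //            were allocated (gcc omp_lock_t)
    std::int32_t depth_locked;  // bytes 4-7: only consulted for nested locks,
                                //            which are guaranteed >= 8 bytes
  };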
-static kmp_int32
-__kmp_get_tas_lock_owner( kmp_tas_lock_t *lck )
-{
- return KMP_LOCK_STRIP(TCR_4( lck->lk.poll )) - 1;
+static kmp_int32 __kmp_get_tas_lock_owner(kmp_tas_lock_t *lck) {
+ return KMP_LOCK_STRIP(TCR_4(lck->lk.poll)) - 1;
}
-static inline bool
-__kmp_is_tas_lock_nestable( kmp_tas_lock_t *lck )
-{
- return lck->lk.depth_locked != -1;
+static inline bool __kmp_is_tas_lock_nestable(kmp_tas_lock_t *lck) {
+ return lck->lk.depth_locked != -1;
}
__forceinline static int
-__kmp_acquire_tas_lock_timed_template( kmp_tas_lock_t *lck, kmp_int32 gtid )
-{
- KMP_MB();
+__kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t *lck, kmp_int32 gtid) {
+ KMP_MB();
#ifdef USE_LOCK_PROFILE
- kmp_uint32 curr = KMP_LOCK_STRIP( TCR_4( lck->lk.poll ) );
- if ( ( curr != 0 ) && ( curr != gtid + 1 ) )
- __kmp_printf( "LOCK CONTENTION: %p\n", lck );
- /* else __kmp_printf( "." );*/
+ kmp_uint32 curr = KMP_LOCK_STRIP(TCR_4(lck->lk.poll));
+ if ((curr != 0) && (curr != gtid + 1))
+ __kmp_printf("LOCK CONTENTION: %p\n", lck);
+/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */
- if ( ( lck->lk.poll == KMP_LOCK_FREE(tas) )
- && KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas) ) ) {
- KMP_FSYNC_ACQUIRED(lck);
- return KMP_LOCK_ACQUIRED_FIRST;
- }
-
- kmp_uint32 spins;
- KMP_FSYNC_PREPARE( lck );
- KMP_INIT_YIELD( spins );
- if ( TCR_4( __kmp_nth ) > ( __kmp_avail_proc ? __kmp_avail_proc :
- __kmp_xproc ) ) {
- KMP_YIELD( TRUE );
- }
- else {
- KMP_YIELD_SPIN( spins );
- }
+ if ((lck->lk.poll == KMP_LOCK_FREE(tas)) &&
+ KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(tas),
+ KMP_LOCK_BUSY(gtid + 1, tas))) {
+ KMP_FSYNC_ACQUIRED(lck);
+ return KMP_LOCK_ACQUIRED_FIRST;
+ }
- kmp_backoff_t backoff = __kmp_spin_backoff_params;
- while ( ( lck->lk.poll != KMP_LOCK_FREE(tas) ) ||
- ( ! KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas) ) ) ) {
-
- __kmp_spin_backoff(&backoff);
- if ( TCR_4( __kmp_nth ) > ( __kmp_avail_proc ? __kmp_avail_proc :
- __kmp_xproc ) ) {
- KMP_YIELD( TRUE );
- }
- else {
- KMP_YIELD_SPIN( spins );
- }
+ kmp_uint32 spins;
+ KMP_FSYNC_PREPARE(lck);
+ KMP_INIT_YIELD(spins);
+ if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
+ KMP_YIELD(TRUE);
+ } else {
+ KMP_YIELD_SPIN(spins);
+ }
+
+ kmp_backoff_t backoff = __kmp_spin_backoff_params;
+ while ((lck->lk.poll != KMP_LOCK_FREE(tas)) ||
+ (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(tas),
+ KMP_LOCK_BUSY(gtid + 1, tas)))) {
+
+ __kmp_spin_backoff(&backoff);
+ if (TCR_4(__kmp_nth) >
+ (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
+ KMP_YIELD(TRUE);
+ } else {
+ KMP_YIELD_SPIN(spins);
}
- KMP_FSYNC_ACQUIRED( lck );
- return KMP_LOCK_ACQUIRED_FIRST;
+ }
+ KMP_FSYNC_ACQUIRED(lck);
+ return KMP_LOCK_ACQUIRED_FIRST;
}
-int
-__kmp_acquire_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
-{
- int retval = __kmp_acquire_tas_lock_timed_template( lck, gtid );
+int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
+ int retval = __kmp_acquire_tas_lock_timed_template(lck, gtid);
ANNOTATE_TAS_ACQUIRED(lck);
return retval;
}
-static int
-__kmp_acquire_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_set_lock";
- if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE )
- && __kmp_is_tas_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( ( gtid >= 0 ) && ( __kmp_get_tas_lock_owner( lck ) == gtid ) ) {
- KMP_FATAL( LockIsAlreadyOwned, func );
- }
- return __kmp_acquire_tas_lock( lck, gtid );
-}
-
-int
-__kmp_test_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
-{
- if ( ( lck->lk.poll == KMP_LOCK_FREE(tas) )
- && KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas) ) ) {
- KMP_FSYNC_ACQUIRED( lck );
- return TRUE;
- }
- return FALSE;
-}
-
-static int
-__kmp_test_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_test_lock";
- if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE )
- && __kmp_is_tas_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- return __kmp_test_tas_lock( lck, gtid );
-}
-
-int
-__kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
-{
- KMP_MB(); /* Flush all pending memory write invalidates. */
-
- KMP_FSYNC_RELEASING(lck);
- ANNOTATE_TAS_RELEASED(lck);
- KMP_ST_REL32( &(lck->lk.poll), KMP_LOCK_FREE(tas) );
- KMP_MB(); /* Flush all pending memory write invalidates. */
-
- KMP_YIELD( TCR_4( __kmp_nth ) > ( __kmp_avail_proc ? __kmp_avail_proc :
- __kmp_xproc ) );
- return KMP_LOCK_RELEASED;
-}
-
-static int
-__kmp_release_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_unset_lock";
- KMP_MB(); /* in case another processor initialized lock */
- if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE )
- && __kmp_is_tas_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( __kmp_get_tas_lock_owner( lck ) == -1 ) {
- KMP_FATAL( LockUnsettingFree, func );
- }
- if ( ( gtid >= 0 ) && ( __kmp_get_tas_lock_owner( lck ) >= 0 )
- && ( __kmp_get_tas_lock_owner( lck ) != gtid ) ) {
- KMP_FATAL( LockUnsettingSetByAnother, func );
- }
- return __kmp_release_tas_lock( lck, gtid );
-}
-
-void
-__kmp_init_tas_lock( kmp_tas_lock_t * lck )
-{
- TCW_4( lck->lk.poll, KMP_LOCK_FREE(tas) );
-}
-
-static void
-__kmp_init_tas_lock_with_checks( kmp_tas_lock_t * lck )
-{
- __kmp_init_tas_lock( lck );
-}
-
-void
-__kmp_destroy_tas_lock( kmp_tas_lock_t *lck )
-{
- lck->lk.poll = 0;
-}
-
-static void
-__kmp_destroy_tas_lock_with_checks( kmp_tas_lock_t *lck )
-{
- char const * const func = "omp_destroy_lock";
- if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE )
- && __kmp_is_tas_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( __kmp_get_tas_lock_owner( lck ) != -1 ) {
- KMP_FATAL( LockStillOwned, func );
- }
- __kmp_destroy_tas_lock( lck );
+static int __kmp_acquire_tas_lock_with_checks(kmp_tas_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_set_lock";
+ if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
+ __kmp_is_tas_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) == gtid)) {
+ KMP_FATAL(LockIsAlreadyOwned, func);
+ }
+ return __kmp_acquire_tas_lock(lck, gtid);
+}
+
+int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
+ if ((lck->lk.poll == KMP_LOCK_FREE(tas)) &&
+ KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(tas),
+ KMP_LOCK_BUSY(gtid + 1, tas))) {
+ KMP_FSYNC_ACQUIRED(lck);
+ return TRUE;
+ }
+ return FALSE;
+}
+
+static int __kmp_test_tas_lock_with_checks(kmp_tas_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_test_lock";
+ if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
+ __kmp_is_tas_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ return __kmp_test_tas_lock(lck, gtid);
+}
+
+int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
+ KMP_MB(); /* Flush all pending memory write invalidates. */
+
+ KMP_FSYNC_RELEASING(lck);
+ ANNOTATE_TAS_RELEASED(lck);
+ KMP_ST_REL32(&(lck->lk.poll), KMP_LOCK_FREE(tas));
+ KMP_MB(); /* Flush all pending memory write invalidates. */
+
+ KMP_YIELD(TCR_4(__kmp_nth) >
+ (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
+ return KMP_LOCK_RELEASED;
+}
+
+static int __kmp_release_tas_lock_with_checks(kmp_tas_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_unset_lock";
+ KMP_MB(); /* in case another processor initialized lock */
+ if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
+ __kmp_is_tas_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if (__kmp_get_tas_lock_owner(lck) == -1) {
+ KMP_FATAL(LockUnsettingFree, func);
+ }
+ if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) >= 0) &&
+ (__kmp_get_tas_lock_owner(lck) != gtid)) {
+ KMP_FATAL(LockUnsettingSetByAnother, func);
+ }
+ return __kmp_release_tas_lock(lck, gtid);
+}
+
+void __kmp_init_tas_lock(kmp_tas_lock_t *lck) {
+ TCW_4(lck->lk.poll, KMP_LOCK_FREE(tas));
+}
+
+static void __kmp_init_tas_lock_with_checks(kmp_tas_lock_t *lck) {
+ __kmp_init_tas_lock(lck);
+}
+
+void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck) { lck->lk.poll = 0; }
+
+static void __kmp_destroy_tas_lock_with_checks(kmp_tas_lock_t *lck) {
+ char const *const func = "omp_destroy_lock";
+ if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
+ __kmp_is_tas_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if (__kmp_get_tas_lock_owner(lck) != -1) {
+ KMP_FATAL(LockStillOwned, func);
+ }
+ __kmp_destroy_tas_lock(lck);
}
-
-//
// nested test and set locks
-//
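
Seen from the OpenMP API, nesting is just a per-owner depth count: re-acquiring in the owning thread bumps the count, and only the matching number of unsets releases the lock. A small usage sketch against the standard nest-lock routines:

  #include <omp.h>

  int main() {
    omp_nest_lock_t l;
    omp_init_nest_lock(&l);    // depth_locked starts at 0
    omp_set_nest_lock(&l);     // first acquire: depth 1 (KMP_LOCK_ACQUIRED_FIRST)
    omp_set_nest_lock(&l);     // same owner re-enters: depth 2, no blocking
    omp_unset_nest_lock(&l);   // depth 1, lock still held (KMP_LOCK_STILL_HELD)
    omp_unset_nest_lock(&l);   // depth 0, lock actually released
    omp_destroy_nest_lock(&l);
    return 0;
  }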
-int
-__kmp_acquire_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( gtid >= 0 );
-
- if ( __kmp_get_tas_lock_owner( lck ) == gtid ) {
- lck->lk.depth_locked += 1;
- return KMP_LOCK_ACQUIRED_NEXT;
- }
- else {
- __kmp_acquire_tas_lock_timed_template( lck, gtid );
- ANNOTATE_TAS_ACQUIRED(lck);
- lck->lk.depth_locked = 1;
- return KMP_LOCK_ACQUIRED_FIRST;
- }
-}
+int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(gtid >= 0);
-static int
-__kmp_acquire_nested_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_set_nest_lock";
- if ( ! __kmp_is_tas_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- return __kmp_acquire_nested_tas_lock( lck, gtid );
+ if (__kmp_get_tas_lock_owner(lck) == gtid) {
+ lck->lk.depth_locked += 1;
+ return KMP_LOCK_ACQUIRED_NEXT;
+ } else {
+ __kmp_acquire_tas_lock_timed_template(lck, gtid);
+ ANNOTATE_TAS_ACQUIRED(lck);
+ lck->lk.depth_locked = 1;
+ return KMP_LOCK_ACQUIRED_FIRST;
+ }
}
-int
-__kmp_test_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
-{
- int retval;
-
- KMP_DEBUG_ASSERT( gtid >= 0 );
-
- if ( __kmp_get_tas_lock_owner( lck ) == gtid ) {
- retval = ++lck->lk.depth_locked;
- }
- else if ( !__kmp_test_tas_lock( lck, gtid ) ) {
- retval = 0;
- }
- else {
- KMP_MB();
- retval = lck->lk.depth_locked = 1;
- }
- return retval;
+static int __kmp_acquire_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_set_nest_lock";
+ if (!__kmp_is_tas_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ return __kmp_acquire_nested_tas_lock(lck, gtid);
}
-static int
-__kmp_test_nested_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_test_nest_lock";
- if ( ! __kmp_is_tas_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- return __kmp_test_nested_tas_lock( lck, gtid );
-}
+int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
+ int retval;
-int
-__kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( gtid >= 0 );
+ KMP_DEBUG_ASSERT(gtid >= 0);
+ if (__kmp_get_tas_lock_owner(lck) == gtid) {
+ retval = ++lck->lk.depth_locked;
+ } else if (!__kmp_test_tas_lock(lck, gtid)) {
+ retval = 0;
+ } else {
KMP_MB();
- if ( --(lck->lk.depth_locked) == 0 ) {
- __kmp_release_tas_lock( lck, gtid );
- return KMP_LOCK_RELEASED;
- }
- return KMP_LOCK_STILL_HELD;
-}
-
-static int
-__kmp_release_nested_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_unset_nest_lock";
- KMP_MB(); /* in case another processor initialized lock */
- if ( ! __kmp_is_tas_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- if ( __kmp_get_tas_lock_owner( lck ) == -1 ) {
- KMP_FATAL( LockUnsettingFree, func );
- }
- if ( __kmp_get_tas_lock_owner( lck ) != gtid ) {
- KMP_FATAL( LockUnsettingSetByAnother, func );
- }
- return __kmp_release_nested_tas_lock( lck, gtid );
+ retval = lck->lk.depth_locked = 1;
+ }
+ return retval;
}
-void
-__kmp_init_nested_tas_lock( kmp_tas_lock_t * lck )
-{
- __kmp_init_tas_lock( lck );
- lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
+static int __kmp_test_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_test_nest_lock";
+ if (!__kmp_is_tas_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ return __kmp_test_nested_tas_lock(lck, gtid);
}
-static void
-__kmp_init_nested_tas_lock_with_checks( kmp_tas_lock_t * lck )
-{
- __kmp_init_nested_tas_lock( lck );
+int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(gtid >= 0);
+
+ KMP_MB();
+ if (--(lck->lk.depth_locked) == 0) {
+ __kmp_release_tas_lock(lck, gtid);
+ return KMP_LOCK_RELEASED;
+ }
+ return KMP_LOCK_STILL_HELD;
}
-void
-__kmp_destroy_nested_tas_lock( kmp_tas_lock_t *lck )
-{
- __kmp_destroy_tas_lock( lck );
- lck->lk.depth_locked = 0;
+static int __kmp_release_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_unset_nest_lock";
+ KMP_MB(); /* in case another processor initialized lock */
+ if (!__kmp_is_tas_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ if (__kmp_get_tas_lock_owner(lck) == -1) {
+ KMP_FATAL(LockUnsettingFree, func);
+ }
+ if (__kmp_get_tas_lock_owner(lck) != gtid) {
+ KMP_FATAL(LockUnsettingSetByAnother, func);
+ }
+ return __kmp_release_nested_tas_lock(lck, gtid);
+}
+
+void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck) {
+ __kmp_init_tas_lock(lck);
+ lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
+}
+
+static void __kmp_init_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
+ __kmp_init_nested_tas_lock(lck);
+}
+
+void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck) {
+ __kmp_destroy_tas_lock(lck);
+ lck->lk.depth_locked = 0;
+}
+
+static void __kmp_destroy_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
+ char const *const func = "omp_destroy_nest_lock";
+ if (!__kmp_is_tas_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ if (__kmp_get_tas_lock_owner(lck) != -1) {
+ KMP_FATAL(LockStillOwned, func);
+ }
+ __kmp_destroy_nested_tas_lock(lck);
}
-static void
-__kmp_destroy_nested_tas_lock_with_checks( kmp_tas_lock_t *lck )
-{
- char const * const func = "omp_destroy_nest_lock";
- if ( ! __kmp_is_tas_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- if ( __kmp_get_tas_lock_owner( lck ) != -1 ) {
- KMP_FATAL( LockStillOwned, func );
- }
- __kmp_destroy_nested_tas_lock( lck );
-}
-
-
#if KMP_USE_FUTEX
/* ------------------------------------------------------------------------ */
@@ -379,1573 +326,1471 @@ __kmp_destroy_nested_tas_lock_with_check
// set locks, and are allocated the same way (i.e. use the area allocated by
// the compiler for non-nested locks / allocate nested locks on the heap).
-static kmp_int32
-__kmp_get_futex_lock_owner( kmp_futex_lock_t *lck )
-{
- return KMP_LOCK_STRIP(( TCR_4( lck->lk.poll ) >> 1 )) - 1;
+static kmp_int32 __kmp_get_futex_lock_owner(kmp_futex_lock_t *lck) {
+ return KMP_LOCK_STRIP((TCR_4(lck->lk.poll) >> 1)) - 1;
}
-static inline bool
-__kmp_is_futex_lock_nestable( kmp_futex_lock_t *lck )
-{
- return lck->lk.depth_locked != -1;
+static inline bool __kmp_is_futex_lock_nestable(kmp_futex_lock_t *lck) {
+ return lck->lk.depth_locked != -1;
}
__forceinline static int
-__kmp_acquire_futex_lock_timed_template( kmp_futex_lock_t *lck, kmp_int32 gtid )
-{
- kmp_int32 gtid_code = ( gtid + 1 ) << 1;
+__kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t *lck, kmp_int32 gtid) {
+ kmp_int32 gtid_code = (gtid + 1) << 1;
- KMP_MB();
+ KMP_MB();
#ifdef USE_LOCK_PROFILE
- kmp_uint32 curr = KMP_LOCK_STRIP( TCR_4( lck->lk.poll ) );
- if ( ( curr != 0 ) && ( curr != gtid_code ) )
- __kmp_printf( "LOCK CONTENTION: %p\n", lck );
- /* else __kmp_printf( "." );*/
+ kmp_uint32 curr = KMP_LOCK_STRIP(TCR_4(lck->lk.poll));
+ if ((curr != 0) && (curr != gtid_code))
+ __kmp_printf("LOCK CONTENTION: %p\n", lck);
+/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */
- KMP_FSYNC_PREPARE( lck );
- KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d entering\n",
- lck, lck->lk.poll, gtid ) );
-
- kmp_int32 poll_val;
-
- while ( ( poll_val = KMP_COMPARE_AND_STORE_RET32( & ( lck->lk.poll ), KMP_LOCK_FREE(futex),
- KMP_LOCK_BUSY(gtid_code, futex) ) ) != KMP_LOCK_FREE(futex) ) {
-
- kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1;
- KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d poll_val = 0x%x cond = 0x%x\n",
- lck, gtid, poll_val, cond ) );
-
- //
- // NOTE: if you try to use the following condition for this branch
- //
- // if ( poll_val & 1 == 0 )
- //
- // Then the 12.0 compiler has a bug where the following block will
- // always be skipped, regardless of the value of the LSB of poll_val.
- //
- if ( ! cond ) {
- //
- // Try to set the lsb in the poll to indicate to the owner
- // thread that they need to wake this thread up.
- //
- if ( ! KMP_COMPARE_AND_STORE_REL32( & ( lck->lk.poll ), poll_val, poll_val | KMP_LOCK_BUSY(1, futex) ) ) {
- KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d can't set bit 0\n",
- lck, lck->lk.poll, gtid ) );
- continue;
- }
- poll_val |= KMP_LOCK_BUSY(1, futex);
-
- KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d bit 0 set\n",
- lck, lck->lk.poll, gtid ) );
- }
-
- KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d before futex_wait(0x%x)\n",
- lck, gtid, poll_val ) );
-
- kmp_int32 rc;
- if ( ( rc = syscall( __NR_futex, & ( lck->lk.poll ), FUTEX_WAIT,
- poll_val, NULL, NULL, 0 ) ) != 0 ) {
- KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d futex_wait(0x%x) failed (rc=%d errno=%d)\n",
- lck, gtid, poll_val, rc, errno ) );
- continue;
- }
-
- KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d after futex_wait(0x%x)\n",
- lck, gtid, poll_val ) );
- //
- // This thread has now done a successful futex wait call and was
- // entered on the OS futex queue. We must now perform a futex
- // wake call when releasing the lock, as we have no idea how many
- // other threads are in the queue.
- //
- gtid_code |= 1;
- }
-
- KMP_FSYNC_ACQUIRED( lck );
- KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d exiting\n",
- lck, lck->lk.poll, gtid ) );
- return KMP_LOCK_ACQUIRED_FIRST;
+ KMP_FSYNC_PREPARE(lck);
+ KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d entering\n",
+ lck, lck->lk.poll, gtid));
+
+ kmp_int32 poll_val;
+
+ while ((poll_val = KMP_COMPARE_AND_STORE_RET32(
+ &(lck->lk.poll), KMP_LOCK_FREE(futex),
+ KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) {
+
+ kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1;
+ KA_TRACE(
+ 1000,
+ ("__kmp_acquire_futex_lock: lck:%p, T#%d poll_val = 0x%x cond = 0x%x\n",
+ lck, gtid, poll_val, cond));
+
+ // NOTE: if you try to use the following condition for this branch
+ //
+ // if ( poll_val & 1 == 0 )
+ //
+ // Then the 12.0 compiler has a bug where the following block will
+ // always be skipped, regardless of the value of the LSB of poll_val.
+ if (!cond) {
+ // Try to set the lsb in the poll to indicate to the owner
+ // thread that they need to wake this thread up.
+ if (!KMP_COMPARE_AND_STORE_REL32(&(lck->lk.poll), poll_val,
+ poll_val | KMP_LOCK_BUSY(1, futex))) {
+ KA_TRACE(
+ 1000,
+ ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d can't set bit 0\n",
+ lck, lck->lk.poll, gtid));
+ continue;
+ }
+ poll_val |= KMP_LOCK_BUSY(1, futex);
+
+ KA_TRACE(1000,
+ ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d bit 0 set\n", lck,
+ lck->lk.poll, gtid));
+ }
+
+ KA_TRACE(
+ 1000,
+ ("__kmp_acquire_futex_lock: lck:%p, T#%d before futex_wait(0x%x)\n",
+ lck, gtid, poll_val));
+
+ kmp_int32 rc;
+ if ((rc = syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAIT, poll_val, NULL,
+ NULL, 0)) != 0) {
+ KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d futex_wait(0x%x) "
+ "failed (rc=%d errno=%d)\n",
+ lck, gtid, poll_val, rc, errno));
+ continue;
+ }
+
+ KA_TRACE(1000,
+ ("__kmp_acquire_futex_lock: lck:%p, T#%d after futex_wait(0x%x)\n",
+ lck, gtid, poll_val));
+ // This thread has now done a successful futex wait call and was entered on
+ // the OS futex queue. We must now perform a futex wake call when releasing
+ // the lock, as we have no idea how many other threads are in the queue.
+ gtid_code |= 1;
+ }
+
+ KMP_FSYNC_ACQUIRED(lck);
+ KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
+ lck->lk.poll, gtid));
+ return KMP_LOCK_ACQUIRED_FIRST;
}
-int
-__kmp_acquire_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
-{
- int retval = __kmp_acquire_futex_lock_timed_template( lck, gtid );
+int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
+ int retval = __kmp_acquire_futex_lock_timed_template(lck, gtid);
ANNOTATE_FUTEX_ACQUIRED(lck);
return retval;
}
-static int
-__kmp_acquire_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_set_lock";
- if ( ( sizeof ( kmp_futex_lock_t ) <= OMP_LOCK_T_SIZE )
- && __kmp_is_futex_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( ( gtid >= 0 ) && ( __kmp_get_futex_lock_owner( lck ) == gtid ) ) {
- KMP_FATAL( LockIsAlreadyOwned, func );
- }
- return __kmp_acquire_futex_lock( lck, gtid );
-}
-
-int
-__kmp_test_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
-{
- if ( KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), KMP_LOCK_FREE(futex), KMP_LOCK_BUSY((gtid+1) << 1, futex) ) ) {
- KMP_FSYNC_ACQUIRED( lck );
- return TRUE;
- }
- return FALSE;
-}
-
-static int
-__kmp_test_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_test_lock";
- if ( ( sizeof ( kmp_futex_lock_t ) <= OMP_LOCK_T_SIZE )
- && __kmp_is_futex_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- return __kmp_test_futex_lock( lck, gtid );
-}
-
-int
-__kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
-{
- KMP_MB(); /* Flush all pending memory write invalidates. */
-
- KA_TRACE( 1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d entering\n",
- lck, lck->lk.poll, gtid ) );
-
- KMP_FSYNC_RELEASING(lck);
- ANNOTATE_FUTEX_RELEASED(lck);
-
- kmp_int32 poll_val = KMP_XCHG_FIXED32( & ( lck->lk.poll ), KMP_LOCK_FREE(futex) );
-
- KA_TRACE( 1000, ("__kmp_release_futex_lock: lck:%p, T#%d released poll_val = 0x%x\n",
- lck, gtid, poll_val ) );
-
- if ( KMP_LOCK_STRIP(poll_val) & 1 ) {
- KA_TRACE( 1000, ("__kmp_release_futex_lock: lck:%p, T#%d futex_wake 1 thread\n",
- lck, gtid ) );
- syscall( __NR_futex, & ( lck->lk.poll ), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), NULL, NULL, 0 );
- }
-
- KMP_MB(); /* Flush all pending memory write invalidates. */
-
- KA_TRACE( 1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d exiting\n",
- lck, lck->lk.poll, gtid ) );
-
- KMP_YIELD( TCR_4( __kmp_nth ) > ( __kmp_avail_proc ? __kmp_avail_proc :
- __kmp_xproc ) );
- return KMP_LOCK_RELEASED;
-}
-
-static int
-__kmp_release_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_unset_lock";
- KMP_MB(); /* in case another processor initialized lock */
- if ( ( sizeof ( kmp_futex_lock_t ) <= OMP_LOCK_T_SIZE )
- && __kmp_is_futex_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( __kmp_get_futex_lock_owner( lck ) == -1 ) {
- KMP_FATAL( LockUnsettingFree, func );
- }
- if ( ( gtid >= 0 ) && ( __kmp_get_futex_lock_owner( lck ) >= 0 )
- && ( __kmp_get_futex_lock_owner( lck ) != gtid ) ) {
- KMP_FATAL( LockUnsettingSetByAnother, func );
- }
- return __kmp_release_futex_lock( lck, gtid );
-}
-
-void
-__kmp_init_futex_lock( kmp_futex_lock_t * lck )
-{
- TCW_4( lck->lk.poll, KMP_LOCK_FREE(futex) );
-}
-
-static void
-__kmp_init_futex_lock_with_checks( kmp_futex_lock_t * lck )
-{
- __kmp_init_futex_lock( lck );
-}
-
-void
-__kmp_destroy_futex_lock( kmp_futex_lock_t *lck )
-{
- lck->lk.poll = 0;
-}
-
-static void
-__kmp_destroy_futex_lock_with_checks( kmp_futex_lock_t *lck )
-{
- char const * const func = "omp_destroy_lock";
- if ( ( sizeof ( kmp_futex_lock_t ) <= OMP_LOCK_T_SIZE )
- && __kmp_is_futex_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( __kmp_get_futex_lock_owner( lck ) != -1 ) {
- KMP_FATAL( LockStillOwned, func );
- }
- __kmp_destroy_futex_lock( lck );
+static int __kmp_acquire_futex_lock_with_checks(kmp_futex_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_set_lock";
+ if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
+ __kmp_is_futex_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) == gtid)) {
+ KMP_FATAL(LockIsAlreadyOwned, func);
+ }
+ return __kmp_acquire_futex_lock(lck, gtid);
+}
+
+int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
+ if (KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(futex),
+ KMP_LOCK_BUSY((gtid + 1) << 1, futex))) {
+ KMP_FSYNC_ACQUIRED(lck);
+ return TRUE;
+ }
+ return FALSE;
+}
+
+static int __kmp_test_futex_lock_with_checks(kmp_futex_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_test_lock";
+ if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
+ __kmp_is_futex_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ return __kmp_test_futex_lock(lck, gtid);
+}
+
+int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
+ KMP_MB(); /* Flush all pending memory write invalidates. */
+
+ KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d entering\n",
+ lck, lck->lk.poll, gtid));
+
+ KMP_FSYNC_RELEASING(lck);
+ ANNOTATE_FUTEX_RELEASED(lck);
+
+ kmp_int32 poll_val = KMP_XCHG_FIXED32(&(lck->lk.poll), KMP_LOCK_FREE(futex));
+
+ KA_TRACE(1000,
+ ("__kmp_release_futex_lock: lck:%p, T#%d released poll_val = 0x%x\n",
+ lck, gtid, poll_val));
+
+ if (KMP_LOCK_STRIP(poll_val) & 1) {
+ KA_TRACE(1000,
+ ("__kmp_release_futex_lock: lck:%p, T#%d futex_wake 1 thread\n",
+ lck, gtid));
+ syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex),
+ NULL, NULL, 0);
+ }
+
+ KMP_MB(); /* Flush all pending memory write invalidates. */
+
+ KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
+ lck->lk.poll, gtid));
+
+ KMP_YIELD(TCR_4(__kmp_nth) >
+ (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
+ return KMP_LOCK_RELEASED;
+}
+
+static int __kmp_release_futex_lock_with_checks(kmp_futex_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_unset_lock";
+ KMP_MB(); /* in case another processor initialized lock */
+ if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
+ __kmp_is_futex_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if (__kmp_get_futex_lock_owner(lck) == -1) {
+ KMP_FATAL(LockUnsettingFree, func);
+ }
+ if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) >= 0) &&
+ (__kmp_get_futex_lock_owner(lck) != gtid)) {
+ KMP_FATAL(LockUnsettingSetByAnother, func);
+ }
+ return __kmp_release_futex_lock(lck, gtid);
+}
+
+void __kmp_init_futex_lock(kmp_futex_lock_t *lck) {
+ TCW_4(lck->lk.poll, KMP_LOCK_FREE(futex));
+}
+
+static void __kmp_init_futex_lock_with_checks(kmp_futex_lock_t *lck) {
+ __kmp_init_futex_lock(lck);
+}
+
+void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { lck->lk.poll = 0; }
+
+static void __kmp_destroy_futex_lock_with_checks(kmp_futex_lock_t *lck) {
+ char const *const func = "omp_destroy_lock";
+ if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
+ __kmp_is_futex_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if (__kmp_get_futex_lock_owner(lck) != -1) {
+ KMP_FATAL(LockStillOwned, func);
+ }
+ __kmp_destroy_futex_lock(lck);
}
-
-//
// nested futex locks
-//
-int
-__kmp_acquire_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( gtid >= 0 );
-
- if ( __kmp_get_futex_lock_owner( lck ) == gtid ) {
- lck->lk.depth_locked += 1;
- return KMP_LOCK_ACQUIRED_NEXT;
- }
- else {
- __kmp_acquire_futex_lock_timed_template( lck, gtid );
- ANNOTATE_FUTEX_ACQUIRED(lck);
- lck->lk.depth_locked = 1;
- return KMP_LOCK_ACQUIRED_FIRST;
- }
-}
+int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(gtid >= 0);
-static int
-__kmp_acquire_nested_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_set_nest_lock";
- if ( ! __kmp_is_futex_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- return __kmp_acquire_nested_futex_lock( lck, gtid );
+ if (__kmp_get_futex_lock_owner(lck) == gtid) {
+ lck->lk.depth_locked += 1;
+ return KMP_LOCK_ACQUIRED_NEXT;
+ } else {
+ __kmp_acquire_futex_lock_timed_template(lck, gtid);
+ ANNOTATE_FUTEX_ACQUIRED(lck);
+ lck->lk.depth_locked = 1;
+ return KMP_LOCK_ACQUIRED_FIRST;
+ }
}
-int
-__kmp_test_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
-{
- int retval;
-
- KMP_DEBUG_ASSERT( gtid >= 0 );
-
- if ( __kmp_get_futex_lock_owner( lck ) == gtid ) {
- retval = ++lck->lk.depth_locked;
- }
- else if ( !__kmp_test_futex_lock( lck, gtid ) ) {
- retval = 0;
- }
- else {
- KMP_MB();
- retval = lck->lk.depth_locked = 1;
- }
- return retval;
+static int __kmp_acquire_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_set_nest_lock";
+ if (!__kmp_is_futex_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ return __kmp_acquire_nested_futex_lock(lck, gtid);
}
-static int
-__kmp_test_nested_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_test_nest_lock";
- if ( ! __kmp_is_futex_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- return __kmp_test_nested_futex_lock( lck, gtid );
-}
+int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
+ int retval;
-int
-__kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( gtid >= 0 );
+ KMP_DEBUG_ASSERT(gtid >= 0);
+ if (__kmp_get_futex_lock_owner(lck) == gtid) {
+ retval = ++lck->lk.depth_locked;
+ } else if (!__kmp_test_futex_lock(lck, gtid)) {
+ retval = 0;
+ } else {
KMP_MB();
- if ( --(lck->lk.depth_locked) == 0 ) {
- __kmp_release_futex_lock( lck, gtid );
- return KMP_LOCK_RELEASED;
- }
- return KMP_LOCK_STILL_HELD;
-}
-
-static int
-__kmp_release_nested_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_unset_nest_lock";
- KMP_MB(); /* in case another processor initialized lock */
- if ( ! __kmp_is_futex_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- if ( __kmp_get_futex_lock_owner( lck ) == -1 ) {
- KMP_FATAL( LockUnsettingFree, func );
- }
- if ( __kmp_get_futex_lock_owner( lck ) != gtid ) {
- KMP_FATAL( LockUnsettingSetByAnother, func );
- }
- return __kmp_release_nested_futex_lock( lck, gtid );
+ retval = lck->lk.depth_locked = 1;
+ }
+ return retval;
}
-void
-__kmp_init_nested_futex_lock( kmp_futex_lock_t * lck )
-{
- __kmp_init_futex_lock( lck );
- lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
+static int __kmp_test_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_test_nest_lock";
+ if (!__kmp_is_futex_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ return __kmp_test_nested_futex_lock(lck, gtid);
}
-static void
-__kmp_init_nested_futex_lock_with_checks( kmp_futex_lock_t * lck )
-{
- __kmp_init_nested_futex_lock( lck );
+int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(gtid >= 0);
+
+ KMP_MB();
+ if (--(lck->lk.depth_locked) == 0) {
+ __kmp_release_futex_lock(lck, gtid);
+ return KMP_LOCK_RELEASED;
+ }
+ return KMP_LOCK_STILL_HELD;
}
-void
-__kmp_destroy_nested_futex_lock( kmp_futex_lock_t *lck )
-{
- __kmp_destroy_futex_lock( lck );
- lck->lk.depth_locked = 0;
-}
-
-static void
-__kmp_destroy_nested_futex_lock_with_checks( kmp_futex_lock_t *lck )
-{
- char const * const func = "omp_destroy_nest_lock";
- if ( ! __kmp_is_futex_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- if ( __kmp_get_futex_lock_owner( lck ) != -1 ) {
- KMP_FATAL( LockStillOwned, func );
- }
- __kmp_destroy_nested_futex_lock( lck );
+static int __kmp_release_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_unset_nest_lock";
+ KMP_MB(); /* in case another processor initialized lock */
+ if (!__kmp_is_futex_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ if (__kmp_get_futex_lock_owner(lck) == -1) {
+ KMP_FATAL(LockUnsettingFree, func);
+ }
+ if (__kmp_get_futex_lock_owner(lck) != gtid) {
+ KMP_FATAL(LockUnsettingSetByAnother, func);
+ }
+ return __kmp_release_nested_futex_lock(lck, gtid);
+}
+
+void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck) {
+ __kmp_init_futex_lock(lck);
+ lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
+}
+
+static void __kmp_init_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
+ __kmp_init_nested_futex_lock(lck);
+}
+
+void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck) {
+ __kmp_destroy_futex_lock(lck);
+ lck->lk.depth_locked = 0;
+}
+
+static void __kmp_destroy_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
+ char const *const func = "omp_destroy_nest_lock";
+ if (!__kmp_is_futex_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ if (__kmp_get_futex_lock_owner(lck) != -1) {
+ KMP_FATAL(LockStillOwned, func);
+ }
+ __kmp_destroy_nested_futex_lock(lck);
}
#endif // KMP_USE_FUTEX
-
/* ------------------------------------------------------------------------ */
/* ticket (bakery) locks */
-static kmp_int32
-__kmp_get_ticket_lock_owner( kmp_ticket_lock_t *lck )
-{
- return std::atomic_load_explicit( &lck->lk.owner_id, std::memory_order_relaxed ) - 1;
-}
-
-static inline bool
-__kmp_is_ticket_lock_nestable( kmp_ticket_lock_t *lck )
-{
- return std::atomic_load_explicit( &lck->lk.depth_locked, std::memory_order_relaxed ) != -1;
-}
-
-static kmp_uint32
-__kmp_bakery_check( void *now_serving, kmp_uint32 my_ticket )
-{
- return std::atomic_load_explicit( (std::atomic<unsigned> *)now_serving, std::memory_order_acquire ) == my_ticket;
+static kmp_int32 __kmp_get_ticket_lock_owner(kmp_ticket_lock_t *lck) {
+ return std::atomic_load_explicit(&lck->lk.owner_id,
+ std::memory_order_relaxed) -
+ 1;
+}
+
+static inline bool __kmp_is_ticket_lock_nestable(kmp_ticket_lock_t *lck) {
+ return std::atomic_load_explicit(&lck->lk.depth_locked,
+ std::memory_order_relaxed) != -1;
+}
+
+static kmp_uint32 __kmp_bakery_check(void *now_serving, kmp_uint32 my_ticket) {
+ return std::atomic_load_explicit((std::atomic<unsigned> *)now_serving,
+ std::memory_order_acquire) == my_ticket;
}
__forceinline static int
-__kmp_acquire_ticket_lock_timed_template( kmp_ticket_lock_t *lck, kmp_int32 gtid )
-{
- kmp_uint32 my_ticket = std::atomic_fetch_add_explicit( &lck->lk.next_ticket, 1U, std::memory_order_relaxed );
+__kmp_acquire_ticket_lock_timed_template(kmp_ticket_lock_t *lck,
+ kmp_int32 gtid) {
+ kmp_uint32 my_ticket = std::atomic_fetch_add_explicit(
+ &lck->lk.next_ticket, 1U, std::memory_order_relaxed);
#ifdef USE_LOCK_PROFILE
- if ( std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_relaxed ) != my_ticket )
- __kmp_printf( "LOCK CONTENTION: %p\n", lck );
- /* else __kmp_printf( "." );*/
+ if (std::atomic_load_explicit(&lck->lk.now_serving,
+ std::memory_order_relaxed) != my_ticket)
+ __kmp_printf("LOCK CONTENTION: %p\n", lck);
+/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */
- if ( std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_acquire ) == my_ticket ) {
- return KMP_LOCK_ACQUIRED_FIRST;
- }
- KMP_WAIT_YIELD_PTR( &lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck );
+ if (std::atomic_load_explicit(&lck->lk.now_serving,
+ std::memory_order_acquire) == my_ticket) {
return KMP_LOCK_ACQUIRED_FIRST;
+ }
+ KMP_WAIT_YIELD_PTR(&lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck);
+ return KMP_LOCK_ACQUIRED_FIRST;
}
-int
-__kmp_acquire_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
-{
- int retval = __kmp_acquire_ticket_lock_timed_template( lck, gtid );
+int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
+ int retval = __kmp_acquire_ticket_lock_timed_template(lck, gtid);
ANNOTATE_TICKET_ACQUIRED(lck);
return retval;
}
-static int
-__kmp_acquire_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_set_lock";
-
- if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( lck->lk.self != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_is_ticket_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( ( gtid >= 0 ) && ( __kmp_get_ticket_lock_owner( lck ) == gtid ) ) {
- KMP_FATAL( LockIsAlreadyOwned, func );
- }
-
- __kmp_acquire_ticket_lock( lck, gtid );
-
- std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed );
- return KMP_LOCK_ACQUIRED_FIRST;
-}
-
-int
-__kmp_test_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
-{
- kmp_uint32 my_ticket = std::atomic_load_explicit( &lck->lk.next_ticket, std::memory_order_relaxed );
-
- if ( std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_relaxed ) == my_ticket ) {
- kmp_uint32 next_ticket = my_ticket + 1;
- if ( std::atomic_compare_exchange_strong_explicit( &lck->lk.next_ticket,
- &my_ticket, next_ticket, std::memory_order_acquire, std::memory_order_acquire )) {
- return TRUE;
- }
- }
- return FALSE;
-}
-
-static int
-__kmp_test_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_test_lock";
-
- if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( lck->lk.self != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_is_ticket_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
-
- int retval = __kmp_test_ticket_lock( lck, gtid );
-
- if ( retval ) {
- std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed );
- }
- return retval;
-}
-
-int
-__kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
-{
- kmp_uint32 distance = std::atomic_load_explicit( &lck->lk.next_ticket, std::memory_order_relaxed ) - std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_relaxed );
-
- ANNOTATE_TICKET_RELEASED(lck);
- std::atomic_fetch_add_explicit( &lck->lk.now_serving, 1U, std::memory_order_release );
-
- KMP_YIELD( distance
- > (kmp_uint32) (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) );
- return KMP_LOCK_RELEASED;
-}
-
-static int
-__kmp_release_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_unset_lock";
-
- if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( lck->lk.self != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_is_ticket_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( __kmp_get_ticket_lock_owner( lck ) == -1 ) {
- KMP_FATAL( LockUnsettingFree, func );
- }
- if ( ( gtid >= 0 ) && ( __kmp_get_ticket_lock_owner( lck ) >= 0 )
- && ( __kmp_get_ticket_lock_owner( lck ) != gtid ) ) {
- KMP_FATAL( LockUnsettingSetByAnother, func );
- }
- std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed );
- return __kmp_release_ticket_lock( lck, gtid );
-}
-
-void
-__kmp_init_ticket_lock( kmp_ticket_lock_t * lck )
-{
- lck->lk.location = NULL;
- lck->lk.self = lck;
- std::atomic_store_explicit( &lck->lk.next_ticket, 0U, std::memory_order_relaxed );
- std::atomic_store_explicit( &lck->lk.now_serving, 0U, std::memory_order_relaxed );
- std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed ); // no thread owns the lock.
- std::atomic_store_explicit( &lck->lk.depth_locked, -1, std::memory_order_relaxed ); // -1 => not a nested lock.
- std::atomic_store_explicit( &lck->lk.initialized, true, std::memory_order_release );
-}
-
-static void
-__kmp_init_ticket_lock_with_checks( kmp_ticket_lock_t * lck )
-{
- __kmp_init_ticket_lock( lck );
-}
-
-void
-__kmp_destroy_ticket_lock( kmp_ticket_lock_t *lck )
-{
- std::atomic_store_explicit( &lck->lk.initialized, false, std::memory_order_release );
- lck->lk.self = NULL;
- lck->lk.location = NULL;
- std::atomic_store_explicit( &lck->lk.next_ticket, 0U, std::memory_order_relaxed );
- std::atomic_store_explicit( &lck->lk.now_serving, 0U, std::memory_order_relaxed );
- std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed );
- std::atomic_store_explicit( &lck->lk.depth_locked, -1, std::memory_order_relaxed );
+static int __kmp_acquire_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_set_lock";
+
+ if (!std::atomic_load_explicit(&lck->lk.initialized,
+ std::memory_order_relaxed)) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (lck->lk.self != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_is_ticket_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) == gtid)) {
+ KMP_FATAL(LockIsAlreadyOwned, func);
+ }
+
+ __kmp_acquire_ticket_lock(lck, gtid);
+
+ std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
+ std::memory_order_relaxed);
+ return KMP_LOCK_ACQUIRED_FIRST;
+}
+
+int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
+ kmp_uint32 my_ticket = std::atomic_load_explicit(&lck->lk.next_ticket,
+ std::memory_order_relaxed);
+
+ if (std::atomic_load_explicit(&lck->lk.now_serving,
+ std::memory_order_relaxed) == my_ticket) {
+ kmp_uint32 next_ticket = my_ticket + 1;
+ if (std::atomic_compare_exchange_strong_explicit(
+ &lck->lk.next_ticket, &my_ticket, next_ticket,
+ std::memory_order_acquire, std::memory_order_acquire)) {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+static int __kmp_test_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_test_lock";
+
+ if (!std::atomic_load_explicit(&lck->lk.initialized,
+ std::memory_order_relaxed)) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (lck->lk.self != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_is_ticket_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+
+ int retval = __kmp_test_ticket_lock(lck, gtid);
+
+ if (retval) {
+ std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
+ std::memory_order_relaxed);
+ }
+ return retval;
}
-static void
-__kmp_destroy_ticket_lock_with_checks( kmp_ticket_lock_t *lck )
-{
- char const * const func = "omp_destroy_lock";
-
- if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( lck->lk.self != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_is_ticket_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( __kmp_get_ticket_lock_owner( lck ) != -1 ) {
- KMP_FATAL( LockStillOwned, func );
- }
- __kmp_destroy_ticket_lock( lck );
+int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
+ kmp_uint32 distance = std::atomic_load_explicit(&lck->lk.next_ticket,
+ std::memory_order_relaxed) -
+ std::atomic_load_explicit(&lck->lk.now_serving,
+ std::memory_order_relaxed);
+
+ ANNOTATE_TICKET_RELEASED(lck);
+ std::atomic_fetch_add_explicit(&lck->lk.now_serving, 1U,
+ std::memory_order_release);
+
+ KMP_YIELD(distance >
+ (kmp_uint32)(__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
+ return KMP_LOCK_RELEASED;
+}
+
+static int __kmp_release_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_unset_lock";
+
+ if (!std::atomic_load_explicit(&lck->lk.initialized,
+ std::memory_order_relaxed)) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (lck->lk.self != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_is_ticket_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if (__kmp_get_ticket_lock_owner(lck) == -1) {
+ KMP_FATAL(LockUnsettingFree, func);
+ }
+ if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) >= 0) &&
+ (__kmp_get_ticket_lock_owner(lck) != gtid)) {
+ KMP_FATAL(LockUnsettingSetByAnother, func);
+ }
+ std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
+ return __kmp_release_ticket_lock(lck, gtid);
+}
+
+void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck) {
+ lck->lk.location = NULL;
+ lck->lk.self = lck;
+ std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
+ std::memory_order_relaxed);
+ std::atomic_store_explicit(&lck->lk.now_serving, 0U,
+ std::memory_order_relaxed);
+ std::atomic_store_explicit(
+ &lck->lk.owner_id, 0,
+ std::memory_order_relaxed); // no thread owns the lock.
+ std::atomic_store_explicit(
+ &lck->lk.depth_locked, -1,
+ std::memory_order_relaxed); // -1 => not a nested lock.
+ std::atomic_store_explicit(&lck->lk.initialized, true,
+ std::memory_order_release);
+}
+
+static void __kmp_init_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
+ __kmp_init_ticket_lock(lck);
+}
+
+void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck) {
+ std::atomic_store_explicit(&lck->lk.initialized, false,
+ std::memory_order_release);
+ lck->lk.self = NULL;
+ lck->lk.location = NULL;
+ std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
+ std::memory_order_relaxed);
+ std::atomic_store_explicit(&lck->lk.now_serving, 0U,
+ std::memory_order_relaxed);
+ std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
+ std::atomic_store_explicit(&lck->lk.depth_locked, -1,
+ std::memory_order_relaxed);
+}
+
+static void __kmp_destroy_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
+ char const *const func = "omp_destroy_lock";
+
+ if (!std::atomic_load_explicit(&lck->lk.initialized,
+ std::memory_order_relaxed)) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (lck->lk.self != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_is_ticket_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if (__kmp_get_ticket_lock_owner(lck) != -1) {
+ KMP_FATAL(LockStillOwned, func);
+ }
+ __kmp_destroy_ticket_lock(lck);
}
-
-//
// nested ticket locks
-//
-
-int
-__kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( gtid >= 0 );
-
- if ( __kmp_get_ticket_lock_owner( lck ) == gtid ) {
- std::atomic_fetch_add_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed );
- return KMP_LOCK_ACQUIRED_NEXT;
- }
- else {
- __kmp_acquire_ticket_lock_timed_template( lck, gtid );
- ANNOTATE_TICKET_ACQUIRED(lck);
- std::atomic_store_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed );
- std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed );
- return KMP_LOCK_ACQUIRED_FIRST;
- }
-}
-
-static int
-__kmp_acquire_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_set_nest_lock";
-
- if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( lck->lk.self != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( ! __kmp_is_ticket_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- return __kmp_acquire_nested_ticket_lock( lck, gtid );
-}
-
-int
-__kmp_test_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
-{
- int retval;
-
- KMP_DEBUG_ASSERT( gtid >= 0 );
-
- if ( __kmp_get_ticket_lock_owner( lck ) == gtid ) {
- retval = std::atomic_fetch_add_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed ) + 1;
- }
- else if ( !__kmp_test_ticket_lock( lck, gtid ) ) {
- retval = 0;
- }
- else {
- std::atomic_store_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed );
- std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed );
- retval = 1;
- }
- return retval;
-}
-
-static int
-__kmp_test_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck,
- kmp_int32 gtid )
-{
- char const * const func = "omp_test_nest_lock";
-
- if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( lck->lk.self != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( ! __kmp_is_ticket_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- return __kmp_test_nested_ticket_lock( lck, gtid );
-}
-
-int
-__kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( gtid >= 0 );
- if ( ( std::atomic_fetch_add_explicit( &lck->lk.depth_locked, -1, std::memory_order_relaxed ) - 1 ) == 0 ) {
- std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed );
- __kmp_release_ticket_lock( lck, gtid );
- return KMP_LOCK_RELEASED;
- }
- return KMP_LOCK_STILL_HELD;
-}
+int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(gtid >= 0);
-static int
-__kmp_release_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_unset_nest_lock";
-
- if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( lck->lk.self != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( ! __kmp_is_ticket_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- if ( __kmp_get_ticket_lock_owner( lck ) == -1 ) {
- KMP_FATAL( LockUnsettingFree, func );
- }
- if ( __kmp_get_ticket_lock_owner( lck ) != gtid ) {
- KMP_FATAL( LockUnsettingSetByAnother, func );
- }
- return __kmp_release_nested_ticket_lock( lck, gtid );
+ if (__kmp_get_ticket_lock_owner(lck) == gtid) {
+ std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
+ std::memory_order_relaxed);
+ return KMP_LOCK_ACQUIRED_NEXT;
+ } else {
+ __kmp_acquire_ticket_lock_timed_template(lck, gtid);
+ ANNOTATE_TICKET_ACQUIRED(lck);
+ std::atomic_store_explicit(&lck->lk.depth_locked, 1,
+ std::memory_order_relaxed);
+ std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
+ std::memory_order_relaxed);
+ return KMP_LOCK_ACQUIRED_FIRST;
+ }
}
-void
-__kmp_init_nested_ticket_lock( kmp_ticket_lock_t * lck )
-{
- __kmp_init_ticket_lock( lck );
- std::atomic_store_explicit( &lck->lk.depth_locked, 0, std::memory_order_relaxed ); // >= 0 for nestable locks, -1 for simple locks
+static int __kmp_acquire_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_set_nest_lock";
+
+ if (!std::atomic_load_explicit(&lck->lk.initialized,
+ std::memory_order_relaxed)) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (lck->lk.self != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (!__kmp_is_ticket_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ return __kmp_acquire_nested_ticket_lock(lck, gtid);
+}
+
+int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
+ int retval;
+
+ KMP_DEBUG_ASSERT(gtid >= 0);
+
+ if (__kmp_get_ticket_lock_owner(lck) == gtid) {
+ retval = std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
+ std::memory_order_relaxed) +
+ 1;
+ } else if (!__kmp_test_ticket_lock(lck, gtid)) {
+ retval = 0;
+ } else {
+ std::atomic_store_explicit(&lck->lk.depth_locked, 1,
+ std::memory_order_relaxed);
+ std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
+ std::memory_order_relaxed);
+ retval = 1;
+ }
+ return retval;
}
-static void
-__kmp_init_nested_ticket_lock_with_checks( kmp_ticket_lock_t * lck )
-{
- __kmp_init_nested_ticket_lock( lck );
+static int __kmp_test_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_test_nest_lock";
+
+ if (!std::atomic_load_explicit(&lck->lk.initialized,
+ std::memory_order_relaxed)) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (lck->lk.self != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (!__kmp_is_ticket_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ return __kmp_test_nested_ticket_lock(lck, gtid);
+}
+
+int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(gtid >= 0);
+
+ if ((std::atomic_fetch_add_explicit(&lck->lk.depth_locked, -1,
+ std::memory_order_relaxed) -
+ 1) == 0) {
+ std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
+ __kmp_release_ticket_lock(lck, gtid);
+ return KMP_LOCK_RELEASED;
+ }
+ return KMP_LOCK_STILL_HELD;
}
-void
-__kmp_destroy_nested_ticket_lock( kmp_ticket_lock_t *lck )
-{
- __kmp_destroy_ticket_lock( lck );
- std::atomic_store_explicit( &lck->lk.depth_locked, 0, std::memory_order_relaxed );
+static int __kmp_release_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_unset_nest_lock";
+
+ if (!std::atomic_load_explicit(&lck->lk.initialized,
+ std::memory_order_relaxed)) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (lck->lk.self != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (!__kmp_is_ticket_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ if (__kmp_get_ticket_lock_owner(lck) == -1) {
+ KMP_FATAL(LockUnsettingFree, func);
+ }
+ if (__kmp_get_ticket_lock_owner(lck) != gtid) {
+ KMP_FATAL(LockUnsettingSetByAnother, func);
+ }
+ return __kmp_release_nested_ticket_lock(lck, gtid);
+}
+
+void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck) {
+ __kmp_init_ticket_lock(lck);
+ std::atomic_store_explicit(&lck->lk.depth_locked, 0,
+ std::memory_order_relaxed); // >= 0 for nestable
+ // locks, -1 for simple
+ // locks
+}
+
+static void __kmp_init_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
+ __kmp_init_nested_ticket_lock(lck);
+}
+
+void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck) {
+ __kmp_destroy_ticket_lock(lck);
+ std::atomic_store_explicit(&lck->lk.depth_locked, 0,
+ std::memory_order_relaxed);
}
static void
-__kmp_destroy_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck )
-{
- char const * const func = "omp_destroy_nest_lock";
-
- if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( lck->lk.self != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( ! __kmp_is_ticket_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- if ( __kmp_get_ticket_lock_owner( lck ) != -1 ) {
- KMP_FATAL( LockStillOwned, func );
- }
- __kmp_destroy_nested_ticket_lock( lck );
+__kmp_destroy_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
+ char const *const func = "omp_destroy_nest_lock";
+
+ if (!std::atomic_load_explicit(&lck->lk.initialized,
+ std::memory_order_relaxed)) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (lck->lk.self != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (!__kmp_is_ticket_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ if (__kmp_get_ticket_lock_owner(lck) != -1) {
+ KMP_FATAL(LockStillOwned, func);
+ }
+ __kmp_destroy_nested_ticket_lock(lck);
}
-
-//
// access functions to fields which don't exist for all lock kinds.
-//
-static int
-__kmp_is_ticket_lock_initialized( kmp_ticket_lock_t *lck )
-{
- return std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) && ( lck->lk.self == lck);
+static int __kmp_is_ticket_lock_initialized(kmp_ticket_lock_t *lck) {
+ return std::atomic_load_explicit(&lck->lk.initialized,
+ std::memory_order_relaxed) &&
+ (lck->lk.self == lck);
}
-static const ident_t *
-__kmp_get_ticket_lock_location( kmp_ticket_lock_t *lck )
-{
- return lck->lk.location;
+static const ident_t *__kmp_get_ticket_lock_location(kmp_ticket_lock_t *lck) {
+ return lck->lk.location;
}
-static void
-__kmp_set_ticket_lock_location( kmp_ticket_lock_t *lck, const ident_t *loc )
-{
- lck->lk.location = loc;
+static void __kmp_set_ticket_lock_location(kmp_ticket_lock_t *lck,
+ const ident_t *loc) {
+ lck->lk.location = loc;
}
-static kmp_lock_flags_t
-__kmp_get_ticket_lock_flags( kmp_ticket_lock_t *lck )
-{
- return lck->lk.flags;
+static kmp_lock_flags_t __kmp_get_ticket_lock_flags(kmp_ticket_lock_t *lck) {
+ return lck->lk.flags;
}
-static void
-__kmp_set_ticket_lock_flags( kmp_ticket_lock_t *lck, kmp_lock_flags_t flags )
-{
- lck->lk.flags = flags;
+static void __kmp_set_ticket_lock_flags(kmp_ticket_lock_t *lck,
+ kmp_lock_flags_t flags) {
+ lck->lk.flags = flags;
}
/* ------------------------------------------------------------------------ */
/* queuing locks */
-/*
- * First the states
- * (head,tail) = 0, 0 means lock is unheld, nobody on queue
- * UINT_MAX or -1, 0 means lock is held, nobody on queue
- * h, h means lock is held or about to transition, 1 element on queue
- * h, t h <> t, means lock is held or about to transition, >1 elements on queue
- *
- * Now the transitions
- * Acquire(0,0) = -1 ,0
- * Release(0,0) = Error
- * Acquire(-1,0) = h ,h h > 0
- * Release(-1,0) = 0 ,0
- * Acquire(h,h) = h ,t h > 0, t > 0, h <> t
- * Release(h,h) = -1 ,0 h > 0
- * Acquire(h,t) = h ,t' h > 0, t > 0, t' > 0, h <> t, h <> t', t <> t'
- * Release(h,t) = h',t h > 0, t > 0, h <> t, h <> h', h' maybe = t
- *
- * And pictorially
- *
- *
- * +-----+
- * | 0, 0|------- release -------> Error
- * +-----+
- * | ^
- * acquire| |release
- * | |
- * | |
- * v |
- * +-----+
- * |-1, 0|
- * +-----+
- * | ^
- * acquire| |release
- * | |
- * | |
- * v |
- * +-----+
- * | h, h|
- * +-----+
- * | ^
- * acquire| |release
- * | |
- * | |
- * v |
- * +-----+
- * | h, t|----- acquire, release loopback ---+
- * +-----+ |
- * ^ |
- * | |
- * +------------------------------------+
- *
+/* First the states
+ (head,tail) = 0, 0 means lock is unheld, nobody on queue
+ UINT_MAX or -1, 0 means lock is held, nobody on queue
+ h, h means lock held or about to transition,
+ 1 element on queue
+ h, t h <> t, means lock is held or about to
+ transition, >1 elements on queue
+
+ Now the transitions
+ Acquire(0,0) = -1 ,0
+ Release(0,0) = Error
+ Acquire(-1,0) = h ,h h > 0
+ Release(-1,0) = 0 ,0
+ Acquire(h,h) = h ,t h > 0, t > 0, h <> t
+ Release(h,h) = -1 ,0 h > 0
+ Acquire(h,t) = h ,t' h > 0, t > 0, t' > 0, h <> t, h <> t', t <> t'
+ Release(h,t) = h',t h > 0, t > 0, h <> t, h <> h', h' maybe = t
+
+ And pictorially
+
+ +-----+
+ | 0, 0|------- release -------> Error
+ +-----+
+ | ^
+ acquire| |release
+ | |
+ | |
+ v |
+ +-----+
+ |-1, 0|
+ +-----+
+ | ^
+ acquire| |release
+ | |
+ | |
+ v |
+ +-----+
+ | h, h|
+ +-----+
+ | ^
+ acquire| |release
+ | |
+ | |
+ v |
+ +-----+
+ | h, t|----- acquire, release loopback ---+
+ +-----+ |
+ ^ |
+ | |
+ +------------------------------------+
*/
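As a cross-check of the state table above, the (head, tail) pair can be classified with a couple of comparisons. The sketch below is purely illustrative and uses hypothetical names; it is not part of the runtime:

// Hypothetical classifier for the queuing-lock states described above.
enum class qstate { unheld, held_empty_queue, held_one_waiter, held_many_waiters };

qstate classify(int head, int tail) {
  if (head == 0 && tail == 0)
    return qstate::unheld;           // (0,0): lock free, queue empty
  if (head == -1 && tail == 0)
    return qstate::held_empty_queue; // (-1,0): held, nobody queued
  if (head == tail)
    return qstate::held_one_waiter;  // (h,h): one thread on the queue
  return qstate::held_many_waiters;  // (h,t), h != t: several waiters
}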
#ifdef DEBUG_QUEUING_LOCKS
/* Stuff for circular trace buffer */
-#define TRACE_BUF_ELE 1024
-static char traces[TRACE_BUF_ELE][128] = { 0 }
+#define TRACE_BUF_ELE 1024
+static char traces[TRACE_BUF_ELE][128] = {0};
static int tc = 0;
-#define TRACE_LOCK(X,Y) KMP_SNPRINTF( traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s\n", X, Y );
-#define TRACE_LOCK_T(X,Y,Z) KMP_SNPRINTF( traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s%d\n", X,Y,Z );
-#define TRACE_LOCK_HT(X,Y,Z,Q) KMP_SNPRINTF( traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s %d,%d\n", X, Y, Z, Q );
-
-static void
-__kmp_dump_queuing_lock( kmp_info_t *this_thr, kmp_int32 gtid,
- kmp_queuing_lock_t *lck, kmp_int32 head_id, kmp_int32 tail_id )
-{
- kmp_int32 t, i;
-
- __kmp_printf_no_lock( "\n__kmp_dump_queuing_lock: TRACE BEGINS HERE! \n" );
-
- i = tc % TRACE_BUF_ELE;
- __kmp_printf_no_lock( "%s\n", traces[i] );
- i = (i+1) % TRACE_BUF_ELE;
- while ( i != (tc % TRACE_BUF_ELE) ) {
- __kmp_printf_no_lock( "%s", traces[i] );
- i = (i+1) % TRACE_BUF_ELE;
- }
- __kmp_printf_no_lock( "\n" );
-
- __kmp_printf_no_lock(
- "\n__kmp_dump_queuing_lock: gtid+1:%d, spin_here:%d, next_wait:%d, head_id:%d, tail_id:%d\n",
- gtid+1, this_thr->th.th_spin_here, this_thr->th.th_next_waiting,
- head_id, tail_id );
-
- __kmp_printf_no_lock( "\t\thead: %d ", lck->lk.head_id );
-
- if ( lck->lk.head_id >= 1 ) {
- t = __kmp_threads[lck->lk.head_id-1]->th.th_next_waiting;
- while (t > 0) {
- __kmp_printf_no_lock( "-> %d ", t );
- t = __kmp_threads[t-1]->th.th_next_waiting;
- }
- }
- __kmp_printf_no_lock( "; tail: %d ", lck->lk.tail_id );
- __kmp_printf_no_lock( "\n\n" );
+#define TRACE_LOCK(X, Y) \
+ KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s\n", X, Y);
+#define TRACE_LOCK_T(X, Y, Z) \
+ KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s%d\n", X, Y, Z);
+#define TRACE_LOCK_HT(X, Y, Z, Q) \
+ KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s %d,%d\n", X, Y, \
+ Z, Q);
+
+static void __kmp_dump_queuing_lock(kmp_info_t *this_thr, kmp_int32 gtid,
+ kmp_queuing_lock_t *lck, kmp_int32 head_id,
+ kmp_int32 tail_id) {
+ kmp_int32 t, i;
+
+ __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: TRACE BEGINS HERE! \n");
+
+ i = tc % TRACE_BUF_ELE;
+ __kmp_printf_no_lock("%s\n", traces[i]);
+ i = (i + 1) % TRACE_BUF_ELE;
+ while (i != (tc % TRACE_BUF_ELE)) {
+ __kmp_printf_no_lock("%s", traces[i]);
+ i = (i + 1) % TRACE_BUF_ELE;
+ }
+ __kmp_printf_no_lock("\n");
+
+ __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: gtid+1:%d, spin_here:%d, "
+ "next_wait:%d, head_id:%d, tail_id:%d\n",
+ gtid + 1, this_thr->th.th_spin_here,
+ this_thr->th.th_next_waiting, head_id, tail_id);
+
+ __kmp_printf_no_lock("\t\thead: %d ", lck->lk.head_id);
+
+ if (lck->lk.head_id >= 1) {
+ t = __kmp_threads[lck->lk.head_id - 1]->th.th_next_waiting;
+ while (t > 0) {
+ __kmp_printf_no_lock("-> %d ", t);
+ t = __kmp_threads[t - 1]->th.th_next_waiting;
+ }
+ }
+ __kmp_printf_no_lock("; tail: %d ", lck->lk.tail_id);
+ __kmp_printf_no_lock("\n\n");
}
#endif /* DEBUG_QUEUING_LOCKS */
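The trace macros above treat `traces` as a ring buffer: each write lands at `tc++ % TRACE_BUF_ELE`, and the dump walks forward from `tc % TRACE_BUF_ELE`, which is the oldest surviving entry once the counter has wrapped. A stripped-down sketch of the same indexing, with hypothetical names and no locking:

#include <stdio.h>

constexpr int N = 8;    // stands in for TRACE_BUF_ELE
static char ring[N][32];
static int count = 0;   // stands in for tc

static void record(const char *msg) {
  snprintf(ring[count++ % N], sizeof(ring[0]), "%s", msg);
}

static void dump() {
  // Oldest surviving entry sits at count % N once the counter has wrapped.
  for (int i = count % N, seen = 0; seen < N; i = (i + 1) % N, ++seen)
    printf("%s\n", ring[i]);
}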
-static kmp_int32
-__kmp_get_queuing_lock_owner( kmp_queuing_lock_t *lck )
-{
- return TCR_4( lck->lk.owner_id ) - 1;
+static kmp_int32 __kmp_get_queuing_lock_owner(kmp_queuing_lock_t *lck) {
+ return TCR_4(lck->lk.owner_id) - 1;
}
-static inline bool
-__kmp_is_queuing_lock_nestable( kmp_queuing_lock_t *lck )
-{
- return lck->lk.depth_locked != -1;
+static inline bool __kmp_is_queuing_lock_nestable(kmp_queuing_lock_t *lck) {
+ return lck->lk.depth_locked != -1;
}
/* Acquire a lock using the queuing lock implementation */
template <bool takeTime>
-/* [TLW] The unused template above is left behind because of what BEB believes is a
- potential compiler problem with __forceinline. */
+/* [TLW] The unused template above is left behind because of what BEB believes
+ is a potential compiler problem with __forceinline. */
__forceinline static int
-__kmp_acquire_queuing_lock_timed_template( kmp_queuing_lock_t *lck,
- kmp_int32 gtid )
-{
- register kmp_info_t *this_thr = __kmp_thread_from_gtid( gtid );
- volatile kmp_int32 *head_id_p = & lck->lk.head_id;
- volatile kmp_int32 *tail_id_p = & lck->lk.tail_id;
- volatile kmp_uint32 *spin_here_p;
- kmp_int32 need_mf = 1;
+__kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
+ kmp_int32 gtid) {
+ register kmp_info_t *this_thr = __kmp_thread_from_gtid(gtid);
+ volatile kmp_int32 *head_id_p = &lck->lk.head_id;
+ volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
+ volatile kmp_uint32 *spin_here_p;
+ kmp_int32 need_mf = 1;
#if OMPT_SUPPORT
- ompt_state_t prev_state = ompt_state_undefined;
+ ompt_state_t prev_state = ompt_state_undefined;
#endif
- KA_TRACE( 1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d entering\n", lck, gtid ));
+ KA_TRACE(1000,
+ ("__kmp_acquire_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
- KMP_FSYNC_PREPARE( lck );
- KMP_DEBUG_ASSERT( this_thr != NULL );
- spin_here_p = & this_thr->th.th_spin_here;
+ KMP_FSYNC_PREPARE(lck);
+ KMP_DEBUG_ASSERT(this_thr != NULL);
+ spin_here_p = &this_thr->th.th_spin_here;
#ifdef DEBUG_QUEUING_LOCKS
- TRACE_LOCK( gtid+1, "acq ent" );
- if ( *spin_here_p )
- __kmp_dump_queuing_lock( this_thr, gtid, lck, *head_id_p, *tail_id_p );
- if ( this_thr->th.th_next_waiting != 0 )
- __kmp_dump_queuing_lock( this_thr, gtid, lck, *head_id_p, *tail_id_p );
-#endif
- KMP_DEBUG_ASSERT( !*spin_here_p );
- KMP_DEBUG_ASSERT( this_thr->th.th_next_waiting == 0 );
-
-
- /* The following st.rel to spin_here_p needs to precede the cmpxchg.acq to head_id_p
- that may follow, not just in execution order, but also in visibility order. This way,
- when a releasing thread observes the changes to the queue by this thread, it can
- rightly assume that spin_here_p has already been set to TRUE, so that when it sets
- spin_here_p to FALSE, it is not premature. If the releasing thread sets spin_here_p
- to FALSE before this thread sets it to TRUE, this thread will hang.
- */
- *spin_here_p = TRUE; /* before enqueuing to prevent race */
-
- while( 1 ) {
- kmp_int32 enqueued;
- kmp_int32 head;
- kmp_int32 tail;
+ TRACE_LOCK(gtid + 1, "acq ent");
+ if (*spin_here_p)
+ __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
+ if (this_thr->th.th_next_waiting != 0)
+ __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
+#endif
+ KMP_DEBUG_ASSERT(!*spin_here_p);
+ KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
+
+ /* The following st.rel to spin_here_p needs to precede the cmpxchg.acq to
+ head_id_p that may follow, not just in execution order, but also in
+ visibility order. This way, when a releasing thread observes the changes to
+ the queue by this thread, it can rightly assume that spin_here_p has
+ already been set to TRUE, so that when it sets spin_here_p to FALSE, it is
+ not premature. If the releasing thread sets spin_here_p to FALSE before
+ this thread sets it to TRUE, this thread will hang. */
+ *spin_here_p = TRUE; /* before enqueuing to prevent race */
+
+ while (1) {
+ kmp_int32 enqueued;
+ kmp_int32 head;
+ kmp_int32 tail;
- head = *head_id_p;
+ head = *head_id_p;
- switch ( head ) {
+ switch (head) {
- case -1:
- {
+ case -1: {
#ifdef DEBUG_QUEUING_LOCKS
- tail = *tail_id_p;
- TRACE_LOCK_HT( gtid+1, "acq read: ", head, tail );
+ tail = *tail_id_p;
+ TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
#endif
- tail = 0; /* to make sure next link asynchronously read is not set accidentally;
- this assignment prevents us from entering the if ( t > 0 )
- condition in the enqueued case below, which is not necessary for
- this state transition */
-
- need_mf = 0;
- /* try (-1,0)->(tid,tid) */
- enqueued = KMP_COMPARE_AND_STORE_ACQ64( (volatile kmp_int64 *) tail_id_p,
- KMP_PACK_64( -1, 0 ),
- KMP_PACK_64( gtid+1, gtid+1 ) );
+ tail = 0; /* to make sure next link asynchronously read is not set
+ accidentally; this assignment prevents us from entering the
+ if ( t > 0 ) condition in the enqueued case below, which is not
+ necessary for this state transition */
+
+ need_mf = 0;
+ /* try (-1,0)->(tid,tid) */
+ enqueued = KMP_COMPARE_AND_STORE_ACQ64((volatile kmp_int64 *)tail_id_p,
+ KMP_PACK_64(-1, 0),
+ KMP_PACK_64(gtid + 1, gtid + 1));
#ifdef DEBUG_QUEUING_LOCKS
- if ( enqueued ) TRACE_LOCK( gtid+1, "acq enq: (-1,0)->(tid,tid)" );
+ if (enqueued)
+ TRACE_LOCK(gtid + 1, "acq enq: (-1,0)->(tid,tid)");
#endif
- }
- break;
+ } break;
- default:
- {
- tail = *tail_id_p;
- KMP_DEBUG_ASSERT( tail != gtid + 1 );
+ default: {
+ tail = *tail_id_p;
+ KMP_DEBUG_ASSERT(tail != gtid + 1);
#ifdef DEBUG_QUEUING_LOCKS
- TRACE_LOCK_HT( gtid+1, "acq read: ", head, tail );
+ TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
#endif
- if ( tail == 0 ) {
- enqueued = FALSE;
- }
- else {
- need_mf = 0;
- /* try (h,t) or (h,h)->(h,tid) */
- enqueued = KMP_COMPARE_AND_STORE_ACQ32( tail_id_p, tail, gtid+1 );
+ if (tail == 0) {
+ enqueued = FALSE;
+ } else {
+ need_mf = 0;
+ /* try (h,t) or (h,h)->(h,tid) */
+ enqueued = KMP_COMPARE_AND_STORE_ACQ32(tail_id_p, tail, gtid + 1);
#ifdef DEBUG_QUEUING_LOCKS
- if ( enqueued ) TRACE_LOCK( gtid+1, "acq enq: (h,t)->(h,tid)" );
+ if (enqueued)
+ TRACE_LOCK(gtid + 1, "acq enq: (h,t)->(h,tid)");
#endif
- }
- }
- break;
-
- case 0: /* empty queue */
- {
- kmp_int32 grabbed_lock;
+ }
+ } break;
+
+ case 0: /* empty queue */
+ {
+ kmp_int32 grabbed_lock;
#ifdef DEBUG_QUEUING_LOCKS
- tail = *tail_id_p;
- TRACE_LOCK_HT( gtid+1, "acq read: ", head, tail );
+ tail = *tail_id_p;
+ TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
#endif
- /* try (0,0)->(-1,0) */
+ /* try (0,0)->(-1,0) */
- /* only legal transition out of head = 0 is head = -1 with no change to tail */
- grabbed_lock = KMP_COMPARE_AND_STORE_ACQ32( head_id_p, 0, -1 );
+ /* only legal transition out of head = 0 is head = -1 with no change to
+ * tail */
+ grabbed_lock = KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1);
- if ( grabbed_lock ) {
+ if (grabbed_lock) {
- *spin_here_p = FALSE;
+ *spin_here_p = FALSE;
- KA_TRACE( 1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: no queuing\n",
- lck, gtid ));
+ KA_TRACE(
+ 1000,
+ ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: no queuing\n",
+ lck, gtid));
#ifdef DEBUG_QUEUING_LOCKS
- TRACE_LOCK_HT( gtid+1, "acq exit: ", head, 0 );
+ TRACE_LOCK_HT(gtid + 1, "acq exit: ", head, 0);
#endif
#if OMPT_SUPPORT
- if (ompt_enabled && prev_state != ompt_state_undefined) {
- /* change the state before clearing wait_id */
- this_thr->th.ompt_thread_info.state = prev_state;
- this_thr->th.ompt_thread_info.wait_id = 0;
- }
+ if (ompt_enabled && prev_state != ompt_state_undefined) {
+ /* change the state before clearing wait_id */
+ this_thr->th.ompt_thread_info.state = prev_state;
+ this_thr->th.ompt_thread_info.wait_id = 0;
+ }
#endif
- KMP_FSYNC_ACQUIRED( lck );
- return KMP_LOCK_ACQUIRED_FIRST; /* lock holder cannot be on queue */
- }
- enqueued = FALSE;
- }
- break;
- }
+ KMP_FSYNC_ACQUIRED(lck);
+ return KMP_LOCK_ACQUIRED_FIRST; /* lock holder cannot be on queue */
+ }
+ enqueued = FALSE;
+ } break;
+ }
#if OMPT_SUPPORT
- if (ompt_enabled && prev_state == ompt_state_undefined) {
- /* this thread will spin; set wait_id before entering wait state */
- prev_state = this_thr->th.ompt_thread_info.state;
- this_thr->th.ompt_thread_info.wait_id = (uint64_t) lck;
- this_thr->th.ompt_thread_info.state = ompt_state_wait_lock;
- }
+ if (ompt_enabled && prev_state == ompt_state_undefined) {
+ /* this thread will spin; set wait_id before entering wait state */
+ prev_state = this_thr->th.ompt_thread_info.state;
+ this_thr->th.ompt_thread_info.wait_id = (uint64_t)lck;
+ this_thr->th.ompt_thread_info.state = ompt_state_wait_lock;
+ }
#endif
- if ( enqueued ) {
- if ( tail > 0 ) {
- kmp_info_t *tail_thr = __kmp_thread_from_gtid( tail - 1 );
- KMP_ASSERT( tail_thr != NULL );
- tail_thr->th.th_next_waiting = gtid+1;
- /* corresponding wait for this write in release code */
- }
- KA_TRACE( 1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d waiting for lock\n", lck, gtid ));
-
+ if (enqueued) {
+ if (tail > 0) {
+ kmp_info_t *tail_thr = __kmp_thread_from_gtid(tail - 1);
+ KMP_ASSERT(tail_thr != NULL);
+ tail_thr->th.th_next_waiting = gtid + 1;
+ /* corresponding wait for this write in release code */
+ }
+ KA_TRACE(1000,
+ ("__kmp_acquire_queuing_lock: lck:%p, T#%d waiting for lock\n",
+ lck, gtid));
- /* ToDo: May want to consider using __kmp_wait_sleep or something that sleeps for
- * throughput only here.
- */
- KMP_MB();
- KMP_WAIT_YIELD(spin_here_p, FALSE, KMP_EQ, lck);
+ /* ToDo: May want to consider using __kmp_wait_sleep or something that
+ sleeps for throughput only here. */
+ KMP_MB();
+ KMP_WAIT_YIELD(spin_here_p, FALSE, KMP_EQ, lck);
#ifdef DEBUG_QUEUING_LOCKS
- TRACE_LOCK( gtid+1, "acq spin" );
+ TRACE_LOCK(gtid + 1, "acq spin");
- if ( this_thr->th.th_next_waiting != 0 )
- __kmp_dump_queuing_lock( this_thr, gtid, lck, *head_id_p, *tail_id_p );
+ if (this_thr->th.th_next_waiting != 0)
+ __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
#endif
- KMP_DEBUG_ASSERT( this_thr->th.th_next_waiting == 0 );
- KA_TRACE( 1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: after waiting on queue\n",
- lck, gtid ));
+ KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
+ KA_TRACE(1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: after "
+ "waiting on queue\n",
+ lck, gtid));
#ifdef DEBUG_QUEUING_LOCKS
- TRACE_LOCK( gtid+1, "acq exit 2" );
+ TRACE_LOCK(gtid + 1, "acq exit 2");
#endif
#if OMPT_SUPPORT
- /* change the state before clearing wait_id */
- this_thr->th.ompt_thread_info.state = prev_state;
- this_thr->th.ompt_thread_info.wait_id = 0;
+ /* change the state before clearing wait_id */
+ this_thr->th.ompt_thread_info.state = prev_state;
+ this_thr->th.ompt_thread_info.wait_id = 0;
#endif
- /* got lock, we were dequeued by the thread that released lock */
- return KMP_LOCK_ACQUIRED_FIRST;
- }
+ /* got lock, we were dequeued by the thread that released lock */
+ return KMP_LOCK_ACQUIRED_FIRST;
+ }
- /* Yield if number of threads > number of logical processors */
- /* ToDo: Not sure why this should only be in oversubscription case,
- maybe should be traditional YIELD_INIT/YIELD_WHEN loop */
- KMP_YIELD( TCR_4( __kmp_nth ) > (__kmp_avail_proc ? __kmp_avail_proc :
- __kmp_xproc ) );
+ /* Yield if number of threads > number of logical processors */
+ /* ToDo: Not sure why this should only be in oversubscription case,
+ maybe should be traditional YIELD_INIT/YIELD_WHEN loop */
+ KMP_YIELD(TCR_4(__kmp_nth) >
+ (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
#ifdef DEBUG_QUEUING_LOCKS
- TRACE_LOCK( gtid+1, "acq retry" );
+ TRACE_LOCK(gtid + 1, "acq retry");
#endif
-
- }
- KMP_ASSERT2( 0, "should not get here" );
- return KMP_LOCK_ACQUIRED_FIRST;
+ }
+ KMP_ASSERT2(0, "should not get here");
+ return KMP_LOCK_ACQUIRED_FIRST;
}
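The ordering requirement spelled out before `*spin_here_p = TRUE` in the acquire path is essentially a publish/hand-off pattern: the waiter's spin flag must become visible no later than its presence on the queue, or the releaser can clear a flag the waiter has not yet set and the waiter hangs. A simplified two-thread sketch of that pattern using std::atomic, with release/acquire standing in for the st.rel/cmpxchg.acq ordering the comment refers to (hypothetical names, not runtime code):

#include <atomic>
#include <thread>

static std::atomic<bool> spin_here{false};
static std::atomic<int> queued_tid{0}; // 0 = nobody enqueued

static void waiter(int tid) {
  spin_here.store(true, std::memory_order_relaxed);
  // Release: anyone who observes the enqueue also observes spin_here == true.
  queued_tid.store(tid, std::memory_order_release);
  while (spin_here.load(std::memory_order_acquire))
    std::this_thread::yield(); // cleared by the releaser when dequeued
}

static void releaser() {
  // Acquire: pairs with the waiter's release store above.
  while (queued_tid.load(std::memory_order_acquire) == 0)
    std::this_thread::yield();
  spin_here.store(false, std::memory_order_release); // hand the lock off
}

int main() {
  std::thread w(waiter, 1), r(releaser);
  w.join();
  r.join();
  return 0;
}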
-int
-__kmp_acquire_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( gtid >= 0 );
+int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(gtid >= 0);
- int retval = __kmp_acquire_queuing_lock_timed_template<false>( lck, gtid );
- ANNOTATE_QUEUING_ACQUIRED(lck);
- return retval;
-}
-
-static int
-__kmp_acquire_queuing_lock_with_checks( kmp_queuing_lock_t *lck,
- kmp_int32 gtid )
-{
- char const * const func = "omp_set_lock";
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_is_queuing_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( __kmp_get_queuing_lock_owner( lck ) == gtid ) {
- KMP_FATAL( LockIsAlreadyOwned, func );
- }
-
- __kmp_acquire_queuing_lock( lck, gtid );
-
- lck->lk.owner_id = gtid + 1;
- return KMP_LOCK_ACQUIRED_FIRST;
+ int retval = __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
+ ANNOTATE_QUEUING_ACQUIRED(lck);
+ return retval;
}
-int
-__kmp_test_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
-{
- volatile kmp_int32 *head_id_p = & lck->lk.head_id;
- kmp_int32 head;
+static int __kmp_acquire_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_set_lock";
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_is_queuing_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if (__kmp_get_queuing_lock_owner(lck) == gtid) {
+ KMP_FATAL(LockIsAlreadyOwned, func);
+ }
+
+ __kmp_acquire_queuing_lock(lck, gtid);
+
+ lck->lk.owner_id = gtid + 1;
+ return KMP_LOCK_ACQUIRED_FIRST;
+}
+
+int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
+ volatile kmp_int32 *head_id_p = &lck->lk.head_id;
+ kmp_int32 head;
#ifdef KMP_DEBUG
- kmp_info_t *this_thr;
+ kmp_info_t *this_thr;
#endif
- KA_TRACE( 1000, ("__kmp_test_queuing_lock: T#%d entering\n", gtid ));
- KMP_DEBUG_ASSERT( gtid >= 0 );
+ KA_TRACE(1000, ("__kmp_test_queuing_lock: T#%d entering\n", gtid));
+ KMP_DEBUG_ASSERT(gtid >= 0);
#ifdef KMP_DEBUG
- this_thr = __kmp_thread_from_gtid( gtid );
- KMP_DEBUG_ASSERT( this_thr != NULL );
- KMP_DEBUG_ASSERT( !this_thr->th.th_spin_here );
-#endif
-
- head = *head_id_p;
-
- if ( head == 0 ) { /* nobody on queue, nobody holding */
+ this_thr = __kmp_thread_from_gtid(gtid);
+ KMP_DEBUG_ASSERT(this_thr != NULL);
+ KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
+#endif
+
+ head = *head_id_p;
+
+ if (head == 0) { /* nobody on queue, nobody holding */
+ /* try (0,0)->(-1,0) */
+ if (KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1)) {
+ KA_TRACE(1000,
+ ("__kmp_test_queuing_lock: T#%d exiting: holding lock\n", gtid));
+ KMP_FSYNC_ACQUIRED(lck);
+ ANNOTATE_QUEUING_ACQUIRED(lck);
+ return TRUE;
+ }
+ }
+
+ KA_TRACE(1000,
+ ("__kmp_test_queuing_lock: T#%d exiting: without lock\n", gtid));
+ return FALSE;
+}
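__kmp_test_queuing_lock is the non-blocking path: one (0,0)->(-1,0) compare-and-swap either takes a free lock or fails immediately without touching the queue. The same try-lock shape in portable C++, as a sketch rather than the runtime's KMP_COMPARE_AND_STORE_ACQ32 macro:

#include <atomic>

static std::atomic<int> head{0}; // 0 = free, -1 = held with empty queue

// Returns true if the lock was free and we took it; never waits.
static bool try_lock() {
  int expected = 0;
  return head.compare_exchange_strong(expected, -1,
                                      std::memory_order_acquire,
                                      std::memory_order_relaxed);
}

static void unlock() { head.store(0, std::memory_order_release); }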
+
+static int __kmp_test_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_test_lock";
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_is_queuing_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
- /* try (0,0)->(-1,0) */
-
- if ( KMP_COMPARE_AND_STORE_ACQ32( head_id_p, 0, -1 ) ) {
- KA_TRACE( 1000, ("__kmp_test_queuing_lock: T#%d exiting: holding lock\n", gtid ));
- KMP_FSYNC_ACQUIRED(lck);
- ANNOTATE_QUEUING_ACQUIRED(lck);
- return TRUE;
- }
- }
-
- KA_TRACE( 1000, ("__kmp_test_queuing_lock: T#%d exiting: without lock\n", gtid ));
- return FALSE;
-}
+ int retval = __kmp_test_queuing_lock(lck, gtid);
-static int
-__kmp_test_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_test_lock";
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_is_queuing_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
-
- int retval = __kmp_test_queuing_lock( lck, gtid );
-
- if ( retval ) {
- lck->lk.owner_id = gtid + 1;
- }
- return retval;
+ if (retval) {
+ lck->lk.owner_id = gtid + 1;
+ }
+ return retval;
}
-int
-__kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
-{
- register kmp_info_t *this_thr;
- volatile kmp_int32 *head_id_p = & lck->lk.head_id;
- volatile kmp_int32 *tail_id_p = & lck->lk.tail_id;
-
- KA_TRACE( 1000, ("__kmp_release_queuing_lock: lck:%p, T#%d entering\n", lck, gtid ));
- KMP_DEBUG_ASSERT( gtid >= 0 );
- this_thr = __kmp_thread_from_gtid( gtid );
- KMP_DEBUG_ASSERT( this_thr != NULL );
-#ifdef DEBUG_QUEUING_LOCKS
- TRACE_LOCK( gtid+1, "rel ent" );
-
- if ( this_thr->th.th_spin_here )
- __kmp_dump_queuing_lock( this_thr, gtid, lck, *head_id_p, *tail_id_p );
- if ( this_thr->th.th_next_waiting != 0 )
- __kmp_dump_queuing_lock( this_thr, gtid, lck, *head_id_p, *tail_id_p );
+int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
+ register kmp_info_t *this_thr;
+ volatile kmp_int32 *head_id_p = &lck->lk.head_id;
+ volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
+
+ KA_TRACE(1000,
+ ("__kmp_release_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
+ KMP_DEBUG_ASSERT(gtid >= 0);
+ this_thr = __kmp_thread_from_gtid(gtid);
+ KMP_DEBUG_ASSERT(this_thr != NULL);
+#ifdef DEBUG_QUEUING_LOCKS
+ TRACE_LOCK(gtid + 1, "rel ent");
+
+ if (this_thr->th.th_spin_here)
+ __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
+ if (this_thr->th.th_next_waiting != 0)
+ __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
#endif
- KMP_DEBUG_ASSERT( !this_thr->th.th_spin_here );
- KMP_DEBUG_ASSERT( this_thr->th.th_next_waiting == 0 );
+ KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
+ KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
- KMP_FSYNC_RELEASING(lck);
- ANNOTATE_QUEUING_RELEASED(lck);
+ KMP_FSYNC_RELEASING(lck);
+ ANNOTATE_QUEUING_RELEASED(lck);
- while( 1 ) {
- kmp_int32 dequeued;
- kmp_int32 head;
- kmp_int32 tail;
+ while (1) {
+ kmp_int32 dequeued;
+ kmp_int32 head;
+ kmp_int32 tail;
- head = *head_id_p;
+ head = *head_id_p;
#ifdef DEBUG_QUEUING_LOCKS
- tail = *tail_id_p;
- TRACE_LOCK_HT( gtid+1, "rel read: ", head, tail );
- if ( head == 0 ) __kmp_dump_queuing_lock( this_thr, gtid, lck, head, tail );
-#endif
- KMP_DEBUG_ASSERT( head != 0 ); /* holding the lock, head must be -1 or queue head */
-
- if ( head == -1 ) { /* nobody on queue */
-
- /* try (-1,0)->(0,0) */
- if ( KMP_COMPARE_AND_STORE_REL32( head_id_p, -1, 0 ) ) {
- KA_TRACE( 1000, ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: queue empty\n",
- lck, gtid ));
+ tail = *tail_id_p;
+ TRACE_LOCK_HT(gtid + 1, "rel read: ", head, tail);
+ if (head == 0)
+ __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
+#endif
+ KMP_DEBUG_ASSERT(head !=
+ 0); /* holding the lock, head must be -1 or queue head */
+
+ if (head == -1) { /* nobody on queue */
+ /* try (-1,0)->(0,0) */
+ if (KMP_COMPARE_AND_STORE_REL32(head_id_p, -1, 0)) {
+ KA_TRACE(
+ 1000,
+ ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: queue empty\n",
+ lck, gtid));
#ifdef DEBUG_QUEUING_LOCKS
- TRACE_LOCK_HT( gtid+1, "rel exit: ", 0, 0 );
+ TRACE_LOCK_HT(gtid + 1, "rel exit: ", 0, 0);
#endif
#if OMPT_SUPPORT
- /* nothing to do - no other thread is trying to shift blame */
+/* nothing to do - no other thread is trying to shift blame */
#endif
-
- return KMP_LOCK_RELEASED;
- }
- dequeued = FALSE;
-
- }
- else {
-
- tail = *tail_id_p;
- if ( head == tail ) { /* only one thread on the queue */
-
+ return KMP_LOCK_RELEASED;
+ }
+ dequeued = FALSE;
+ } else {
+ tail = *tail_id_p;
+ if (head == tail) { /* only one thread on the queue */
#ifdef DEBUG_QUEUING_LOCKS
- if ( head <= 0 ) __kmp_dump_queuing_lock( this_thr, gtid, lck, head, tail );
+ if (head <= 0)
+ __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
#endif
- KMP_DEBUG_ASSERT( head > 0 );
+ KMP_DEBUG_ASSERT(head > 0);
- /* try (h,h)->(-1,0) */
- dequeued = KMP_COMPARE_AND_STORE_REL64( (kmp_int64 *) tail_id_p,
- KMP_PACK_64( head, head ), KMP_PACK_64( -1, 0 ) );
+ /* try (h,h)->(-1,0) */
+ dequeued = KMP_COMPARE_AND_STORE_REL64((kmp_int64 *)tail_id_p,
+ KMP_PACK_64(head, head),
+ KMP_PACK_64(-1, 0));
#ifdef DEBUG_QUEUING_LOCKS
- TRACE_LOCK( gtid+1, "rel deq: (h,h)->(-1,0)" );
+ TRACE_LOCK(gtid + 1, "rel deq: (h,h)->(-1,0)");
#endif
- }
- else {
- volatile kmp_int32 *waiting_id_p;
- kmp_info_t *head_thr = __kmp_thread_from_gtid( head - 1 );
- KMP_DEBUG_ASSERT( head_thr != NULL );
- waiting_id_p = & head_thr->th.th_next_waiting;
+ } else {
+ volatile kmp_int32 *waiting_id_p;
+ kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
+ KMP_DEBUG_ASSERT(head_thr != NULL);
+ waiting_id_p = &head_thr->th.th_next_waiting;
- /* Does this require synchronous reads? */
+/* Does this require synchronous reads? */
#ifdef DEBUG_QUEUING_LOCKS
- if ( head <= 0 || tail <= 0 ) __kmp_dump_queuing_lock( this_thr, gtid, lck, head, tail );
+ if (head <= 0 || tail <= 0)
+ __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
#endif
- KMP_DEBUG_ASSERT( head > 0 && tail > 0 );
-
- /* try (h,t)->(h',t) or (t,t) */
+ KMP_DEBUG_ASSERT(head > 0 && tail > 0);
- KMP_MB();
- /* make sure enqueuing thread has time to update next waiting thread field */
- *head_id_p = KMP_WAIT_YIELD((volatile kmp_uint32*)waiting_id_p, 0, KMP_NEQ, NULL);
+ /* try (h,t)->(h',t) or (t,t) */
+ KMP_MB();
+ /* make sure enqueuing thread has time to update next waiting thread
+ * field */
+ *head_id_p = KMP_WAIT_YIELD((volatile kmp_uint32 *)waiting_id_p, 0,
+ KMP_NEQ, NULL);
#ifdef DEBUG_QUEUING_LOCKS
- TRACE_LOCK( gtid+1, "rel deq: (h,t)->(h',t)" );
+ TRACE_LOCK(gtid + 1, "rel deq: (h,t)->(h',t)");
#endif
- dequeued = TRUE;
- }
- }
+ dequeued = TRUE;
+ }
+ }
- if ( dequeued ) {
- kmp_info_t *head_thr = __kmp_thread_from_gtid( head - 1 );
- KMP_DEBUG_ASSERT( head_thr != NULL );
+ if (dequeued) {
+ kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
+ KMP_DEBUG_ASSERT(head_thr != NULL);
- /* Does this require synchronous reads? */
+/* Does this require synchronous reads? */
#ifdef DEBUG_QUEUING_LOCKS
- if ( head <= 0 || tail <= 0 ) __kmp_dump_queuing_lock( this_thr, gtid, lck, head, tail );
+ if (head <= 0 || tail <= 0)
+ __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
#endif
- KMP_DEBUG_ASSERT( head > 0 && tail > 0 );
+ KMP_DEBUG_ASSERT(head > 0 && tail > 0);
- /* For clean code only.
- * Thread not released until next statement prevents race with acquire code.
- */
- head_thr->th.th_next_waiting = 0;
+ /* For clean code only. Thread not released until next statement prevents
+ race with acquire code. */
+ head_thr->th.th_next_waiting = 0;
#ifdef DEBUG_QUEUING_LOCKS
- TRACE_LOCK_T( gtid+1, "rel nw=0 for t=", head );
+ TRACE_LOCK_T(gtid + 1, "rel nw=0 for t=", head);
#endif
- KMP_MB();
- /* reset spin value */
- head_thr->th.th_spin_here = FALSE;
+ KMP_MB();
+ /* reset spin value */
+ head_thr->th.th_spin_here = FALSE;
- KA_TRACE( 1000, ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: after dequeuing\n",
- lck, gtid ));
+ KA_TRACE(1000, ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: after "
+ "dequeuing\n",
+ lck, gtid));
#ifdef DEBUG_QUEUING_LOCKS
- TRACE_LOCK( gtid+1, "rel exit 2" );
+ TRACE_LOCK(gtid + 1, "rel exit 2");
#endif
- return KMP_LOCK_RELEASED;
- }
- /* KMP_CPU_PAUSE( ); don't want to make releasing thread hold up acquiring threads */
+ return KMP_LOCK_RELEASED;
+ }
+/* KMP_CPU_PAUSE(); don't want to make releasing thread hold up acquiring
+ threads */
#ifdef DEBUG_QUEUING_LOCKS
- TRACE_LOCK( gtid+1, "rel retry" );
+ TRACE_LOCK(gtid + 1, "rel retry");
#endif
- } /* while */
- KMP_ASSERT2( 0, "should not get here" );
- return KMP_LOCK_RELEASED;
+ } /* while */
+ KMP_ASSERT2(0, "should not get here");
+ return KMP_LOCK_RELEASED;
}
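One subtle step in the release path above: when head != tail, the successor has already swung the tail onto itself but may not yet have stored the head thread's th_next_waiting link, so the releaser waits (KMP_WAIT_YIELD) for that link to become non-zero before advancing the head. A reduced sketch of that wait-for-link idiom with hypothetical names:

#include <atomic>
#include <thread>

// The enqueuer publishes its id into the predecessor's "next" link some time
// after it has swung the tail; the releaser must wait for that publication.
static std::atomic<int> next_waiting{0};

int wait_for_successor() {
  int succ;
  while ((succ = next_waiting.load(std::memory_order_acquire)) == 0)
    std::this_thread::yield(); // link not published yet, keep yielding
  return succ;                 // becomes the new head of the queue
}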
-static int
-__kmp_release_queuing_lock_with_checks( kmp_queuing_lock_t *lck,
- kmp_int32 gtid )
-{
- char const * const func = "omp_unset_lock";
- KMP_MB(); /* in case another processor initialized lock */
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_is_queuing_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( __kmp_get_queuing_lock_owner( lck ) == -1 ) {
- KMP_FATAL( LockUnsettingFree, func );
- }
- if ( __kmp_get_queuing_lock_owner( lck ) != gtid ) {
- KMP_FATAL( LockUnsettingSetByAnother, func );
- }
- lck->lk.owner_id = 0;
- return __kmp_release_queuing_lock( lck, gtid );
+static int __kmp_release_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_unset_lock";
+ KMP_MB(); /* in case another processor initialized lock */
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_is_queuing_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if (__kmp_get_queuing_lock_owner(lck) == -1) {
+ KMP_FATAL(LockUnsettingFree, func);
+ }
+ if (__kmp_get_queuing_lock_owner(lck) != gtid) {
+ KMP_FATAL(LockUnsettingSetByAnother, func);
+ }
+ lck->lk.owner_id = 0;
+ return __kmp_release_queuing_lock(lck, gtid);
}
-void
-__kmp_init_queuing_lock( kmp_queuing_lock_t *lck )
-{
- lck->lk.location = NULL;
- lck->lk.head_id = 0;
- lck->lk.tail_id = 0;
- lck->lk.next_ticket = 0;
- lck->lk.now_serving = 0;
- lck->lk.owner_id = 0; // no thread owns the lock.
- lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
- lck->lk.initialized = lck;
+void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck) {
+ lck->lk.location = NULL;
+ lck->lk.head_id = 0;
+ lck->lk.tail_id = 0;
+ lck->lk.next_ticket = 0;
+ lck->lk.now_serving = 0;
+ lck->lk.owner_id = 0; // no thread owns the lock.
+ lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
+ lck->lk.initialized = lck;
- KA_TRACE(1000, ("__kmp_init_queuing_lock: lock %p initialized\n", lck));
+ KA_TRACE(1000, ("__kmp_init_queuing_lock: lock %p initialized\n", lck));
}
-static void
-__kmp_init_queuing_lock_with_checks( kmp_queuing_lock_t * lck )
-{
- __kmp_init_queuing_lock( lck );
+static void __kmp_init_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
+ __kmp_init_queuing_lock(lck);
}
-void
-__kmp_destroy_queuing_lock( kmp_queuing_lock_t *lck )
-{
- lck->lk.initialized = NULL;
- lck->lk.location = NULL;
- lck->lk.head_id = 0;
- lck->lk.tail_id = 0;
- lck->lk.next_ticket = 0;
- lck->lk.now_serving = 0;
- lck->lk.owner_id = 0;
- lck->lk.depth_locked = -1;
+void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck) {
+ lck->lk.initialized = NULL;
+ lck->lk.location = NULL;
+ lck->lk.head_id = 0;
+ lck->lk.tail_id = 0;
+ lck->lk.next_ticket = 0;
+ lck->lk.now_serving = 0;
+ lck->lk.owner_id = 0;
+ lck->lk.depth_locked = -1;
}
-static void
-__kmp_destroy_queuing_lock_with_checks( kmp_queuing_lock_t *lck )
-{
- char const * const func = "omp_destroy_lock";
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_is_queuing_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( __kmp_get_queuing_lock_owner( lck ) != -1 ) {
- KMP_FATAL( LockStillOwned, func );
- }
- __kmp_destroy_queuing_lock( lck );
+static void __kmp_destroy_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
+ char const *const func = "omp_destroy_lock";
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_is_queuing_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if (__kmp_get_queuing_lock_owner(lck) != -1) {
+ KMP_FATAL(LockStillOwned, func);
+ }
+ __kmp_destroy_queuing_lock(lck);
}
-
-//
// nested queuing locks
-//
-int
-__kmp_acquire_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( gtid >= 0 );
-
- if ( __kmp_get_queuing_lock_owner( lck ) == gtid ) {
- lck->lk.depth_locked += 1;
- return KMP_LOCK_ACQUIRED_NEXT;
- }
- else {
- __kmp_acquire_queuing_lock_timed_template<false>( lck, gtid );
- ANNOTATE_QUEUING_ACQUIRED(lck);
- KMP_MB();
- lck->lk.depth_locked = 1;
- KMP_MB();
- lck->lk.owner_id = gtid + 1;
- return KMP_LOCK_ACQUIRED_FIRST;
- }
+int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(gtid >= 0);
+
+ if (__kmp_get_queuing_lock_owner(lck) == gtid) {
+ lck->lk.depth_locked += 1;
+ return KMP_LOCK_ACQUIRED_NEXT;
+ } else {
+ __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
+ ANNOTATE_QUEUING_ACQUIRED(lck);
+ KMP_MB();
+ lck->lk.depth_locked = 1;
+ KMP_MB();
+ lck->lk.owner_id = gtid + 1;
+ return KMP_LOCK_ACQUIRED_FIRST;
+ }
}
static int
-__kmp_acquire_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_set_nest_lock";
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( ! __kmp_is_queuing_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- return __kmp_acquire_nested_queuing_lock( lck, gtid );
+__kmp_acquire_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_set_nest_lock";
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (!__kmp_is_queuing_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ return __kmp_acquire_nested_queuing_lock(lck, gtid);
}
-int
-__kmp_test_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
-{
- int retval;
+int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
+ int retval;
- KMP_DEBUG_ASSERT( gtid >= 0 );
+ KMP_DEBUG_ASSERT(gtid >= 0);
- if ( __kmp_get_queuing_lock_owner( lck ) == gtid ) {
- retval = ++lck->lk.depth_locked;
- }
- else if ( !__kmp_test_queuing_lock( lck, gtid ) ) {
- retval = 0;
- }
- else {
- KMP_MB();
- retval = lck->lk.depth_locked = 1;
- KMP_MB();
- lck->lk.owner_id = gtid + 1;
- }
- return retval;
+ if (__kmp_get_queuing_lock_owner(lck) == gtid) {
+ retval = ++lck->lk.depth_locked;
+ } else if (!__kmp_test_queuing_lock(lck, gtid)) {
+ retval = 0;
+ } else {
+ KMP_MB();
+ retval = lck->lk.depth_locked = 1;
+ KMP_MB();
+ lck->lk.owner_id = gtid + 1;
+ }
+ return retval;
}
-static int
-__kmp_test_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck,
- kmp_int32 gtid )
-{
- char const * const func = "omp_test_nest_lock";
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( ! __kmp_is_queuing_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- return __kmp_test_nested_queuing_lock( lck, gtid );
+static int __kmp_test_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_test_nest_lock";
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (!__kmp_is_queuing_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ return __kmp_test_nested_queuing_lock(lck, gtid);
}
-int
-__kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( gtid >= 0 );
+int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(gtid >= 0);
+ KMP_MB();
+ if (--(lck->lk.depth_locked) == 0) {
KMP_MB();
- if ( --(lck->lk.depth_locked) == 0 ) {
- KMP_MB();
- lck->lk.owner_id = 0;
- __kmp_release_queuing_lock( lck, gtid );
- return KMP_LOCK_RELEASED;
- }
- return KMP_LOCK_STILL_HELD;
+ lck->lk.owner_id = 0;
+ __kmp_release_queuing_lock(lck, gtid);
+ return KMP_LOCK_RELEASED;
+ }
+ return KMP_LOCK_STILL_HELD;
}
static int
-__kmp_release_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_unset_nest_lock";
- KMP_MB(); /* in case another processor initialized lock */
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( ! __kmp_is_queuing_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- if ( __kmp_get_queuing_lock_owner( lck ) == -1 ) {
- KMP_FATAL( LockUnsettingFree, func );
- }
- if ( __kmp_get_queuing_lock_owner( lck ) != gtid ) {
- KMP_FATAL( LockUnsettingSetByAnother, func );
- }
- return __kmp_release_nested_queuing_lock( lck, gtid );
-}
-
-void
-__kmp_init_nested_queuing_lock( kmp_queuing_lock_t * lck )
-{
- __kmp_init_queuing_lock( lck );
- lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
+__kmp_release_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_unset_nest_lock";
+ KMP_MB(); /* in case another processor initialized lock */
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (!__kmp_is_queuing_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ if (__kmp_get_queuing_lock_owner(lck) == -1) {
+ KMP_FATAL(LockUnsettingFree, func);
+ }
+ if (__kmp_get_queuing_lock_owner(lck) != gtid) {
+ KMP_FATAL(LockUnsettingSetByAnother, func);
+ }
+ return __kmp_release_nested_queuing_lock(lck, gtid);
+}
+
+void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck) {
+ __kmp_init_queuing_lock(lck);
+ lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
}
static void
-__kmp_init_nested_queuing_lock_with_checks( kmp_queuing_lock_t * lck )
-{
- __kmp_init_nested_queuing_lock( lck );
-}
-
-void
-__kmp_destroy_nested_queuing_lock( kmp_queuing_lock_t *lck )
-{
- __kmp_destroy_queuing_lock( lck );
- lck->lk.depth_locked = 0;
+__kmp_init_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
+ __kmp_init_nested_queuing_lock(lck);
+}
+
+void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck) {
+ __kmp_destroy_queuing_lock(lck);
+ lck->lk.depth_locked = 0;
}
static void
-__kmp_destroy_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck )
-{
- char const * const func = "omp_destroy_nest_lock";
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( ! __kmp_is_queuing_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- if ( __kmp_get_queuing_lock_owner( lck ) != -1 ) {
- KMP_FATAL( LockStillOwned, func );
- }
- __kmp_destroy_nested_queuing_lock( lck );
+__kmp_destroy_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
+ char const *const func = "omp_destroy_nest_lock";
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (!__kmp_is_queuing_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ if (__kmp_get_queuing_lock_owner(lck) != -1) {
+ KMP_FATAL(LockStillOwned, func);
+ }
+ __kmp_destroy_nested_queuing_lock(lck);
}
-
-//
// access functions to fields which don't exist for all lock kinds.
-//
-static int
-__kmp_is_queuing_lock_initialized( kmp_queuing_lock_t *lck )
-{
- return lck == lck->lk.initialized;
+static int __kmp_is_queuing_lock_initialized(kmp_queuing_lock_t *lck) {
+ return lck == lck->lk.initialized;
}
-static const ident_t *
-__kmp_get_queuing_lock_location( kmp_queuing_lock_t *lck )
-{
- return lck->lk.location;
+static const ident_t *__kmp_get_queuing_lock_location(kmp_queuing_lock_t *lck) {
+ return lck->lk.location;
}
-static void
-__kmp_set_queuing_lock_location( kmp_queuing_lock_t *lck, const ident_t *loc )
-{
- lck->lk.location = loc;
+static void __kmp_set_queuing_lock_location(kmp_queuing_lock_t *lck,
+ const ident_t *loc) {
+ lck->lk.location = loc;
}
-static kmp_lock_flags_t
-__kmp_get_queuing_lock_flags( kmp_queuing_lock_t *lck )
-{
- return lck->lk.flags;
+static kmp_lock_flags_t __kmp_get_queuing_lock_flags(kmp_queuing_lock_t *lck) {
+ return lck->lk.flags;
}
-static void
-__kmp_set_queuing_lock_flags( kmp_queuing_lock_t *lck, kmp_lock_flags_t flags )
-{
- lck->lk.flags = flags;
+static void __kmp_set_queuing_lock_flags(kmp_queuing_lock_t *lck,
+ kmp_lock_flags_t flags) {
+ lck->lk.flags = flags;
}
#if KMP_USE_ADAPTIVE_LOCKS
-/*
- RTM Adaptive locks
-*/
+/* RTM Adaptive locks */
#if KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300
#include <immintrin.h>
-#define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
+#define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
#else
// Values from the status register after failed speculation.
-#define _XBEGIN_STARTED (~0u)
-#define _XABORT_EXPLICIT (1 << 0)
-#define _XABORT_RETRY (1 << 1)
-#define _XABORT_CONFLICT (1 << 2)
-#define _XABORT_CAPACITY (1 << 3)
-#define _XABORT_DEBUG (1 << 4)
-#define _XABORT_NESTED (1 << 5)
-#define _XABORT_CODE(x) ((unsigned char)(((x) >> 24) & 0xFF))
+#define _XBEGIN_STARTED (~0u)
+#define _XABORT_EXPLICIT (1 << 0)
+#define _XABORT_RETRY (1 << 1)
+#define _XABORT_CONFLICT (1 << 2)
+#define _XABORT_CAPACITY (1 << 3)
+#define _XABORT_DEBUG (1 << 4)
+#define _XABORT_NESTED (1 << 5)
+#define _XABORT_CODE(x) ((unsigned char)(((x) >> 24) & 0xFF))
// Aborts for which it's worth trying again immediately
-#define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
+#define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
#define STRINGIZE_INTERNAL(arg) #arg
#define STRINGIZE(arg) STRINGIZE_INTERNAL(arg)
// Access to RTM instructions
-
-/*
- A version of XBegin which returns -1 on speculation, and the value of EAX on an abort.
- This is the same definition as the compiler intrinsic that will be supported at some point.
-*/
-static __inline int _xbegin()
-{
- int res = -1;
+/* A version of XBegin which returns -1 on speculation, and the value of EAX on
+ an abort. This is the same definition as the compiler intrinsic that will be
+ supported at some point. */
+static __inline int _xbegin() {
+ int res = -1;
#if KMP_OS_WINDOWS
#if KMP_ARCH_X86_64
- _asm {
+ _asm {
_emit 0xC7
_emit 0xF8
_emit 2
@@ -1955,9 +1800,9 @@ static __inline int _xbegin()
jmp L2
mov res, eax
L2:
- }
+ }
#else /* IA32 */
- _asm {
+ _asm {
_emit 0xC7
_emit 0xF8
_emit 2
@@ -1967,68 +1812,58 @@ static __inline int _xbegin()
jmp L2
mov res, eax
L2:
- }
+ }
#endif // KMP_ARCH_X86_64
#else
- /* Note that %eax must be noted as killed (clobbered), because
- * the XSR is returned in %eax(%rax) on abort. Other register
- * values are restored, so don't need to be killed.
- *
- * We must also mark 'res' as an input and an output, since otherwise
- * 'res=-1' may be dropped as being dead, whereas we do need the
- * assignment on the successful (i.e., non-abort) path.
- */
- __asm__ volatile ("1: .byte 0xC7; .byte 0xF8;\n"
- " .long 1f-1b-6\n"
- " jmp 2f\n"
- "1: movl %%eax,%0\n"
- "2:"
- :"+r"(res)::"memory","%eax");
+ /* Note that %eax must be noted as killed (clobbered), because the XSR is
+ returned in %eax(%rax) on abort. Other register values are restored, so
+ don't need to be killed.
+
+ We must also mark 'res' as an input and an output, since otherwise
+ 'res=-1' may be dropped as being dead, whereas we do need the assignment on
+ the successful (i.e., non-abort) path. */
+ __asm__ volatile("1: .byte 0xC7; .byte 0xF8;\n"
+ " .long 1f-1b-6\n"
+ " jmp 2f\n"
+ "1: movl %%eax,%0\n"
+ "2:"
+ : "+r"(res)::"memory", "%eax");
#endif // KMP_OS_WINDOWS
- return res;
+ return res;
}
-/*
- Transaction end
-*/
-static __inline void _xend()
-{
+/* Transaction end */
+static __inline void _xend() {
#if KMP_OS_WINDOWS
- __asm {
+ __asm {
_emit 0x0f
_emit 0x01
_emit 0xd5
- }
+ }
#else
- __asm__ volatile (".byte 0x0f; .byte 0x01; .byte 0xd5" :::"memory");
+ __asm__ volatile(".byte 0x0f; .byte 0x01; .byte 0xd5" ::: "memory");
#endif
}
-/*
- This is a macro, the argument must be a single byte constant which
- can be evaluated by the inline assembler, since it is emitted as a
- byte into the assembly code.
-*/
+/* This is a macro, the argument must be a single byte constant which can be
+ evaluated by the inline assembler, since it is emitted as a byte into the
+ assembly code. */
+// clang-format off
#if KMP_OS_WINDOWS
-#define _xabort(ARG) \
- _asm _emit 0xc6 \
- _asm _emit 0xf8 \
- _asm _emit ARG
+#define _xabort(ARG) _asm _emit 0xc6 _asm _emit 0xf8 _asm _emit ARG
#else
-#define _xabort(ARG) \
- __asm__ volatile (".byte 0xC6; .byte 0xF8; .byte " STRINGIZE(ARG) :::"memory");
+#define _xabort(ARG) \
+ __asm__ volatile(".byte 0xC6; .byte 0xF8; .byte " STRINGIZE(ARG):::"memory");
#endif
-
+// clang-format on
#endif // KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300
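For orientation, the intrinsics and abort-status macros above are normally used in a speculate/commit/fall-back loop. The sketch below assumes those definitions are in scope; the retry policy shown is a placeholder, not the adaptive lock's actual algorithm, which appears later in this file:

// Illustrative speculation loop; assumes _xbegin/_xend and the abort masks
// defined above. The retry policy here is a guess, not the runtime's.
static bool try_speculative_section(int max_retries) {
  for (int attempt = 0; attempt <= max_retries; ++attempt) {
    int status = _xbegin();
    if ((unsigned)status == _XBEGIN_STARTED) {
      // ... speculatively execute the critical section ...
      _xend();                  // commit the transaction
      return true;
    }
    if (!(status & SOFT_ABORT_MASK))
      break;                    // hard abort: retrying is pointless
  }
  return false;                 // caller takes the real lock instead
}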
-//
-// Statistics is collected for testing purpose
-//
+// Statistics are collected for testing purposes
#if KMP_DEBUG_ADAPTIVE_LOCKS
-// We accumulate speculative lock statistics when the lock is destroyed.
-// We keep locks that haven't been destroyed in the liveLocks list
-// so that we can grab their statistics too.
+// We accumulate speculative lock statistics when the lock is destroyed. We
+// keep locks that haven't been destroyed in the liveLocks list so that we can
+// grab their statistics too.
static kmp_adaptive_lock_statistics_t destroyedStats;
// To hold the list of live locks.
@@ -2038,1057 +1873,922 @@ static kmp_adaptive_lock_info_t liveLock
static kmp_bootstrap_lock_t chain_lock;
// Initialize the list of stats.
-void
-__kmp_init_speculative_stats()
-{
- kmp_adaptive_lock_info_t *lck = &liveLocks;
-
- memset( ( void * ) & ( lck->stats ), 0, sizeof( lck->stats ) );
- lck->stats.next = lck;
- lck->stats.prev = lck;
+void __kmp_init_speculative_stats() {
+ kmp_adaptive_lock_info_t *lck = &liveLocks;
- KMP_ASSERT( lck->stats.next->stats.prev == lck );
- KMP_ASSERT( lck->stats.prev->stats.next == lck );
+ memset((void *)&(lck->stats), 0, sizeof(lck->stats));
+ lck->stats.next = lck;
+ lck->stats.prev = lck;
- __kmp_init_bootstrap_lock( &chain_lock );
+ KMP_ASSERT(lck->stats.next->stats.prev == lck);
+ KMP_ASSERT(lck->stats.prev->stats.next == lck);
+ __kmp_init_bootstrap_lock(&chain_lock);
}
// Insert the lock into the circular list
-static void
-__kmp_remember_lock( kmp_adaptive_lock_info_t * lck )
-{
- __kmp_acquire_bootstrap_lock( &chain_lock );
+static void __kmp_remember_lock(kmp_adaptive_lock_info_t *lck) {
+ __kmp_acquire_bootstrap_lock(&chain_lock);
- lck->stats.next = liveLocks.stats.next;
- lck->stats.prev = &liveLocks;
+ lck->stats.next = liveLocks.stats.next;
+ lck->stats.prev = &liveLocks;
- liveLocks.stats.next = lck;
- lck->stats.next->stats.prev = lck;
+ liveLocks.stats.next = lck;
+ lck->stats.next->stats.prev = lck;
- KMP_ASSERT( lck->stats.next->stats.prev == lck );
- KMP_ASSERT( lck->stats.prev->stats.next == lck );
+ KMP_ASSERT(lck->stats.next->stats.prev == lck);
+ KMP_ASSERT(lck->stats.prev->stats.next == lck);
- __kmp_release_bootstrap_lock( &chain_lock );
+ __kmp_release_bootstrap_lock(&chain_lock);
}
-static void
-__kmp_forget_lock( kmp_adaptive_lock_info_t * lck )
-{
- KMP_ASSERT( lck->stats.next->stats.prev == lck );
- KMP_ASSERT( lck->stats.prev->stats.next == lck );
+static void __kmp_forget_lock(kmp_adaptive_lock_info_t *lck) {
+ KMP_ASSERT(lck->stats.next->stats.prev == lck);
+ KMP_ASSERT(lck->stats.prev->stats.next == lck);
- kmp_adaptive_lock_info_t * n = lck->stats.next;
- kmp_adaptive_lock_info_t * p = lck->stats.prev;
+ kmp_adaptive_lock_info_t *n = lck->stats.next;
+ kmp_adaptive_lock_info_t *p = lck->stats.prev;
- n->stats.prev = p;
- p->stats.next = n;
+ n->stats.prev = p;
+ p->stats.next = n;
}
-static void
-__kmp_zero_speculative_stats( kmp_adaptive_lock_info_t * lck )
-{
- memset( ( void * )&lck->stats, 0, sizeof( lck->stats ) );
- __kmp_remember_lock( lck );
+static void __kmp_zero_speculative_stats(kmp_adaptive_lock_info_t *lck) {
+ memset((void *)&lck->stats, 0, sizeof(lck->stats));
+ __kmp_remember_lock(lck);
}
-static void
-__kmp_add_stats( kmp_adaptive_lock_statistics_t * t, kmp_adaptive_lock_info_t * lck )
-{
- kmp_adaptive_lock_statistics_t volatile *s = &lck->stats;
-
- t->nonSpeculativeAcquireAttempts += lck->acquire_attempts;
- t->successfulSpeculations += s->successfulSpeculations;
- t->hardFailedSpeculations += s->hardFailedSpeculations;
- t->softFailedSpeculations += s->softFailedSpeculations;
- t->nonSpeculativeAcquires += s->nonSpeculativeAcquires;
- t->lemmingYields += s->lemmingYields;
+static void __kmp_add_stats(kmp_adaptive_lock_statistics_t *t,
+ kmp_adaptive_lock_info_t *lck) {
+ kmp_adaptive_lock_statistics_t volatile *s = &lck->stats;
+
+ t->nonSpeculativeAcquireAttempts += lck->acquire_attempts;
+ t->successfulSpeculations += s->successfulSpeculations;
+ t->hardFailedSpeculations += s->hardFailedSpeculations;
+ t->softFailedSpeculations += s->softFailedSpeculations;
+ t->nonSpeculativeAcquires += s->nonSpeculativeAcquires;
+ t->lemmingYields += s->lemmingYields;
}
-static void
-__kmp_accumulate_speculative_stats( kmp_adaptive_lock_info_t * lck)
-{
- kmp_adaptive_lock_statistics_t *t = &destroyedStats;
+static void __kmp_accumulate_speculative_stats(kmp_adaptive_lock_info_t *lck) {
+ kmp_adaptive_lock_statistics_t *t = &destroyedStats;
- __kmp_acquire_bootstrap_lock( &chain_lock );
+ __kmp_acquire_bootstrap_lock(&chain_lock);
- __kmp_add_stats( &destroyedStats, lck );
- __kmp_forget_lock( lck );
+ __kmp_add_stats(&destroyedStats, lck);
+ __kmp_forget_lock(lck);
- __kmp_release_bootstrap_lock( &chain_lock );
+ __kmp_release_bootstrap_lock(&chain_lock);
}
-static float
-percent (kmp_uint32 count, kmp_uint32 total)
-{
- return (total == 0) ? 0.0: (100.0 * count)/total;
+static float percent(kmp_uint32 count, kmp_uint32 total) {
+ return (total == 0) ? 0.0 : (100.0 * count) / total;
}
-static
-FILE * __kmp_open_stats_file()
-{
- if (strcmp (__kmp_speculative_statsfile, "-") == 0)
- return stdout;
+static FILE *__kmp_open_stats_file() {
+ if (strcmp(__kmp_speculative_statsfile, "-") == 0)
+ return stdout;
- size_t buffLen = KMP_STRLEN( __kmp_speculative_statsfile ) + 20;
- char buffer[buffLen];
- KMP_SNPRINTF (&buffer[0], buffLen, __kmp_speculative_statsfile,
- (kmp_int32)getpid());
- FILE * result = fopen(&buffer[0], "w");
+ size_t buffLen = KMP_STRLEN(__kmp_speculative_statsfile) + 20;
+ char buffer[buffLen];
+ KMP_SNPRINTF(&buffer[0], buffLen, __kmp_speculative_statsfile,
+ (kmp_int32)getpid());
+ FILE *result = fopen(&buffer[0], "w");
- // Maybe we should issue a warning here...
- return result ? result : stdout;
+ // Maybe we should issue a warning here...
+ return result ? result : stdout;
}
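__kmp_open_stats_file treats the configured statistics file name as a printf-style template, so a value containing %d picks up the process id, and "-" selects stdout. A small standalone sketch of that expansion; the template string here is hypothetical:

#include <stdio.h>
#include <unistd.h>

int main() {
  const char *tmpl = "adaptive-lock-stats.%d.txt"; // hypothetical template
  char path[64];
  snprintf(path, sizeof(path), tmpl, (int)getpid());
  printf("speculative-lock statistics would go to: %s\n", path);
  return 0;
}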
-void
-__kmp_print_speculative_stats()
-{
- if (__kmp_user_lock_kind != lk_adaptive)
- return;
+void __kmp_print_speculative_stats() {
+ if (__kmp_user_lock_kind != lk_adaptive)
+ return;
- FILE * statsFile = __kmp_open_stats_file();
+ FILE *statsFile = __kmp_open_stats_file();
- kmp_adaptive_lock_statistics_t total = destroyedStats;
- kmp_adaptive_lock_info_t *lck;
+ kmp_adaptive_lock_statistics_t total = destroyedStats;
+ kmp_adaptive_lock_info_t *lck;
- for (lck = liveLocks.stats.next; lck != &liveLocks; lck = lck->stats.next) {
- __kmp_add_stats( &total, lck );
- }
- kmp_adaptive_lock_statistics_t *t = &total;
- kmp_uint32 totalSections = t->nonSpeculativeAcquires + t->successfulSpeculations;
- kmp_uint32 totalSpeculations = t->successfulSpeculations + t->hardFailedSpeculations +
- t->softFailedSpeculations;
-
- fprintf ( statsFile, "Speculative lock statistics (all approximate!)\n");
- fprintf ( statsFile, " Lock parameters: \n"
- " max_soft_retries : %10d\n"
- " max_badness : %10d\n",
- __kmp_adaptive_backoff_params.max_soft_retries,
- __kmp_adaptive_backoff_params.max_badness);
- fprintf( statsFile, " Non-speculative acquire attempts : %10d\n", t->nonSpeculativeAcquireAttempts );
- fprintf( statsFile, " Total critical sections : %10d\n", totalSections );
- fprintf( statsFile, " Successful speculations : %10d (%5.1f%%)\n",
- t->successfulSpeculations, percent( t->successfulSpeculations, totalSections ) );
- fprintf( statsFile, " Non-speculative acquires : %10d (%5.1f%%)\n",
- t->nonSpeculativeAcquires, percent( t->nonSpeculativeAcquires, totalSections ) );
- fprintf( statsFile, " Lemming yields : %10d\n\n", t->lemmingYields );
-
- fprintf( statsFile, " Speculative acquire attempts : %10d\n", totalSpeculations );
- fprintf( statsFile, " Successes : %10d (%5.1f%%)\n",
- t->successfulSpeculations, percent( t->successfulSpeculations, totalSpeculations ) );
- fprintf( statsFile, " Soft failures : %10d (%5.1f%%)\n",
- t->softFailedSpeculations, percent( t->softFailedSpeculations, totalSpeculations ) );
- fprintf( statsFile, " Hard failures : %10d (%5.1f%%)\n",
- t->hardFailedSpeculations, percent( t->hardFailedSpeculations, totalSpeculations ) );
+ for (lck = liveLocks.stats.next; lck != &liveLocks; lck = lck->stats.next) {
+ __kmp_add_stats(&total, lck);
+ }
+ kmp_adaptive_lock_statistics_t *t = &total;
+ kmp_uint32 totalSections =
+ t->nonSpeculativeAcquires + t->successfulSpeculations;
+ kmp_uint32 totalSpeculations = t->successfulSpeculations +
+ t->hardFailedSpeculations +
+ t->softFailedSpeculations;
- if (statsFile != stdout)
- fclose( statsFile );
+ fprintf(statsFile, "Speculative lock statistics (all approximate!)\n");
+ fprintf(statsFile, " Lock parameters: \n"
+ " max_soft_retries : %10d\n"
+ " max_badness : %10d\n",
+ __kmp_adaptive_backoff_params.max_soft_retries,
+ __kmp_adaptive_backoff_params.max_badness);
+ fprintf(statsFile, " Non-speculative acquire attempts : %10d\n",
+ t->nonSpeculativeAcquireAttempts);
+ fprintf(statsFile, " Total critical sections : %10d\n",
+ totalSections);
+ fprintf(statsFile, " Successful speculations : %10d (%5.1f%%)\n",
+ t->successfulSpeculations,
+ percent(t->successfulSpeculations, totalSections));
+ fprintf(statsFile, " Non-speculative acquires : %10d (%5.1f%%)\n",
+ t->nonSpeculativeAcquires,
+ percent(t->nonSpeculativeAcquires, totalSections));
+ fprintf(statsFile, " Lemming yields : %10d\n\n",
+ t->lemmingYields);
+
+ fprintf(statsFile, " Speculative acquire attempts : %10d\n",
+ totalSpeculations);
+ fprintf(statsFile, " Successes : %10d (%5.1f%%)\n",
+ t->successfulSpeculations,
+ percent(t->successfulSpeculations, totalSpeculations));
+ fprintf(statsFile, " Soft failures : %10d (%5.1f%%)\n",
+ t->softFailedSpeculations,
+ percent(t->softFailedSpeculations, totalSpeculations));
+ fprintf(statsFile, " Hard failures : %10d (%5.1f%%)\n",
+ t->hardFailedSpeculations,
+ percent(t->hardFailedSpeculations, totalSpeculations));
+
+ if (statsFile != stdout)
+ fclose(statsFile);
}
-# define KMP_INC_STAT(lck,stat) ( lck->lk.adaptive.stats.stat++ )
+#define KMP_INC_STAT(lck, stat) (lck->lk.adaptive.stats.stat++)
#else
-# define KMP_INC_STAT(lck,stat)
+#define KMP_INC_STAT(lck, stat)
#endif // KMP_DEBUG_ADAPTIVE_LOCKS
-static inline bool
-__kmp_is_unlocked_queuing_lock( kmp_queuing_lock_t *lck )
-{
- // It is enough to check that the head_id is zero.
- // We don't also need to check the tail.
- bool res = lck->lk.head_id == 0;
+static inline bool __kmp_is_unlocked_queuing_lock(kmp_queuing_lock_t *lck) {
+ // It is enough to check that the head_id is zero.
+ // We don't also need to check the tail.
+ bool res = lck->lk.head_id == 0;
- // We need a fence here, since we must ensure that no memory operations
- // from later in this thread float above that read.
+// We need a fence here, since we must ensure that no memory operations
+// from later in this thread float above that read.
#if KMP_COMPILER_ICC
- _mm_mfence();
+ _mm_mfence();
#else
- __sync_synchronize();
+ __sync_synchronize();
#endif
- return res;
+ return res;
}
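For illustration, a portable C++11 sketch of the same full-fence requirement (hypothetical helper name, not part of this file); the runtime uses the compiler-specific fences shown above, but std::atomic_thread_fence with seq_cst expresses the same intent:

  #include <atomic>
  #include <cstdio>

  // Hypothetical sketch: read the head word, then fence so that no later
  // memory operations in this thread are reordered above that read.
  static bool toy_is_unlocked(const std::atomic<int> &head_id) {
    bool res = head_id.load(std::memory_order_relaxed) == 0;
    std::atomic_thread_fence(std::memory_order_seq_cst); // full fence
    return res;
  }

  int main() {
    std::atomic<int> head_id{0};
    std::printf("unlocked=%d\n", (int)toy_is_unlocked(head_id));
    return 0;
  }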
// Functions for manipulating the badness
static __inline void
-__kmp_update_badness_after_success( kmp_adaptive_lock_t *lck )
-{
- // Reset the badness to zero so we eagerly try to speculate again
- lck->lk.adaptive.badness = 0;
- KMP_INC_STAT(lck,successfulSpeculations);
+__kmp_update_badness_after_success(kmp_adaptive_lock_t *lck) {
+ // Reset the badness to zero so we eagerly try to speculate again
+ lck->lk.adaptive.badness = 0;
+ KMP_INC_STAT(lck, successfulSpeculations);
}
// Create a bit mask with one more set bit.
-static __inline void
-__kmp_step_badness( kmp_adaptive_lock_t *lck )
-{
- kmp_uint32 newBadness = ( lck->lk.adaptive.badness << 1 ) | 1;
- if ( newBadness > lck->lk.adaptive.max_badness) {
- return;
- } else {
- lck->lk.adaptive.badness = newBadness;
- }
+static __inline void __kmp_step_badness(kmp_adaptive_lock_t *lck) {
+ kmp_uint32 newBadness = (lck->lk.adaptive.badness << 1) | 1;
+ if (newBadness > lck->lk.adaptive.max_badness) {
+ return;
+ } else {
+ lck->lk.adaptive.badness = newBadness;
+ }
}
// Check whether speculation should be attempted.
-static __inline int
-__kmp_should_speculate( kmp_adaptive_lock_t *lck, kmp_int32 gtid )
-{
- kmp_uint32 badness = lck->lk.adaptive.badness;
- kmp_uint32 attempts= lck->lk.adaptive.acquire_attempts;
- int res = (attempts & badness) == 0;
- return res;
+static __inline int __kmp_should_speculate(kmp_adaptive_lock_t *lck,
+ kmp_int32 gtid) {
+ kmp_uint32 badness = lck->lk.adaptive.badness;
+ kmp_uint32 attempts = lck->lk.adaptive.acquire_attempts;
+ int res = (attempts & badness) == 0;
+ return res;
}
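For illustration, a self-contained sketch (hypothetical names) of the badness/attempts gating used above: badness is a mask of low set bits, so speculation is retried only on attempt counts whose low bits are all clear, i.e. at exponentially spaced intervals once the lock has misbehaved:

  #include <cstdint>
  #include <cstdio>

  // Hypothetical model of the gating in __kmp_should_speculate /
  // __kmp_step_badness: badness grows 0, 1, 3, 7, ... and speculation is
  // attempted only when (attempts & badness) == 0.
  int main() {
    uint32_t badness = 0, max_badness = 7;
    // Two failed speculations: badness grows 0 -> 1 -> 3.
    for (int fail = 0; fail < 2; ++fail) {
      uint32_t next = (badness << 1) | 1;
      if (next <= max_badness)
        badness = next;
    }
    for (uint32_t attempts = 0; attempts < 8; ++attempts)
      std::printf("attempt %u: speculate=%d\n", (unsigned)attempts,
                  (attempts & badness) == 0);
    // With badness == 3, only attempts 0 and 4 pass the check.
    return 0;
  }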
// Attempt to acquire only the speculative lock.
// Does not back off to the non-speculative lock.
-//
-static int
-__kmp_test_adaptive_lock_only( kmp_adaptive_lock_t * lck, kmp_int32 gtid )
-{
- int retries = lck->lk.adaptive.max_soft_retries;
-
- // We don't explicitly count the start of speculation, rather we record
- // the results (success, hard fail, soft fail). The sum of all of those
- // is the total number of times we started speculation since all
- // speculations must end one of those ways.
- do
- {
- kmp_uint32 status = _xbegin();
- // Switch this in to disable actual speculation but exercise
- // at least some of the rest of the code. Useful for debugging...
- // kmp_uint32 status = _XABORT_NESTED;
-
- if (status == _XBEGIN_STARTED )
- { /* We have successfully started speculation
- * Check that no-one acquired the lock for real between when we last looked
- * and now. This also gets the lock cache line into our read-set,
- * which we need so that we'll abort if anyone later claims it for real.
- */
- if (! __kmp_is_unlocked_queuing_lock( GET_QLK_PTR(lck) ) )
- {
- // Lock is now visibly acquired, so someone beat us to it.
- // Abort the transaction so we'll restart from _xbegin with the
- // failure status.
- _xabort(0x01);
- KMP_ASSERT2( 0, "should not get here" );
- }
- return 1; // Lock has been acquired (speculatively)
- } else {
- // We have aborted, update the statistics
- if ( status & SOFT_ABORT_MASK)
- {
- KMP_INC_STAT(lck,softFailedSpeculations);
- // and loop round to retry.
- }
- else
- {
- KMP_INC_STAT(lck,hardFailedSpeculations);
- // Give up if we had a hard failure.
- break;
- }
- }
- } while( retries-- ); // Loop while we have retries, and didn't fail hard.
-
- // Either we had a hard failure or we didn't succeed softly after
- // the full set of attempts, so back off the badness.
- __kmp_step_badness( lck );
- return 0;
-}
-
-// Attempt to acquire the speculative lock, or back off to the non-speculative one
-// if the speculative lock cannot be acquired.
-// We can succeed speculatively, non-speculatively, or fail.
-static int
-__kmp_test_adaptive_lock( kmp_adaptive_lock_t *lck, kmp_int32 gtid )
-{
- // First try to acquire the lock speculatively
- if ( __kmp_should_speculate( lck, gtid ) && __kmp_test_adaptive_lock_only( lck, gtid ) )
- return 1;
-
- // Speculative acquisition failed, so try to acquire it non-speculatively.
- // Count the non-speculative acquire attempt
- lck->lk.adaptive.acquire_attempts++;
-
- // Use base, non-speculative lock.
- if ( __kmp_test_queuing_lock( GET_QLK_PTR(lck), gtid ) )
- {
- KMP_INC_STAT(lck,nonSpeculativeAcquires);
- return 1; // Lock is acquired (non-speculatively)
- }
- else
- {
- return 0; // Failed to acquire the lock, it's already visibly locked.
- }
-}
-
-static int
-__kmp_test_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_test_lock";
- if ( lck->lk.qlk.initialized != GET_QLK_PTR(lck) ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
-
- int retval = __kmp_test_adaptive_lock( lck, gtid );
-
- if ( retval ) {
- lck->lk.qlk.owner_id = gtid + 1;
- }
- return retval;
-}
-
-// Block until we can acquire a speculative, adaptive lock.
-// We check whether we should be trying to speculate.
-// If we should be, we check the real lock to see if it is free,
-// and, if not, pause without attempting to acquire it until it is.
-// Then we try the speculative acquire.
-// This means that although we suffer from lemmings a little (
-// because all we can't acquire the lock speculatively until
-// the queue of threads waiting has cleared), we don't get into a
-// state where we can never acquire the lock speculatively (because we
-// force the queue to clear by preventing new arrivals from entering the
-// queue).
-// This does mean that when we're trying to break lemmings, the lock
-// is no longer fair. However OpenMP makes no guarantee that its
-// locks are fair, so this isn't a real problem.
-static void
-__kmp_acquire_adaptive_lock( kmp_adaptive_lock_t * lck, kmp_int32 gtid )
-{
- if ( __kmp_should_speculate( lck, gtid ) )
- {
- if ( __kmp_is_unlocked_queuing_lock( GET_QLK_PTR(lck) ) )
- {
- if ( __kmp_test_adaptive_lock_only( lck , gtid ) )
- return;
- // We tried speculation and failed, so give up.
- }
- else
- {
- // We can't try speculation until the lock is free, so we
- // pause here (without suspending on the queueing lock,
- // to allow it to drain, then try again.
- // All other threads will also see the same result for
- // shouldSpeculate, so will be doing the same if they
- // try to claim the lock from now on.
- while ( ! __kmp_is_unlocked_queuing_lock( GET_QLK_PTR(lck) ) )
- {
- KMP_INC_STAT(lck,lemmingYields);
- __kmp_yield (TRUE);
- }
-
- if ( __kmp_test_adaptive_lock_only( lck, gtid ) )
- return;
- }
+static int __kmp_test_adaptive_lock_only(kmp_adaptive_lock_t *lck,
+ kmp_int32 gtid) {
+ int retries = lck->lk.adaptive.max_soft_retries;
+
+ // We don't explicitly count the start of speculation, rather we record the
+ // results (success, hard fail, soft fail). The sum of all of those is the
+ // total number of times we started speculation since all speculations must
+ // end one of those ways.
+ do {
+ kmp_uint32 status = _xbegin();
+ // Switch this in to disable actual speculation but exercise at least some
+ // of the rest of the code. Useful for debugging...
+ // kmp_uint32 status = _XABORT_NESTED;
+
+ if (status == _XBEGIN_STARTED) {
+ /* We have successfully started speculation. Check that no-one acquired
+ the lock for real between when we last looked and now. This also gets
+ the lock cache line into our read-set, which we need so that we'll
+ abort if anyone later claims it for real. */
+ if (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
+ // Lock is now visibly acquired, so someone beat us to it. Abort the
+ // transaction so we'll restart from _xbegin with the failure status.
+ _xabort(0x01);
+ KMP_ASSERT2(0, "should not get here");
+ }
+ return 1; // Lock has been acquired (speculatively)
+ } else {
+ // We have aborted, update the statistics
+ if (status & SOFT_ABORT_MASK) {
+ KMP_INC_STAT(lck, softFailedSpeculations);
+ // and loop round to retry.
+ } else {
+ KMP_INC_STAT(lck, hardFailedSpeculations);
+ // Give up if we had a hard failure.
+ break;
+ }
}
+ } while (retries--); // Loop while we have retries, and didn't fail hard.
- // Speculative acquisition failed, so acquire it non-speculatively.
- // Count the non-speculative acquire attempt
- lck->lk.adaptive.acquire_attempts++;
-
- __kmp_acquire_queuing_lock_timed_template<FALSE>( GET_QLK_PTR(lck), gtid );
- // We have acquired the base lock, so count that.
- KMP_INC_STAT(lck,nonSpeculativeAcquires );
- ANNOTATE_QUEUING_ACQUIRED(lck);
+ // Either we had a hard failure or we didn't succeed softly after
+ // the full set of attempts, so back off the badness.
+ __kmp_step_badness(lck);
+ return 0;
}
-static void
-__kmp_acquire_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_set_lock";
- if ( lck->lk.qlk.initialized != GET_QLK_PTR(lck) ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_get_queuing_lock_owner( GET_QLK_PTR(lck) ) == gtid ) {
- KMP_FATAL( LockIsAlreadyOwned, func );
- }
+// Attempt to acquire the speculative lock, or back off to the non-speculative
+// one if the speculative lock cannot be acquired.
+// We can succeed speculatively, non-speculatively, or fail.
+static int __kmp_test_adaptive_lock(kmp_adaptive_lock_t *lck, kmp_int32 gtid) {
+ // First try to acquire the lock speculatively
+ if (__kmp_should_speculate(lck, gtid) &&
+ __kmp_test_adaptive_lock_only(lck, gtid))
+ return 1;
+
+ // Speculative acquisition failed, so try to acquire it non-speculatively.
+ // Count the non-speculative acquire attempt
+ lck->lk.adaptive.acquire_attempts++;
+
+ // Use base, non-speculative lock.
+ if (__kmp_test_queuing_lock(GET_QLK_PTR(lck), gtid)) {
+ KMP_INC_STAT(lck, nonSpeculativeAcquires);
+ return 1; // Lock is acquired (non-speculatively)
+ } else {
+ return 0; // Failed to acquire the lock, it's already visibly locked.
+ }
+}
+
+static int __kmp_test_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_test_lock";
+ if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
- __kmp_acquire_adaptive_lock( lck, gtid );
+ int retval = __kmp_test_adaptive_lock(lck, gtid);
+ if (retval) {
lck->lk.qlk.owner_id = gtid + 1;
+ }
+ return retval;
}
-static int
-__kmp_release_adaptive_lock( kmp_adaptive_lock_t *lck, kmp_int32 gtid )
-{
- if ( __kmp_is_unlocked_queuing_lock( GET_QLK_PTR(lck) ) )
- { // If the lock doesn't look claimed we must be speculating.
- // (Or the user's code is buggy and they're releasing without locking;
- // if we had XTEST we'd be able to check that case...)
- _xend(); // Exit speculation
- __kmp_update_badness_after_success( lck );
- }
- else
- { // Since the lock *is* visibly locked we're not speculating,
- // so should use the underlying lock's release scheme.
- __kmp_release_queuing_lock( GET_QLK_PTR(lck), gtid );
- }
- return KMP_LOCK_RELEASED;
-}
+// Block until we can acquire a speculative, adaptive lock. We check whether we
+// should be trying to speculate. If we should be, we check the real lock to see
+// if it is free, and, if not, pause without attempting to acquire it until it
+// is. Then we try the speculative acquire. This means that although we suffer
+// from lemmings a little (because we can't acquire the lock speculatively
+// until the queue of threads waiting has cleared), we don't get into a state
+// where we can never acquire the lock speculatively (because we force the queue
+// to clear by preventing new arrivals from entering the queue). This does mean
+// that when we're trying to break lemmings, the lock is no longer fair. However
+// OpenMP makes no guarantee that its locks are fair, so this isn't a real
+// problem.
+static void __kmp_acquire_adaptive_lock(kmp_adaptive_lock_t *lck,
+ kmp_int32 gtid) {
+ if (__kmp_should_speculate(lck, gtid)) {
+ if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
+ if (__kmp_test_adaptive_lock_only(lck, gtid))
+ return;
+ // We tried speculation and failed, so give up.
+ } else {
+ // We can't try speculation until the lock is free, so we pause here
+      // (without suspending on the queueing lock) to allow it to drain, then
+ // try again. All other threads will also see the same result for
+ // shouldSpeculate, so will be doing the same if they try to claim the
+ // lock from now on.
+ while (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
+ KMP_INC_STAT(lck, lemmingYields);
+ __kmp_yield(TRUE);
+ }
-static int
-__kmp_release_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_unset_lock";
- KMP_MB(); /* in case another processor initialized lock */
- if ( lck->lk.qlk.initialized != GET_QLK_PTR(lck) ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_get_queuing_lock_owner( GET_QLK_PTR(lck) ) == -1 ) {
- KMP_FATAL( LockUnsettingFree, func );
- }
- if ( __kmp_get_queuing_lock_owner( GET_QLK_PTR(lck) ) != gtid ) {
- KMP_FATAL( LockUnsettingSetByAnother, func );
+ if (__kmp_test_adaptive_lock_only(lck, gtid))
+ return;
}
- lck->lk.qlk.owner_id = 0;
- __kmp_release_adaptive_lock( lck, gtid );
- return KMP_LOCK_RELEASED;
-}
+ }
-static void
-__kmp_init_adaptive_lock( kmp_adaptive_lock_t *lck )
-{
- __kmp_init_queuing_lock( GET_QLK_PTR(lck) );
- lck->lk.adaptive.badness = 0;
- lck->lk.adaptive.acquire_attempts = 0; //nonSpeculativeAcquireAttempts = 0;
- lck->lk.adaptive.max_soft_retries = __kmp_adaptive_backoff_params.max_soft_retries;
- lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness;
+ // Speculative acquisition failed, so acquire it non-speculatively.
+ // Count the non-speculative acquire attempt
+ lck->lk.adaptive.acquire_attempts++;
+
+ __kmp_acquire_queuing_lock_timed_template<FALSE>(GET_QLK_PTR(lck), gtid);
+ // We have acquired the base lock, so count that.
+ KMP_INC_STAT(lck, nonSpeculativeAcquires);
+ ANNOTATE_QUEUING_ACQUIRED(lck);
+}
+
+static void __kmp_acquire_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_set_lock";
+ if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == gtid) {
+ KMP_FATAL(LockIsAlreadyOwned, func);
+ }
+
+ __kmp_acquire_adaptive_lock(lck, gtid);
+
+ lck->lk.qlk.owner_id = gtid + 1;
+}
+
+static int __kmp_release_adaptive_lock(kmp_adaptive_lock_t *lck,
+ kmp_int32 gtid) {
+ if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(
+ lck))) { // If the lock doesn't look claimed we must be speculating.
+ // (Or the user's code is buggy and they're releasing without locking;
+ // if we had XTEST we'd be able to check that case...)
+ _xend(); // Exit speculation
+ __kmp_update_badness_after_success(lck);
+ } else { // Since the lock *is* visibly locked we're not speculating,
+ // so should use the underlying lock's release scheme.
+ __kmp_release_queuing_lock(GET_QLK_PTR(lck), gtid);
+ }
+ return KMP_LOCK_RELEASED;
+}
+
+static int __kmp_release_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_unset_lock";
+ KMP_MB(); /* in case another processor initialized lock */
+ if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == -1) {
+ KMP_FATAL(LockUnsettingFree, func);
+ }
+ if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != gtid) {
+ KMP_FATAL(LockUnsettingSetByAnother, func);
+ }
+ lck->lk.qlk.owner_id = 0;
+ __kmp_release_adaptive_lock(lck, gtid);
+ return KMP_LOCK_RELEASED;
+}
+
+static void __kmp_init_adaptive_lock(kmp_adaptive_lock_t *lck) {
+ __kmp_init_queuing_lock(GET_QLK_PTR(lck));
+ lck->lk.adaptive.badness = 0;
+ lck->lk.adaptive.acquire_attempts = 0; // nonSpeculativeAcquireAttempts = 0;
+ lck->lk.adaptive.max_soft_retries =
+ __kmp_adaptive_backoff_params.max_soft_retries;
+ lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness;
#if KMP_DEBUG_ADAPTIVE_LOCKS
- __kmp_zero_speculative_stats( &lck->lk.adaptive );
+ __kmp_zero_speculative_stats(&lck->lk.adaptive);
#endif
- KA_TRACE(1000, ("__kmp_init_adaptive_lock: lock %p initialized\n", lck));
+ KA_TRACE(1000, ("__kmp_init_adaptive_lock: lock %p initialized\n", lck));
}
-static void
-__kmp_init_adaptive_lock_with_checks( kmp_adaptive_lock_t * lck )
-{
- __kmp_init_adaptive_lock( lck );
+static void __kmp_init_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
+ __kmp_init_adaptive_lock(lck);
}
-static void
-__kmp_destroy_adaptive_lock( kmp_adaptive_lock_t *lck )
-{
+static void __kmp_destroy_adaptive_lock(kmp_adaptive_lock_t *lck) {
#if KMP_DEBUG_ADAPTIVE_LOCKS
- __kmp_accumulate_speculative_stats( &lck->lk.adaptive );
+ __kmp_accumulate_speculative_stats(&lck->lk.adaptive);
#endif
- __kmp_destroy_queuing_lock (GET_QLK_PTR(lck));
- // Nothing needed for the speculative part.
+ __kmp_destroy_queuing_lock(GET_QLK_PTR(lck));
+ // Nothing needed for the speculative part.
}
-static void
-__kmp_destroy_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck )
-{
- char const * const func = "omp_destroy_lock";
- if ( lck->lk.qlk.initialized != GET_QLK_PTR(lck) ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_get_queuing_lock_owner( GET_QLK_PTR(lck) ) != -1 ) {
- KMP_FATAL( LockStillOwned, func );
- }
- __kmp_destroy_adaptive_lock( lck );
+static void __kmp_destroy_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
+ char const *const func = "omp_destroy_lock";
+ if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != -1) {
+ KMP_FATAL(LockStillOwned, func);
+ }
+ __kmp_destroy_adaptive_lock(lck);
}
-
#endif // KMP_USE_ADAPTIVE_LOCKS
-
/* ------------------------------------------------------------------------ */
/* DRDPA ticket locks */
/* "DRDPA" means Dynamically Reconfigurable Distributed Polling Area */
-static kmp_int32
-__kmp_get_drdpa_lock_owner( kmp_drdpa_lock_t *lck )
-{
- return TCR_4( lck->lk.owner_id ) - 1;
+static kmp_int32 __kmp_get_drdpa_lock_owner(kmp_drdpa_lock_t *lck) {
+ return TCR_4(lck->lk.owner_id) - 1;
}
-static inline bool
-__kmp_is_drdpa_lock_nestable( kmp_drdpa_lock_t *lck )
-{
- return lck->lk.depth_locked != -1;
+static inline bool __kmp_is_drdpa_lock_nestable(kmp_drdpa_lock_t *lck) {
+ return lck->lk.depth_locked != -1;
}
__forceinline static int
-__kmp_acquire_drdpa_lock_timed_template( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
-{
- kmp_uint64 ticket = KMP_TEST_THEN_INC64((kmp_int64 *)&lck->lk.next_ticket);
- kmp_uint64 mask = TCR_8(lck->lk.mask); // volatile load
- volatile struct kmp_base_drdpa_lock::kmp_lock_poll *polls
- = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)
- TCR_PTR(lck->lk.polls); // volatile load
+__kmp_acquire_drdpa_lock_timed_template(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
+ kmp_uint64 ticket = KMP_TEST_THEN_INC64((kmp_int64 *)&lck->lk.next_ticket);
+ kmp_uint64 mask = TCR_8(lck->lk.mask); // volatile load
+ volatile struct kmp_base_drdpa_lock::kmp_lock_poll *polls =
+ (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)TCR_PTR(
+ lck->lk.polls); // volatile load
#ifdef USE_LOCK_PROFILE
- if (TCR_8(polls[ticket & mask].poll) != ticket)
- __kmp_printf("LOCK CONTENTION: %p\n", lck);
- /* else __kmp_printf( "." );*/
+ if (TCR_8(polls[ticket & mask].poll) != ticket)
+ __kmp_printf("LOCK CONTENTION: %p\n", lck);
+/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */
- //
- // Now spin-wait, but reload the polls pointer and mask, in case the
- // polling area has been reconfigured. Unless it is reconfigured, the
- // reloads stay in L1 cache and are cheap.
- //
- // Keep this code in sync with KMP_WAIT_YIELD, in kmp_dispatch.cpp !!!
- //
- // The current implementation of KMP_WAIT_YIELD doesn't allow for mask
- // and poll to be re-read every spin iteration.
- //
- kmp_uint32 spins;
-
- KMP_FSYNC_PREPARE(lck);
- KMP_INIT_YIELD(spins);
- while (TCR_8(polls[ticket & mask].poll) < ticket) { // volatile load
- // If we are oversubscribed,
- // or have waited a bit (and KMP_LIBRARY=turnaround), then yield.
- // CPU Pause is in the macros for yield.
- //
- KMP_YIELD(TCR_4(__kmp_nth)
- > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
- KMP_YIELD_SPIN(spins);
-
- // Re-read the mask and the poll pointer from the lock structure.
- //
- // Make certain that "mask" is read before "polls" !!!
- //
- // If another thread picks reconfigures the polling area and updates
- // their values, and we get the new value of mask and the old polls
- // pointer, we could access memory beyond the end of the old polling
- // area.
- //
- mask = TCR_8(lck->lk.mask); // volatile load
- polls = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)
- TCR_PTR(lck->lk.polls); // volatile load
- }
-
- //
- // Critical section starts here
- //
- KMP_FSYNC_ACQUIRED(lck);
- KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld acquired lock %p\n",
- ticket, lck));
- lck->lk.now_serving = ticket; // non-volatile store
-
- //
- // Deallocate a garbage polling area if we know that we are the last
- // thread that could possibly access it.
- //
- // The >= check is in case __kmp_test_drdpa_lock() allocated the cleanup
- // ticket.
- //
- if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) {
- __kmp_free((void *)lck->lk.old_polls);
- lck->lk.old_polls = NULL;
- lck->lk.cleanup_ticket = 0;
- }
-
- //
- // Check to see if we should reconfigure the polling area.
- // If there is still a garbage polling area to be deallocated from a
- // previous reconfiguration, let a later thread reconfigure it.
- //
- if (lck->lk.old_polls == NULL) {
- bool reconfigure = false;
- volatile struct kmp_base_drdpa_lock::kmp_lock_poll *old_polls = polls;
- kmp_uint32 num_polls = TCR_4(lck->lk.num_polls);
-
- if (TCR_4(__kmp_nth)
- > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
- //
- // We are in oversubscription mode. Contract the polling area
- // down to a single location, if that hasn't been done already.
- //
- if (num_polls > 1) {
- reconfigure = true;
- num_polls = TCR_4(lck->lk.num_polls);
- mask = 0;
- num_polls = 1;
- polls = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)
- __kmp_allocate(num_polls * sizeof(*polls));
- polls[0].poll = ticket;
- }
- }
- else {
- //
- // We are in under/fully subscribed mode. Check the number of
- // threads waiting on the lock. The size of the polling area
- // should be at least the number of threads waiting.
- //
- kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1;
- if (num_waiting > num_polls) {
- kmp_uint32 old_num_polls = num_polls;
- reconfigure = true;
- do {
- mask = (mask << 1) | 1;
- num_polls *= 2;
- } while (num_polls <= num_waiting);
-
- //
- // Allocate the new polling area, and copy the relevant portion
- // of the old polling area to the new area. __kmp_allocate()
- // zeroes the memory it allocates, and most of the old area is
- // just zero padding, so we only copy the release counters.
- //
- polls = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)
- __kmp_allocate(num_polls * sizeof(*polls));
- kmp_uint32 i;
- for (i = 0; i < old_num_polls; i++) {
- polls[i].poll = old_polls[i].poll;
- }
- }
- }
-
- if (reconfigure) {
- //
- // Now write the updated fields back to the lock structure.
- //
- // Make certain that "polls" is written before "mask" !!!
- //
- // If another thread picks up the new value of mask and the old
- // polls pointer , it could access memory beyond the end of the
- // old polling area.
- //
- // On x86, we need memory fences.
- //
- KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld reconfiguring lock %p to %d polls\n",
- ticket, lck, num_polls));
-
- lck->lk.old_polls = old_polls; // non-volatile store
- lck->lk.polls = polls; // volatile store
-
- KMP_MB();
-
- lck->lk.num_polls = num_polls; // non-volatile store
- lck->lk.mask = mask; // volatile store
-
- KMP_MB();
-
- //
- // Only after the new polling area and mask have been flushed
- // to main memory can we update the cleanup ticket field.
- //
- // volatile load / non-volatile store
- //
- lck->lk.cleanup_ticket = TCR_8(lck->lk.next_ticket);
- }
- }
- return KMP_LOCK_ACQUIRED_FIRST;
-}
-
-int
-__kmp_acquire_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
-{
- int retval = __kmp_acquire_drdpa_lock_timed_template( lck, gtid );
- ANNOTATE_DRDPA_ACQUIRED(lck);
- return retval;
-}
-
-static int
-__kmp_acquire_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_set_lock";
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_is_drdpa_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( ( gtid >= 0 ) && ( __kmp_get_drdpa_lock_owner( lck ) == gtid ) ) {
- KMP_FATAL( LockIsAlreadyOwned, func );
- }
-
- __kmp_acquire_drdpa_lock( lck, gtid );
-
- lck->lk.owner_id = gtid + 1;
- return KMP_LOCK_ACQUIRED_FIRST;
-}
-
-int
-__kmp_test_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
-{
- //
- // First get a ticket, then read the polls pointer and the mask.
- // The polls pointer must be read before the mask!!! (See above)
- //
- kmp_uint64 ticket = TCR_8(lck->lk.next_ticket); // volatile load
- volatile struct kmp_base_drdpa_lock::kmp_lock_poll *polls
- = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)
- TCR_PTR(lck->lk.polls); // volatile load
- kmp_uint64 mask = TCR_8(lck->lk.mask); // volatile load
- if (TCR_8(polls[ticket & mask].poll) == ticket) {
- kmp_uint64 next_ticket = ticket + 1;
- if (KMP_COMPARE_AND_STORE_ACQ64((kmp_int64 *)&lck->lk.next_ticket,
- ticket, next_ticket)) {
- KMP_FSYNC_ACQUIRED(lck);
- KA_TRACE(1000, ("__kmp_test_drdpa_lock: ticket #%lld acquired lock %p\n",
- ticket, lck));
- lck->lk.now_serving = ticket; // non-volatile store
-
- //
- // Since no threads are waiting, there is no possibility that
- // we would want to reconfigure the polling area. We might
- // have the cleanup ticket value (which says that it is now
- // safe to deallocate old_polls), but we'll let a later thread
- // which calls __kmp_acquire_lock do that - this routine
- // isn't supposed to block, and we would risk blocks if we
- // called __kmp_free() to do the deallocation.
- //
- return TRUE;
- }
- }
- return FALSE;
-}
-
-static int
-__kmp_test_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_test_lock";
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_is_drdpa_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
-
- int retval = __kmp_test_drdpa_lock( lck, gtid );
-
- if ( retval ) {
- lck->lk.owner_id = gtid + 1;
- }
- return retval;
-}
-
-int
-__kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
-{
- //
- // Read the ticket value from the lock data struct, then the polls
- // pointer and the mask. The polls pointer must be read before the
- // mask!!! (See above)
- //
- kmp_uint64 ticket = lck->lk.now_serving + 1; // non-volatile load
- volatile struct kmp_base_drdpa_lock::kmp_lock_poll *polls
- = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)
- TCR_PTR(lck->lk.polls); // volatile load
- kmp_uint64 mask = TCR_8(lck->lk.mask); // volatile load
- KA_TRACE(1000, ("__kmp_release_drdpa_lock: ticket #%lld released lock %p\n",
- ticket - 1, lck));
- KMP_FSYNC_RELEASING(lck);
- ANNOTATE_DRDPA_RELEASED(lck);
- KMP_ST_REL64(&(polls[ticket & mask].poll), ticket); // volatile store
- return KMP_LOCK_RELEASED;
-}
-
-static int
-__kmp_release_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_unset_lock";
- KMP_MB(); /* in case another processor initialized lock */
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_is_drdpa_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( __kmp_get_drdpa_lock_owner( lck ) == -1 ) {
- KMP_FATAL( LockUnsettingFree, func );
- }
- if ( ( gtid >= 0 ) && ( __kmp_get_drdpa_lock_owner( lck ) >= 0 )
- && ( __kmp_get_drdpa_lock_owner( lck ) != gtid ) ) {
- KMP_FATAL( LockUnsettingSetByAnother, func );
- }
- lck->lk.owner_id = 0;
- return __kmp_release_drdpa_lock( lck, gtid );
-}
-
-void
-__kmp_init_drdpa_lock( kmp_drdpa_lock_t *lck )
-{
- lck->lk.location = NULL;
- lck->lk.mask = 0;
- lck->lk.num_polls = 1;
- lck->lk.polls = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)
- __kmp_allocate(lck->lk.num_polls * sizeof(*(lck->lk.polls)));
- lck->lk.cleanup_ticket = 0;
+ // Now spin-wait, but reload the polls pointer and mask, in case the
+ // polling area has been reconfigured. Unless it is reconfigured, the
+ // reloads stay in L1 cache and are cheap.
+ //
+ // Keep this code in sync with KMP_WAIT_YIELD, in kmp_dispatch.cpp !!!
+ //
+ // The current implementation of KMP_WAIT_YIELD doesn't allow for mask
+ // and poll to be re-read every spin iteration.
+ kmp_uint32 spins;
+
+ KMP_FSYNC_PREPARE(lck);
+ KMP_INIT_YIELD(spins);
+ while (TCR_8(polls[ticket & mask].poll) < ticket) { // volatile load
+ // If we are oversubscribed,
+ // or have waited a bit (and KMP_LIBRARY=turnaround), then yield.
+ // CPU Pause is in the macros for yield.
+ //
+ KMP_YIELD(TCR_4(__kmp_nth) >
+ (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
+ KMP_YIELD_SPIN(spins);
+
+ // Re-read the mask and the poll pointer from the lock structure.
+ //
+ // Make certain that "mask" is read before "polls" !!!
+ //
+    // If another thread reconfigures the polling area and updates their
+ // values, and we get the new value of mask and the old polls pointer, we
+ // could access memory beyond the end of the old polling area.
+ mask = TCR_8(lck->lk.mask); // volatile load
+ polls = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)TCR_PTR(
+ lck->lk.polls); // volatile load
+ }
+
+ // Critical section starts here
+ KMP_FSYNC_ACQUIRED(lck);
+ KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld acquired lock %p\n",
+ ticket, lck));
+ lck->lk.now_serving = ticket; // non-volatile store
+
+ // Deallocate a garbage polling area if we know that we are the last
+ // thread that could possibly access it.
+ //
+ // The >= check is in case __kmp_test_drdpa_lock() allocated the cleanup
+ // ticket.
+ if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) {
+ __kmp_free((void *)lck->lk.old_polls);
lck->lk.old_polls = NULL;
- lck->lk.next_ticket = 0;
- lck->lk.now_serving = 0;
- lck->lk.owner_id = 0; // no thread owns the lock.
- lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
- lck->lk.initialized = lck;
-
- KA_TRACE(1000, ("__kmp_init_drdpa_lock: lock %p initialized\n", lck));
-}
-
-static void
-__kmp_init_drdpa_lock_with_checks( kmp_drdpa_lock_t * lck )
-{
- __kmp_init_drdpa_lock( lck );
-}
-
-void
-__kmp_destroy_drdpa_lock( kmp_drdpa_lock_t *lck )
-{
- lck->lk.initialized = NULL;
- lck->lk.location = NULL;
- if (lck->lk.polls != NULL) {
- __kmp_free((void *)lck->lk.polls);
- lck->lk.polls = NULL;
- }
- if (lck->lk.old_polls != NULL) {
- __kmp_free((void *)lck->lk.old_polls);
- lck->lk.old_polls = NULL;
- }
- lck->lk.mask = 0;
- lck->lk.num_polls = 0;
lck->lk.cleanup_ticket = 0;
- lck->lk.next_ticket = 0;
- lck->lk.now_serving = 0;
- lck->lk.owner_id = 0;
- lck->lk.depth_locked = -1;
-}
-
-static void
-__kmp_destroy_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck )
-{
- char const * const func = "omp_destroy_lock";
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( __kmp_is_drdpa_lock_nestable( lck ) ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- if ( __kmp_get_drdpa_lock_owner( lck ) != -1 ) {
- KMP_FATAL( LockStillOwned, func );
- }
- __kmp_destroy_drdpa_lock( lck );
-}
+ }
-
-//
-// nested drdpa ticket locks
-//
-
-int
-__kmp_acquire_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( gtid >= 0 );
-
- if ( __kmp_get_drdpa_lock_owner( lck ) == gtid ) {
- lck->lk.depth_locked += 1;
- return KMP_LOCK_ACQUIRED_NEXT;
- }
- else {
- __kmp_acquire_drdpa_lock_timed_template( lck, gtid );
- ANNOTATE_DRDPA_ACQUIRED(lck);
- KMP_MB();
- lck->lk.depth_locked = 1;
- KMP_MB();
- lck->lk.owner_id = gtid + 1;
- return KMP_LOCK_ACQUIRED_FIRST;
- }
-}
-
-static void
-__kmp_acquire_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_set_nest_lock";
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( ! __kmp_is_drdpa_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- __kmp_acquire_nested_drdpa_lock( lck, gtid );
+ // Check to see if we should reconfigure the polling area.
+ // If there is still a garbage polling area to be deallocated from a
+ // previous reconfiguration, let a later thread reconfigure it.
+ if (lck->lk.old_polls == NULL) {
+ bool reconfigure = false;
+ volatile struct kmp_base_drdpa_lock::kmp_lock_poll *old_polls = polls;
+ kmp_uint32 num_polls = TCR_4(lck->lk.num_polls);
+
+ if (TCR_4(__kmp_nth) >
+ (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
+ // We are in oversubscription mode. Contract the polling area
+ // down to a single location, if that hasn't been done already.
+ if (num_polls > 1) {
+ reconfigure = true;
+ num_polls = TCR_4(lck->lk.num_polls);
+ mask = 0;
+ num_polls = 1;
+ polls = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)
+ __kmp_allocate(num_polls * sizeof(*polls));
+ polls[0].poll = ticket;
+ }
+ } else {
+ // We are in under/fully subscribed mode. Check the number of
+ // threads waiting on the lock. The size of the polling area
+ // should be at least the number of threads waiting.
+ kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1;
+ if (num_waiting > num_polls) {
+ kmp_uint32 old_num_polls = num_polls;
+ reconfigure = true;
+ do {
+ mask = (mask << 1) | 1;
+ num_polls *= 2;
+ } while (num_polls <= num_waiting);
+
+ // Allocate the new polling area, and copy the relevant portion
+ // of the old polling area to the new area. __kmp_allocate()
+ // zeroes the memory it allocates, and most of the old area is
+ // just zero padding, so we only copy the release counters.
+ polls = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)
+ __kmp_allocate(num_polls * sizeof(*polls));
+ kmp_uint32 i;
+ for (i = 0; i < old_num_polls; i++) {
+ polls[i].poll = old_polls[i].poll;
+ }
+ }
+ }
+
+ if (reconfigure) {
+ // Now write the updated fields back to the lock structure.
+ //
+ // Make certain that "polls" is written before "mask" !!!
+ //
+ // If another thread picks up the new value of mask and the old polls
+      // pointer, it could access memory beyond the end of the old polling
+ // area.
+ //
+ // On x86, we need memory fences.
+ KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld reconfiguring "
+ "lock %p to %d polls\n",
+ ticket, lck, num_polls));
+
+ lck->lk.old_polls = old_polls; // non-volatile store
+ lck->lk.polls = polls; // volatile store
+
+ KMP_MB();
+
+ lck->lk.num_polls = num_polls; // non-volatile store
+ lck->lk.mask = mask; // volatile store
+
+ KMP_MB();
+
+ // Only after the new polling area and mask have been flushed
+ // to main memory can we update the cleanup ticket field.
+ //
+ // volatile load / non-volatile store
+ lck->lk.cleanup_ticket = TCR_8(lck->lk.next_ticket);
+ }
+ }
+ return KMP_LOCK_ACQUIRED_FIRST;
+}
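For illustration, a small stand-alone model (hypothetical names) of the polling-area sizing rule above: the area is a power of two, doubled until it exceeds the number of waiters, with mask kept equal to num_polls - 1 so (ticket & mask) indexes it:

  #include <cstdint>
  #include <cstdio>

  // Hypothetical model of the DRDPA polling-area growth loop above.
  static void toy_grow_polls(uint64_t num_waiting, uint32_t &num_polls,
                             uint64_t &mask) {
    do {
      mask = (mask << 1) | 1;
      num_polls *= 2;
    } while (num_polls <= num_waiting);
  }

  int main() {
    uint32_t num_polls = 1;
    uint64_t mask = 0;
    toy_grow_polls(5, num_polls, mask); // 5 waiters -> 8 polls, mask 0x7
    std::printf("num_polls=%u mask=0x%llx\n", (unsigned)num_polls,
                (unsigned long long)mask);
    return 0;
  }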
+
+int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
+ int retval = __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
+ ANNOTATE_DRDPA_ACQUIRED(lck);
+ return retval;
}
-int
-__kmp_test_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
-{
- int retval;
+static int __kmp_acquire_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_set_lock";
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_is_drdpa_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) == gtid)) {
+ KMP_FATAL(LockIsAlreadyOwned, func);
+ }
+
+ __kmp_acquire_drdpa_lock(lck, gtid);
+
+ lck->lk.owner_id = gtid + 1;
+ return KMP_LOCK_ACQUIRED_FIRST;
+}
+
+int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
+ // First get a ticket, then read the polls pointer and the mask.
+ // The polls pointer must be read before the mask!!! (See above)
+ kmp_uint64 ticket = TCR_8(lck->lk.next_ticket); // volatile load
+ volatile struct kmp_base_drdpa_lock::kmp_lock_poll *polls =
+ (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)TCR_PTR(
+ lck->lk.polls); // volatile load
+ kmp_uint64 mask = TCR_8(lck->lk.mask); // volatile load
+ if (TCR_8(polls[ticket & mask].poll) == ticket) {
+ kmp_uint64 next_ticket = ticket + 1;
+ if (KMP_COMPARE_AND_STORE_ACQ64((kmp_int64 *)&lck->lk.next_ticket, ticket,
+ next_ticket)) {
+ KMP_FSYNC_ACQUIRED(lck);
+ KA_TRACE(1000, ("__kmp_test_drdpa_lock: ticket #%lld acquired lock %p\n",
+ ticket, lck));
+ lck->lk.now_serving = ticket; // non-volatile store
+
+ // Since no threads are waiting, there is no possibility that we would
+ // want to reconfigure the polling area. We might have the cleanup ticket
+ // value (which says that it is now safe to deallocate old_polls), but
+ // we'll let a later thread which calls __kmp_acquire_lock do that - this
+ // routine isn't supposed to block, and we would risk blocks if we called
+ // __kmp_free() to do the deallocation.
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+static int __kmp_test_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_test_lock";
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_is_drdpa_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
- KMP_DEBUG_ASSERT( gtid >= 0 );
+ int retval = __kmp_test_drdpa_lock(lck, gtid);
- if ( __kmp_get_drdpa_lock_owner( lck ) == gtid ) {
- retval = ++lck->lk.depth_locked;
- }
- else if ( !__kmp_test_drdpa_lock( lck, gtid ) ) {
- retval = 0;
- }
- else {
- KMP_MB();
- retval = lck->lk.depth_locked = 1;
- KMP_MB();
- lck->lk.owner_id = gtid + 1;
- }
- return retval;
+ if (retval) {
+ lck->lk.owner_id = gtid + 1;
+ }
+ return retval;
}
-static int
-__kmp_test_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_test_nest_lock";
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( ! __kmp_is_drdpa_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- return __kmp_test_nested_drdpa_lock( lck, gtid );
+int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
+ // Read the ticket value from the lock data struct, then the polls pointer and
+ // the mask. The polls pointer must be read before the mask!!! (See above)
+ kmp_uint64 ticket = lck->lk.now_serving + 1; // non-volatile load
+ volatile struct kmp_base_drdpa_lock::kmp_lock_poll *polls =
+ (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)TCR_PTR(
+ lck->lk.polls); // volatile load
+ kmp_uint64 mask = TCR_8(lck->lk.mask); // volatile load
+ KA_TRACE(1000, ("__kmp_release_drdpa_lock: ticket #%lld released lock %p\n",
+ ticket - 1, lck));
+ KMP_FSYNC_RELEASING(lck);
+ ANNOTATE_DRDPA_RELEASED(lck);
+ KMP_ST_REL64(&(polls[ticket & mask].poll), ticket); // volatile store
+ return KMP_LOCK_RELEASED;
+}
+
+static int __kmp_release_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_unset_lock";
+ KMP_MB(); /* in case another processor initialized lock */
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_is_drdpa_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if (__kmp_get_drdpa_lock_owner(lck) == -1) {
+ KMP_FATAL(LockUnsettingFree, func);
+ }
+ if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) >= 0) &&
+ (__kmp_get_drdpa_lock_owner(lck) != gtid)) {
+ KMP_FATAL(LockUnsettingSetByAnother, func);
+ }
+ lck->lk.owner_id = 0;
+ return __kmp_release_drdpa_lock(lck, gtid);
+}
+
+void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck) {
+ lck->lk.location = NULL;
+ lck->lk.mask = 0;
+ lck->lk.num_polls = 1;
+ lck->lk.polls =
+ (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)__kmp_allocate(
+ lck->lk.num_polls * sizeof(*(lck->lk.polls)));
+ lck->lk.cleanup_ticket = 0;
+ lck->lk.old_polls = NULL;
+ lck->lk.next_ticket = 0;
+ lck->lk.now_serving = 0;
+ lck->lk.owner_id = 0; // no thread owns the lock.
+ lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
+ lck->lk.initialized = lck;
+
+ KA_TRACE(1000, ("__kmp_init_drdpa_lock: lock %p initialized\n", lck));
+}
+
+static void __kmp_init_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
+ __kmp_init_drdpa_lock(lck);
+}
+
+void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck) {
+ lck->lk.initialized = NULL;
+ lck->lk.location = NULL;
+ if (lck->lk.polls != NULL) {
+ __kmp_free((void *)lck->lk.polls);
+ lck->lk.polls = NULL;
+ }
+ if (lck->lk.old_polls != NULL) {
+ __kmp_free((void *)lck->lk.old_polls);
+ lck->lk.old_polls = NULL;
+ }
+ lck->lk.mask = 0;
+ lck->lk.num_polls = 0;
+ lck->lk.cleanup_ticket = 0;
+ lck->lk.next_ticket = 0;
+ lck->lk.now_serving = 0;
+ lck->lk.owner_id = 0;
+ lck->lk.depth_locked = -1;
+}
+
+static void __kmp_destroy_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
+ char const *const func = "omp_destroy_lock";
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (__kmp_is_drdpa_lock_nestable(lck)) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
+ if (__kmp_get_drdpa_lock_owner(lck) != -1) {
+ KMP_FATAL(LockStillOwned, func);
+ }
+ __kmp_destroy_drdpa_lock(lck);
}
-int
-__kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( gtid >= 0 );
+// nested drdpa ticket locks
+
+int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(gtid >= 0);
+ if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
+ lck->lk.depth_locked += 1;
+ return KMP_LOCK_ACQUIRED_NEXT;
+ } else {
+ __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
+ ANNOTATE_DRDPA_ACQUIRED(lck);
KMP_MB();
- if ( --(lck->lk.depth_locked) == 0 ) {
- KMP_MB();
- lck->lk.owner_id = 0;
- __kmp_release_drdpa_lock( lck, gtid );
- return KMP_LOCK_RELEASED;
- }
- return KMP_LOCK_STILL_HELD;
+ lck->lk.depth_locked = 1;
+ KMP_MB();
+ lck->lk.owner_id = gtid + 1;
+ return KMP_LOCK_ACQUIRED_FIRST;
+ }
}
-static int
-__kmp_release_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
-{
- char const * const func = "omp_unset_nest_lock";
- KMP_MB(); /* in case another processor initialized lock */
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( ! __kmp_is_drdpa_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- if ( __kmp_get_drdpa_lock_owner( lck ) == -1 ) {
- KMP_FATAL( LockUnsettingFree, func );
- }
- if ( __kmp_get_drdpa_lock_owner( lck ) != gtid ) {
- KMP_FATAL( LockUnsettingSetByAnother, func );
- }
- return __kmp_release_nested_drdpa_lock( lck, gtid );
+static void __kmp_acquire_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_set_nest_lock";
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (!__kmp_is_drdpa_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ __kmp_acquire_nested_drdpa_lock(lck, gtid);
+}
+
+int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
+ int retval;
+
+ KMP_DEBUG_ASSERT(gtid >= 0);
+
+ if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
+ retval = ++lck->lk.depth_locked;
+ } else if (!__kmp_test_drdpa_lock(lck, gtid)) {
+ retval = 0;
+ } else {
+ KMP_MB();
+ retval = lck->lk.depth_locked = 1;
+ KMP_MB();
+ lck->lk.owner_id = gtid + 1;
+ }
+ return retval;
}
-void
-__kmp_init_nested_drdpa_lock( kmp_drdpa_lock_t * lck )
-{
- __kmp_init_drdpa_lock( lck );
- lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
+static int __kmp_test_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_test_nest_lock";
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (!__kmp_is_drdpa_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ return __kmp_test_nested_drdpa_lock(lck, gtid);
}
-static void
-__kmp_init_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t * lck )
-{
- __kmp_init_nested_drdpa_lock( lck );
-}
+int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(gtid >= 0);
-void
-__kmp_destroy_nested_drdpa_lock( kmp_drdpa_lock_t *lck )
-{
- __kmp_destroy_drdpa_lock( lck );
- lck->lk.depth_locked = 0;
+ KMP_MB();
+ if (--(lck->lk.depth_locked) == 0) {
+ KMP_MB();
+ lck->lk.owner_id = 0;
+ __kmp_release_drdpa_lock(lck, gtid);
+ return KMP_LOCK_RELEASED;
+ }
+ return KMP_LOCK_STILL_HELD;
}
-static void
-__kmp_destroy_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck )
-{
- char const * const func = "omp_destroy_nest_lock";
- if ( lck->lk.initialized != lck ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- if ( ! __kmp_is_drdpa_lock_nestable( lck ) ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- if ( __kmp_get_drdpa_lock_owner( lck ) != -1 ) {
- KMP_FATAL( LockStillOwned, func );
- }
- __kmp_destroy_nested_drdpa_lock( lck );
+static int __kmp_release_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
+ kmp_int32 gtid) {
+ char const *const func = "omp_unset_nest_lock";
+ KMP_MB(); /* in case another processor initialized lock */
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (!__kmp_is_drdpa_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ if (__kmp_get_drdpa_lock_owner(lck) == -1) {
+ KMP_FATAL(LockUnsettingFree, func);
+ }
+ if (__kmp_get_drdpa_lock_owner(lck) != gtid) {
+ KMP_FATAL(LockUnsettingSetByAnother, func);
+ }
+ return __kmp_release_nested_drdpa_lock(lck, gtid);
+}
+
+void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
+ __kmp_init_drdpa_lock(lck);
+ lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
+}
+
+static void __kmp_init_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
+ __kmp_init_nested_drdpa_lock(lck);
+}
+
+void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
+ __kmp_destroy_drdpa_lock(lck);
+ lck->lk.depth_locked = 0;
+}
+
+static void __kmp_destroy_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
+ char const *const func = "omp_destroy_nest_lock";
+ if (lck->lk.initialized != lck) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ if (!__kmp_is_drdpa_lock_nestable(lck)) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ if (__kmp_get_drdpa_lock_owner(lck) != -1) {
+ KMP_FATAL(LockStillOwned, func);
+ }
+ __kmp_destroy_nested_drdpa_lock(lck);
}
-
-//
// access functions to fields which don't exist for all lock kinds.
-//
-static int
-__kmp_is_drdpa_lock_initialized( kmp_drdpa_lock_t *lck )
-{
- return lck == lck->lk.initialized;
+static int __kmp_is_drdpa_lock_initialized(kmp_drdpa_lock_t *lck) {
+ return lck == lck->lk.initialized;
}
-static const ident_t *
-__kmp_get_drdpa_lock_location( kmp_drdpa_lock_t *lck )
-{
- return lck->lk.location;
+static const ident_t *__kmp_get_drdpa_lock_location(kmp_drdpa_lock_t *lck) {
+ return lck->lk.location;
}
-static void
-__kmp_set_drdpa_lock_location( kmp_drdpa_lock_t *lck, const ident_t *loc )
-{
- lck->lk.location = loc;
+static void __kmp_set_drdpa_lock_location(kmp_drdpa_lock_t *lck,
+ const ident_t *loc) {
+ lck->lk.location = loc;
}
-static kmp_lock_flags_t
-__kmp_get_drdpa_lock_flags( kmp_drdpa_lock_t *lck )
-{
- return lck->lk.flags;
+static kmp_lock_flags_t __kmp_get_drdpa_lock_flags(kmp_drdpa_lock_t *lck) {
+ return lck->lk.flags;
}
-static void
-__kmp_set_drdpa_lock_flags( kmp_drdpa_lock_t *lck, kmp_lock_flags_t flags )
-{
- lck->lk.flags = flags;
+static void __kmp_set_drdpa_lock_flags(kmp_drdpa_lock_t *lck,
+ kmp_lock_flags_t flags) {
+ lck->lk.flags = flags;
}
// Time stamp counter
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
-# define __kmp_tsc() __kmp_hardware_timestamp()
+#define __kmp_tsc() __kmp_hardware_timestamp()
// Runtime's default backoff parameters
-kmp_backoff_t __kmp_spin_backoff_params = { 1, 4096, 100 };
+kmp_backoff_t __kmp_spin_backoff_params = {1, 4096, 100};
#else
// Use nanoseconds for other platforms
extern kmp_uint64 __kmp_now_nsec();
-kmp_backoff_t __kmp_spin_backoff_params = { 1, 256, 100 };
-# define __kmp_tsc() __kmp_now_nsec()
+kmp_backoff_t __kmp_spin_backoff_params = {1, 256, 100};
+#define __kmp_tsc() __kmp_now_nsec()
#endif
// A useful predicate for dealing with timestamps that may wrap.
-// Is a before b?
-// Since the timestamps may wrap, this is asking whether it's
+// Is a before b? Since the timestamps may wrap, this is asking whether it's
// shorter to go clockwise from a to b around the clock-face, or anti-clockwise.
// Times where going clockwise is less distance than going anti-clockwise
-// are in the future, others are in the past.
-// e.g.) a = MAX-1, b = MAX+1 (=0), then a > b (true) does not mean a reached b
-// whereas signed(a) = -2, signed(b) = 0 captures the actual difference
-static inline bool before(kmp_uint64 a, kmp_uint64 b)
-{
- return ((kmp_int64)b - (kmp_int64)a) > 0;
+// are in the future, others are in the past. e.g. a = MAX-1, b = MAX+1 (=0),
+// then a > b (true) does not mean a reached b; whereas signed(a) = -2,
+// signed(b) = 0 captures the actual difference
+static inline bool before(kmp_uint64 a, kmp_uint64 b) {
+ return ((kmp_int64)b - (kmp_int64)a) > 0;
}
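A minimal check of the wrap-safe comparison above, assuming a hypothetical toy_before with the same body: subtracting as signed integers keeps the ordering correct even after the counter wraps past zero:

  #include <cstdint>
  #include <cassert>

  // Same predicate as before(): is a earlier than b, modulo wrap-around?
  static bool toy_before(uint64_t a, uint64_t b) {
    return ((int64_t)b - (int64_t)a) > 0;
  }

  int main() {
    uint64_t max = ~(uint64_t)0;
    assert(toy_before(10, 20));      // ordinary case
    assert(!toy_before(20, 10));
    assert(toy_before(max - 1, 1));  // b wrapped past zero but is still "after" a
    assert(!toy_before(1, max - 1)); // going the other way round is the past
    return 0;
  }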
// Truncated binary exponential backoff function
-void
-__kmp_spin_backoff(kmp_backoff_t *boff)
-{
- // We could flatten this loop, but making it a nested loop gives better result.
- kmp_uint32 i;
- for (i = boff->step; i > 0; i--) {
- kmp_uint64 goal = __kmp_tsc() + boff->min_tick;
- do {
- KMP_CPU_PAUSE();
- } while (before(__kmp_tsc(), goal));
- }
- boff->step = (boff->step<<1 | 1) & (boff->max_backoff-1);
+void __kmp_spin_backoff(kmp_backoff_t *boff) {
+  // We could flatten this loop, but making it a nested loop gives better
+  // results.
+ kmp_uint32 i;
+ for (i = boff->step; i > 0; i--) {
+ kmp_uint64 goal = __kmp_tsc() + boff->min_tick;
+ do {
+ KMP_CPU_PAUSE();
+ } while (before(__kmp_tsc(), goal));
+ }
+ boff->step = (boff->step << 1 | 1) & (boff->max_backoff - 1);
}
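For illustration, a hypothetical trace of how the step field evolves under the update above, assuming max_backoff is a power of two so (max_backoff - 1) acts as a truncating mask:

  #include <cstdint>
  #include <cstdio>

  // Hypothetical model of the step update in __kmp_spin_backoff above.
  int main() {
    uint32_t step = 1;
    const uint32_t max_backoff = 16; // assumed power of two
    for (int i = 0; i < 8; ++i) {
      std::printf("step=%u\n", (unsigned)step);
      step = (step << 1 | 1) & (max_backoff - 1); // 1, 3, 7, 15, 15, ...
    }
    return 0;
  }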
#if KMP_USE_DYNAMIC_LOCK
-// Direct lock initializers. It simply writes a tag to the low 8 bits of the lock word.
-static void __kmp_init_direct_lock(kmp_dyna_lock_t *lck, kmp_dyna_lockseq_t seq)
-{
- TCW_4(*lck, KMP_GET_D_TAG(seq));
- KA_TRACE(20, ("__kmp_init_direct_lock: initialized direct lock with type#%d\n", seq));
+// Direct lock initializer. It simply writes a tag to the low 8 bits of the
+// lock word.
+static void __kmp_init_direct_lock(kmp_dyna_lock_t *lck,
+ kmp_dyna_lockseq_t seq) {
+ TCW_4(*lck, KMP_GET_D_TAG(seq));
+ KA_TRACE(
+ 20,
+ ("__kmp_init_direct_lock: initialized direct lock with type#%d\n", seq));
}
#if KMP_USE_TSX
@@ -3097,207 +2797,183 @@ static void __kmp_init_direct_lock(kmp_d
#define HLE_ACQUIRE ".byte 0xf2;"
#define HLE_RELEASE ".byte 0xf3;"
-static inline kmp_uint32
-swap4(kmp_uint32 volatile *p, kmp_uint32 v)
-{
- __asm__ volatile(HLE_ACQUIRE "xchg %1,%0"
- : "+r"(v), "+m"(*p)
- :
- : "memory");
- return v;
+static inline kmp_uint32 swap4(kmp_uint32 volatile *p, kmp_uint32 v) {
+ __asm__ volatile(HLE_ACQUIRE "xchg %1,%0" : "+r"(v), "+m"(*p) : : "memory");
+ return v;
}
-static void
-__kmp_destroy_hle_lock(kmp_dyna_lock_t *lck)
-{
- TCW_4(*lck, 0);
-}
+static void __kmp_destroy_hle_lock(kmp_dyna_lock_t *lck) { TCW_4(*lck, 0); }
-static void
-__kmp_acquire_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid)
-{
- // Use gtid for KMP_LOCK_BUSY if necessary
- if (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle)) {
- int delay = 1;
- do {
- while (*(kmp_uint32 volatile *)lck != KMP_LOCK_FREE(hle)) {
- for (int i = delay; i != 0; --i)
- KMP_CPU_PAUSE();
- delay = ((delay << 1) | 1) & 7;
- }
- } while (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle));
- }
+static void __kmp_acquire_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
+ // Use gtid for KMP_LOCK_BUSY if necessary
+ if (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle)) {
+ int delay = 1;
+ do {
+ while (*(kmp_uint32 volatile *)lck != KMP_LOCK_FREE(hle)) {
+ for (int i = delay; i != 0; --i)
+ KMP_CPU_PAUSE();
+ delay = ((delay << 1) | 1) & 7;
+ }
+ } while (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle));
+ }
}
-static void
-__kmp_acquire_hle_lock_with_checks(kmp_dyna_lock_t *lck, kmp_int32 gtid)
-{
- __kmp_acquire_hle_lock(lck, gtid); // TODO: add checks
+static void __kmp_acquire_hle_lock_with_checks(kmp_dyna_lock_t *lck,
+ kmp_int32 gtid) {
+ __kmp_acquire_hle_lock(lck, gtid); // TODO: add checks
}
-static int
-__kmp_release_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid)
-{
- __asm__ volatile(HLE_RELEASE "movl %1,%0"
- : "=m"(*lck)
- : "r"(KMP_LOCK_FREE(hle))
- : "memory");
- return KMP_LOCK_RELEASED;
+static int __kmp_release_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
+ __asm__ volatile(HLE_RELEASE "movl %1,%0"
+ : "=m"(*lck)
+ : "r"(KMP_LOCK_FREE(hle))
+ : "memory");
+ return KMP_LOCK_RELEASED;
}
-static int
-__kmp_release_hle_lock_with_checks(kmp_dyna_lock_t *lck, kmp_int32 gtid)
-{
- return __kmp_release_hle_lock(lck, gtid); // TODO: add checks
+static int __kmp_release_hle_lock_with_checks(kmp_dyna_lock_t *lck,
+ kmp_int32 gtid) {
+ return __kmp_release_hle_lock(lck, gtid); // TODO: add checks
}
-static int
-__kmp_test_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid)
-{
- return swap4(lck, KMP_LOCK_BUSY(1, hle)) == KMP_LOCK_FREE(hle);
+static int __kmp_test_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
+ return swap4(lck, KMP_LOCK_BUSY(1, hle)) == KMP_LOCK_FREE(hle);
}
-static int
-__kmp_test_hle_lock_with_checks(kmp_dyna_lock_t *lck, kmp_int32 gtid)
-{
- return __kmp_test_hle_lock(lck, gtid); // TODO: add checks
+static int __kmp_test_hle_lock_with_checks(kmp_dyna_lock_t *lck,
+ kmp_int32 gtid) {
+ return __kmp_test_hle_lock(lck, gtid); // TODO: add checks
}
-static void
-__kmp_init_rtm_lock(kmp_queuing_lock_t *lck)
-{
- __kmp_init_queuing_lock(lck);
+static void __kmp_init_rtm_lock(kmp_queuing_lock_t *lck) {
+ __kmp_init_queuing_lock(lck);
}
-static void
-__kmp_destroy_rtm_lock(kmp_queuing_lock_t *lck)
-{
- __kmp_destroy_queuing_lock(lck);
+static void __kmp_destroy_rtm_lock(kmp_queuing_lock_t *lck) {
+ __kmp_destroy_queuing_lock(lck);
}
-static void
-__kmp_acquire_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid)
-{
- unsigned retries=3, status;
- do {
- status = _xbegin();
- if (status == _XBEGIN_STARTED) {
- if (__kmp_is_unlocked_queuing_lock(lck))
- return;
- _xabort(0xff);
- }
- if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
- // Wait until lock becomes free
- while (! __kmp_is_unlocked_queuing_lock(lck))
- __kmp_yield(TRUE);
- }
- else if (!(status & _XABORT_RETRY))
- break;
- } while (retries--);
+static void __kmp_acquire_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
+ unsigned retries = 3, status;
+ do {
+ status = _xbegin();
+ if (status == _XBEGIN_STARTED) {
+ if (__kmp_is_unlocked_queuing_lock(lck))
+ return;
+ _xabort(0xff);
+ }
+ if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
+ // Wait until lock becomes free
+ while (!__kmp_is_unlocked_queuing_lock(lck))
+ __kmp_yield(TRUE);
+ } else if (!(status & _XABORT_RETRY))
+ break;
+ } while (retries--);
- // Fall-back non-speculative lock (xchg)
- __kmp_acquire_queuing_lock(lck, gtid);
+ // Fall-back non-speculative lock (xchg)
+ __kmp_acquire_queuing_lock(lck, gtid);
}
-static void
-__kmp_acquire_rtm_lock_with_checks(kmp_queuing_lock_t *lck, kmp_int32 gtid)
-{
- __kmp_acquire_rtm_lock(lck, gtid);
+static void __kmp_acquire_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
+ kmp_int32 gtid) {
+ __kmp_acquire_rtm_lock(lck, gtid);
}
-static int
-__kmp_release_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid)
-{
- if (__kmp_is_unlocked_queuing_lock(lck)) {
- // Releasing from speculation
- _xend();
- }
- else {
- // Releasing from a real lock
- __kmp_release_queuing_lock(lck, gtid);
- }
- return KMP_LOCK_RELEASED;
+static int __kmp_release_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
+ if (__kmp_is_unlocked_queuing_lock(lck)) {
+ // Releasing from speculation
+ _xend();
+ } else {
+ // Releasing from a real lock
+ __kmp_release_queuing_lock(lck, gtid);
+ }
+ return KMP_LOCK_RELEASED;
}
-static int
-__kmp_release_rtm_lock_with_checks(kmp_queuing_lock_t *lck, kmp_int32 gtid)
-{
- return __kmp_release_rtm_lock(lck, gtid);
+static int __kmp_release_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
+ kmp_int32 gtid) {
+ return __kmp_release_rtm_lock(lck, gtid);
}
-static int
-__kmp_test_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid)
-{
- unsigned retries=3, status;
- do {
- status = _xbegin();
- if (status == _XBEGIN_STARTED && __kmp_is_unlocked_queuing_lock(lck)) {
- return 1;
- }
- if (!(status & _XABORT_RETRY))
- break;
- } while (retries--);
+static int __kmp_test_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
+ unsigned retries = 3, status;
+ do {
+ status = _xbegin();
+ if (status == _XBEGIN_STARTED && __kmp_is_unlocked_queuing_lock(lck)) {
+ return 1;
+ }
+ if (!(status & _XABORT_RETRY))
+ break;
+ } while (retries--);
- return (__kmp_is_unlocked_queuing_lock(lck))? 1: 0;
+ return (__kmp_is_unlocked_queuing_lock(lck)) ? 1 : 0;
}
-static int
-__kmp_test_rtm_lock_with_checks(kmp_queuing_lock_t *lck, kmp_int32 gtid)
-{
- return __kmp_test_rtm_lock(lck, gtid);
+static int __kmp_test_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
+ kmp_int32 gtid) {
+ return __kmp_test_rtm_lock(lck, gtid);
}
#endif // KMP_USE_TSX
-// Entry functions for indirect locks (first element of direct lock jump tables).
-static void __kmp_init_indirect_lock(kmp_dyna_lock_t * l, kmp_dyna_lockseq_t tag);
-static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t * lock);
-static void __kmp_set_indirect_lock(kmp_dyna_lock_t * lock, kmp_int32);
-static int __kmp_unset_indirect_lock(kmp_dyna_lock_t * lock, kmp_int32);
-static int __kmp_test_indirect_lock(kmp_dyna_lock_t * lock, kmp_int32);
-static void __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t * lock, kmp_int32);
-static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t * lock, kmp_int32);
-static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t * lock, kmp_int32);
+// Entry functions for indirect locks (first element of direct lock jump tables)
+static void __kmp_init_indirect_lock(kmp_dyna_lock_t *l,
+ kmp_dyna_lockseq_t tag);
+static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock);
+static void __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
+static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
+static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
+static void __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
+ kmp_int32);
+static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
+ kmp_int32);
+static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
+ kmp_int32);
-//
-// Jump tables for the indirect lock functions.
-// Only fill in the odd entries, that avoids the need to shift out the low bit.
-//
+// Jump tables for the indirect lock functions
+// Only fill in the odd entries; that avoids the need to shift out the low bit
// init functions
-#define expand(l, op) 0,__kmp_init_direct_lock,
-void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t)
- = { __kmp_init_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, init) };
+#define expand(l, op) 0, __kmp_init_direct_lock,
+void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t) = {
+ __kmp_init_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, init)};
#undef expand
// destroy functions
-#define expand(l, op) 0,(void (*)(kmp_dyna_lock_t *))__kmp_##op##_##l##_lock,
-void (*__kmp_direct_destroy[])(kmp_dyna_lock_t *)
- = { __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy) };
+#define expand(l, op) 0, (void (*)(kmp_dyna_lock_t *))__kmp_##op##_##l##_lock,
+void (*__kmp_direct_destroy[])(kmp_dyna_lock_t *) = {
+ __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
#undef expand
// set/acquire functions
-#define expand(l, op) 0,(void (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
-static void (*direct_set[])(kmp_dyna_lock_t *, kmp_int32)
- = { __kmp_set_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, acquire) };
+#define expand(l, op) \
+ 0, (void (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
+static void (*direct_set[])(kmp_dyna_lock_t *, kmp_int32) = {
+ __kmp_set_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, acquire)};
#undef expand
-#define expand(l, op) 0,(void (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
-static void (*direct_set_check[])(kmp_dyna_lock_t *, kmp_int32)
- = { __kmp_set_indirect_lock_with_checks, 0, KMP_FOREACH_D_LOCK(expand, acquire) };
+#define expand(l, op) \
+ 0, (void (*)(kmp_dyna_lock_t *, \
+ kmp_int32))__kmp_##op##_##l##_lock_with_checks,
+static void (*direct_set_check[])(kmp_dyna_lock_t *, kmp_int32) = {
+ __kmp_set_indirect_lock_with_checks, 0,
+ KMP_FOREACH_D_LOCK(expand, acquire)};
#undef expand
// unset/release and test functions
-#define expand(l, op) 0,(int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
-static int (*direct_unset[])(kmp_dyna_lock_t *, kmp_int32)
- = { __kmp_unset_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, release) };
-static int (*direct_test[])(kmp_dyna_lock_t *, kmp_int32)
- = { __kmp_test_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, test) };
+#define expand(l, op) \
+ 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
+static int (*direct_unset[])(kmp_dyna_lock_t *, kmp_int32) = {
+ __kmp_unset_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, release)};
+static int (*direct_test[])(kmp_dyna_lock_t *, kmp_int32) = {
+ __kmp_test_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, test)};
#undef expand
-#define expand(l, op) 0,(int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
-static int (*direct_unset_check[])(kmp_dyna_lock_t *, kmp_int32)
- = { __kmp_unset_indirect_lock_with_checks, 0, KMP_FOREACH_D_LOCK(expand, release) };
-static int (*direct_test_check[])(kmp_dyna_lock_t *, kmp_int32)
- = { __kmp_test_indirect_lock_with_checks, 0, KMP_FOREACH_D_LOCK(expand, test) };
+#define expand(l, op) \
+ 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
+static int (*direct_unset_check[])(kmp_dyna_lock_t *, kmp_int32) = {
+ __kmp_unset_indirect_lock_with_checks, 0,
+ KMP_FOREACH_D_LOCK(expand, release)};
+static int (*direct_test_check[])(kmp_dyna_lock_t *, kmp_int32) = {
+ __kmp_test_indirect_lock_with_checks, 0, KMP_FOREACH_D_LOCK(expand, test)};
#undef expand
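The effect of the odd-entry layout can be shown with a self-contained sketch. The function names and the table below are illustrative, not the runtime's tables; the point is that because every direct-lock tag has its low bit set, the tag itself is a valid index and the even slots are never dispatched through.

#include <stdio.h>

static void init_tas(void) { printf("init tas\n"); }
static void init_hle(void) { printf("init hle\n"); }

/* Even slots are placeholders; odd slots hold the real functions, so an odd
   tag (seq << 1 | 1) can be used as an index without shifting the low bit out. */
static void (*init_table[])(void) = {0, init_tas, 0, init_hle};

int main(void) {
  unsigned tag = (1u << 1) | 1u; /* tag for the second sequence: index 3 */
  init_table[tag]();             /* dispatches to init_hle */
  return 0;
}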
// Exposes only one set of jump tables (*lock or *lock_with_checks).
@@ -3305,30 +2981,40 @@ void (*(*__kmp_direct_set))(kmp_dyna_loc
int (*(*__kmp_direct_unset))(kmp_dyna_lock_t *, kmp_int32) = 0;
int (*(*__kmp_direct_test))(kmp_dyna_lock_t *, kmp_int32) = 0;
-//
-// Jump tables for the indirect lock functions.
-//
-#define expand(l, op) (void (*)(kmp_user_lock_p))__kmp_##op##_##l##_##lock,
-void (*__kmp_indirect_init[])(kmp_user_lock_p) = { KMP_FOREACH_I_LOCK(expand, init) };
-void (*__kmp_indirect_destroy[])(kmp_user_lock_p) = { KMP_FOREACH_I_LOCK(expand, destroy) };
+// Jump tables for the indirect lock functions
+#define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
+void (*__kmp_indirect_init[])(kmp_user_lock_p) = {
+ KMP_FOREACH_I_LOCK(expand, init)};
+void (*__kmp_indirect_destroy[])(kmp_user_lock_p) = {
+ KMP_FOREACH_I_LOCK(expand, destroy)};
#undef expand
// set/acquire functions
-#define expand(l, op) (void (*)(kmp_user_lock_p, kmp_int32))__kmp_##op##_##l##_##lock,
-static void (*indirect_set[])(kmp_user_lock_p, kmp_int32) = { KMP_FOREACH_I_LOCK(expand, acquire) };
+#define expand(l, op) \
+ (void (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
+static void (*indirect_set[])(kmp_user_lock_p, kmp_int32) = {
+ KMP_FOREACH_I_LOCK(expand, acquire)};
#undef expand
-#define expand(l, op) (void (*)(kmp_user_lock_p, kmp_int32))__kmp_##op##_##l##_##lock_with_checks,
-static void (*indirect_set_check[])(kmp_user_lock_p, kmp_int32) = { KMP_FOREACH_I_LOCK(expand, acquire) };
+#define expand(l, op) \
+ (void (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
+static void (*indirect_set_check[])(kmp_user_lock_p, kmp_int32) = {
+ KMP_FOREACH_I_LOCK(expand, acquire)};
#undef expand
// unset/release and test functions
-#define expand(l, op) (int (*)(kmp_user_lock_p, kmp_int32))__kmp_##op##_##l##_##lock,
-static int (*indirect_unset[])(kmp_user_lock_p, kmp_int32) = { KMP_FOREACH_I_LOCK(expand, release) };
-static int (*indirect_test[])(kmp_user_lock_p, kmp_int32) = { KMP_FOREACH_I_LOCK(expand, test) };
+#define expand(l, op) \
+ (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
+static int (*indirect_unset[])(kmp_user_lock_p, kmp_int32) = {
+ KMP_FOREACH_I_LOCK(expand, release)};
+static int (*indirect_test[])(kmp_user_lock_p,
+ kmp_int32) = {KMP_FOREACH_I_LOCK(expand, test)};
#undef expand
-#define expand(l, op) (int (*)(kmp_user_lock_p, kmp_int32))__kmp_##op##_##l##_##lock_with_checks,
-static int (*indirect_unset_check[])(kmp_user_lock_p, kmp_int32) = { KMP_FOREACH_I_LOCK(expand, release) };
-static int (*indirect_test_check[])(kmp_user_lock_p, kmp_int32) = { KMP_FOREACH_I_LOCK(expand, test) };
+#define expand(l, op) \
+ (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
+static int (*indirect_unset_check[])(kmp_user_lock_p, kmp_int32) = {
+ KMP_FOREACH_I_LOCK(expand, release)};
+static int (*indirect_test_check[])(kmp_user_lock_p, kmp_int32) = {
+ KMP_FOREACH_I_LOCK(expand, test)};
#undef expand
// Exposes only one set of jump tables (*lock or *lock_with_checks).
@@ -3340,954 +3026,875 @@ int (*(*__kmp_indirect_test))(kmp_user_l
kmp_indirect_lock_table_t __kmp_i_lock_table;
// Size of indirect locks.
-static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = { 0 };
+static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = {0};
// Jump tables for lock accessor/modifier.
-void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p, const ident_t *) = { 0 };
-void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p, kmp_lock_flags_t) = { 0 };
-const ident_t * (*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p) = { 0 };
-kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p) = { 0 };
+void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
+ const ident_t *) = {0};
+void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
+ kmp_lock_flags_t) = {0};
+const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
+ kmp_user_lock_p) = {0};
+kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
+ kmp_user_lock_p) = {0};
// Use different lock pools for different lock types.
-static kmp_indirect_lock_t * __kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = { 0 };
-
-// User lock allocator for dynamically dispatched indirect locks.
-// Every entry of the indirect lock table holds the address and type of the allocated indrect lock
-// (kmp_indirect_lock_t), and the size of the table doubles when it is full. A destroyed indirect lock
-// object is returned to the reusable pool of locks, unique to each lock type.
-kmp_indirect_lock_t *
-__kmp_allocate_indirect_lock(void **user_lock, kmp_int32 gtid, kmp_indirect_locktag_t tag)
-{
- kmp_indirect_lock_t *lck;
- kmp_lock_index_t idx;
-
- __kmp_acquire_lock(&__kmp_global_lock, gtid);
-
- if (__kmp_indirect_lock_pool[tag] != NULL) {
- // Reuse the allocated and destroyed lock object
- lck = __kmp_indirect_lock_pool[tag];
- if (OMP_LOCK_T_SIZE < sizeof(void *))
- idx = lck->lock->pool.index;
- __kmp_indirect_lock_pool[tag] = (kmp_indirect_lock_t *)lck->lock->pool.next;
- KA_TRACE(20, ("__kmp_allocate_indirect_lock: reusing an existing lock %p\n", lck));
- } else {
- idx = __kmp_i_lock_table.next;
- // Check capacity and double the size if it is full
- if (idx == __kmp_i_lock_table.size) {
- // Double up the space for block pointers
- int row = __kmp_i_lock_table.size/KMP_I_LOCK_CHUNK;
- kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table;
- __kmp_i_lock_table.table = (kmp_indirect_lock_t **)__kmp_allocate(2*row*sizeof(kmp_indirect_lock_t *));
- KMP_MEMCPY(__kmp_i_lock_table.table, old_table, row*sizeof(kmp_indirect_lock_t *));
- __kmp_free(old_table);
- // Allocate new objects in the new blocks
- for (int i = row; i < 2*row; ++i)
- *(__kmp_i_lock_table.table + i) = (kmp_indirect_lock_t *)
- __kmp_allocate(KMP_I_LOCK_CHUNK*sizeof(kmp_indirect_lock_t));
- __kmp_i_lock_table.size = 2*idx;
- }
- __kmp_i_lock_table.next++;
- lck = KMP_GET_I_LOCK(idx);
- // Allocate a new base lock object
- lck->lock = (kmp_user_lock_p)__kmp_allocate(__kmp_indirect_lock_size[tag]);
- KA_TRACE(20, ("__kmp_allocate_indirect_lock: allocated a new lock %p\n", lck));
- }
+static kmp_indirect_lock_t *__kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = {0};
- __kmp_release_lock(&__kmp_global_lock, gtid);
+// User lock allocator for dynamically dispatched indirect locks. Every entry
+// of the indirect lock table holds the address and type of the allocated
+// indirect lock (kmp_indirect_lock_t), and the size of the table doubles when
+// it is full. A destroyed indirect lock object is returned to the reusable
+// pool of locks, unique to each lock type.
+kmp_indirect_lock_t *__kmp_allocate_indirect_lock(void **user_lock,
+ kmp_int32 gtid,
+ kmp_indirect_locktag_t tag) {
+ kmp_indirect_lock_t *lck;
+ kmp_lock_index_t idx;
+
+ __kmp_acquire_lock(&__kmp_global_lock, gtid);
+
+ if (__kmp_indirect_lock_pool[tag] != NULL) {
+ // Reuse the allocated and destroyed lock object
+ lck = __kmp_indirect_lock_pool[tag];
+ if (OMP_LOCK_T_SIZE < sizeof(void *))
+ idx = lck->lock->pool.index;
+ __kmp_indirect_lock_pool[tag] = (kmp_indirect_lock_t *)lck->lock->pool.next;
+ KA_TRACE(20, ("__kmp_allocate_indirect_lock: reusing an existing lock %p\n",
+ lck));
+ } else {
+ idx = __kmp_i_lock_table.next;
+ // Check capacity and double the size if it is full
+ if (idx == __kmp_i_lock_table.size) {
+ // Double up the space for block pointers
+ int row = __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK;
+ kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table;
+ __kmp_i_lock_table.table = (kmp_indirect_lock_t **)__kmp_allocate(
+ 2 * row * sizeof(kmp_indirect_lock_t *));
+ KMP_MEMCPY(__kmp_i_lock_table.table, old_table,
+ row * sizeof(kmp_indirect_lock_t *));
+ __kmp_free(old_table);
+ // Allocate new objects in the new blocks
+ for (int i = row; i < 2 * row; ++i)
+ *(__kmp_i_lock_table.table + i) = (kmp_indirect_lock_t *)__kmp_allocate(
+ KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
+ __kmp_i_lock_table.size = 2 * idx;
+ }
+ __kmp_i_lock_table.next++;
+ lck = KMP_GET_I_LOCK(idx);
+ // Allocate a new base lock object
+ lck->lock = (kmp_user_lock_p)__kmp_allocate(__kmp_indirect_lock_size[tag]);
+ KA_TRACE(20,
+ ("__kmp_allocate_indirect_lock: allocated a new lock %p\n", lck));
+ }
+
+ __kmp_release_lock(&__kmp_global_lock, gtid);
+
+ lck->type = tag;
+
+ if (OMP_LOCK_T_SIZE < sizeof(void *)) {
+ *((kmp_lock_index_t *)user_lock) = idx
+ << 1; // indirect lock word must be even
+ } else {
+ *((kmp_indirect_lock_t **)user_lock) = lck;
+ }
- lck->type = tag;
+ return lck;
+}
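A minimal sketch of the chunked, doubling table that the allocator above maintains follows. The types, chunk size, and helper names are illustrative assumptions, not the runtime's kmp_indirect_lock_table_t; what it shows is that growth only replaces the array of block pointers, so locks already handed out never move.

#include <stdlib.h>
#include <string.h>

#define CHUNK 8 /* locks per block; illustrative, not KMP_I_LOCK_CHUNK */

typedef struct { void *payload; } lock_t;

typedef struct {
  lock_t **table; /* array of pointers to fixed-size blocks */
  unsigned size;  /* total capacity, in locks */
  unsigned next;  /* next unused slot */
} lock_table_t;

/* Grow by doubling the block-pointer array and appending freshly allocated
   blocks; the existing blocks are reused as-is. */
static lock_t *table_alloc(lock_table_t *t) {
  if (t->next == t->size) {
    unsigned rows = t->size / CHUNK;
    lock_t **bigger = malloc(2 * rows * sizeof(lock_t *));
    memcpy(bigger, t->table, rows * sizeof(lock_t *));
    for (unsigned i = rows; i < 2 * rows; ++i)
      bigger[i] = calloc(CHUNK, sizeof(lock_t));
    free(t->table);
    t->table = bigger;
    t->size *= 2;
  }
  unsigned idx = t->next++;
  return &t->table[idx / CHUNK][idx % CHUNK];
}

int main(void) {
  lock_table_t t;
  t.size = CHUNK;
  t.table = malloc(sizeof(lock_t *));
  t.table[0] = calloc(CHUNK, sizeof(lock_t));
  t.next = 0;
  for (int i = 0; i < 3 * CHUNK; ++i) /* forces the table to double twice */
    (void)table_alloc(&t);
  return 0;
}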
+// User lock lookup for dynamically dispatched locks.
+static __forceinline kmp_indirect_lock_t *
+__kmp_lookup_indirect_lock(void **user_lock, const char *func) {
+ if (__kmp_env_consistency_check) {
+ kmp_indirect_lock_t *lck = NULL;
+ if (user_lock == NULL) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
if (OMP_LOCK_T_SIZE < sizeof(void *)) {
- *((kmp_lock_index_t *)user_lock) = idx << 1; // indirect lock word must be even.
+ kmp_lock_index_t idx = KMP_EXTRACT_I_INDEX(user_lock);
+ if (idx >= __kmp_i_lock_table.size) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ lck = KMP_GET_I_LOCK(idx);
} else {
- *((kmp_indirect_lock_t **)user_lock) = lck;
+ lck = *((kmp_indirect_lock_t **)user_lock);
+ }
+ if (lck == NULL) {
+ KMP_FATAL(LockIsUninitialized, func);
}
-
return lck;
-}
-
-// User lock lookup for dynamically dispatched locks.
-static __forceinline
-kmp_indirect_lock_t *
-__kmp_lookup_indirect_lock(void **user_lock, const char *func)
-{
- if (__kmp_env_consistency_check) {
- kmp_indirect_lock_t *lck = NULL;
- if (user_lock == NULL) {
- KMP_FATAL(LockIsUninitialized, func);
- }
- if (OMP_LOCK_T_SIZE < sizeof(void *)) {
- kmp_lock_index_t idx = KMP_EXTRACT_I_INDEX(user_lock);
- if (idx >= __kmp_i_lock_table.size) {
- KMP_FATAL(LockIsUninitialized, func);
- }
- lck = KMP_GET_I_LOCK(idx);
- } else {
- lck = *((kmp_indirect_lock_t **)user_lock);
- }
- if (lck == NULL) {
- KMP_FATAL(LockIsUninitialized, func);
- }
- return lck;
+ } else {
+ if (OMP_LOCK_T_SIZE < sizeof(void *)) {
+ return KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(user_lock));
} else {
- if (OMP_LOCK_T_SIZE < sizeof(void *)) {
- return KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(user_lock));
- } else {
- return *((kmp_indirect_lock_t **)user_lock);
- }
+ return *((kmp_indirect_lock_t **)user_lock);
}
+ }
}
-static void
-__kmp_init_indirect_lock(kmp_dyna_lock_t * lock, kmp_dyna_lockseq_t seq)
-{
+static void __kmp_init_indirect_lock(kmp_dyna_lock_t *lock,
+ kmp_dyna_lockseq_t seq) {
#if KMP_USE_ADAPTIVE_LOCKS
- if (seq == lockseq_adaptive && !__kmp_cpuinfo.rtm) {
- KMP_WARNING(AdaptiveNotSupported, "kmp_lockseq_t", "adaptive");
- seq = lockseq_queuing;
- }
+ if (seq == lockseq_adaptive && !__kmp_cpuinfo.rtm) {
+ KMP_WARNING(AdaptiveNotSupported, "kmp_lockseq_t", "adaptive");
+ seq = lockseq_queuing;
+ }
#endif
#if KMP_USE_TSX
- if (seq == lockseq_rtm && !__kmp_cpuinfo.rtm) {
- seq = lockseq_queuing;
- }
+ if (seq == lockseq_rtm && !__kmp_cpuinfo.rtm) {
+ seq = lockseq_queuing;
+ }
#endif
- kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
- kmp_indirect_lock_t *l = __kmp_allocate_indirect_lock((void **)lock, __kmp_entry_gtid(), tag);
- KMP_I_LOCK_FUNC(l, init)(l->lock);
- KA_TRACE(20, ("__kmp_init_indirect_lock: initialized indirect lock with type#%d\n", seq));
+ kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
+ kmp_indirect_lock_t *l =
+ __kmp_allocate_indirect_lock((void **)lock, __kmp_entry_gtid(), tag);
+ KMP_I_LOCK_FUNC(l, init)(l->lock);
+ KA_TRACE(
+ 20, ("__kmp_init_indirect_lock: initialized indirect lock with type#%d\n",
+ seq));
}
-static void
-__kmp_destroy_indirect_lock(kmp_dyna_lock_t * lock)
-{
- kmp_uint32 gtid = __kmp_entry_gtid();
- kmp_indirect_lock_t *l = __kmp_lookup_indirect_lock((void **)lock, "omp_destroy_lock");
- KMP_I_LOCK_FUNC(l, destroy)(l->lock);
- kmp_indirect_locktag_t tag = l->type;
+static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock) {
+ kmp_uint32 gtid = __kmp_entry_gtid();
+ kmp_indirect_lock_t *l =
+ __kmp_lookup_indirect_lock((void **)lock, "omp_destroy_lock");
+ KMP_I_LOCK_FUNC(l, destroy)(l->lock);
+ kmp_indirect_locktag_t tag = l->type;
- __kmp_acquire_lock(&__kmp_global_lock, gtid);
+ __kmp_acquire_lock(&__kmp_global_lock, gtid);
- // Use the base lock's space to keep the pool chain.
- l->lock->pool.next = (kmp_user_lock_p)__kmp_indirect_lock_pool[tag];
- if (OMP_LOCK_T_SIZE < sizeof(void *)) {
- l->lock->pool.index = KMP_EXTRACT_I_INDEX(lock);
- }
- __kmp_indirect_lock_pool[tag] = l;
+ // Use the base lock's space to keep the pool chain.
+ l->lock->pool.next = (kmp_user_lock_p)__kmp_indirect_lock_pool[tag];
+ if (OMP_LOCK_T_SIZE < sizeof(void *)) {
+ l->lock->pool.index = KMP_EXTRACT_I_INDEX(lock);
+ }
+ __kmp_indirect_lock_pool[tag] = l;
- __kmp_release_lock(&__kmp_global_lock, gtid);
+ __kmp_release_lock(&__kmp_global_lock, gtid);
}
-static void
-__kmp_set_indirect_lock(kmp_dyna_lock_t * lock, kmp_int32 gtid)
-{
- kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
- KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
+static void __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
+ kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
+ KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
}
-static int
-__kmp_unset_indirect_lock(kmp_dyna_lock_t * lock, kmp_int32 gtid)
-{
- kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
- return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
+static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
+ kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
+ return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
}
-static int
-__kmp_test_indirect_lock(kmp_dyna_lock_t * lock, kmp_int32 gtid)
-{
- kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
- return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
+static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
+ kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
+ return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
}
-static void
-__kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t * lock, kmp_int32 gtid)
-{
- kmp_indirect_lock_t *l = __kmp_lookup_indirect_lock((void **)lock, "omp_set_lock");
- KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
+static void __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
+ kmp_int32 gtid) {
+ kmp_indirect_lock_t *l =
+ __kmp_lookup_indirect_lock((void **)lock, "omp_set_lock");
+ KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
}
-static int
-__kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t * lock, kmp_int32 gtid)
-{
- kmp_indirect_lock_t *l = __kmp_lookup_indirect_lock((void **)lock, "omp_unset_lock");
- return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
+static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
+ kmp_int32 gtid) {
+ kmp_indirect_lock_t *l =
+ __kmp_lookup_indirect_lock((void **)lock, "omp_unset_lock");
+ return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
}
-static int
-__kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t * lock, kmp_int32 gtid)
-{
- kmp_indirect_lock_t *l = __kmp_lookup_indirect_lock((void **)lock, "omp_test_lock");
- return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
+static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
+ kmp_int32 gtid) {
+ kmp_indirect_lock_t *l =
+ __kmp_lookup_indirect_lock((void **)lock, "omp_test_lock");
+ return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
}
kmp_dyna_lockseq_t __kmp_user_lock_seq = lockseq_queuing;
// This is used only in kmp_error.cpp when consistency checking is on.
-kmp_int32
-__kmp_get_user_lock_owner(kmp_user_lock_p lck, kmp_uint32 seq)
-{
- switch (seq) {
- case lockseq_tas:
- case lockseq_nested_tas:
- return __kmp_get_tas_lock_owner((kmp_tas_lock_t *)lck);
+kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck, kmp_uint32 seq) {
+ switch (seq) {
+ case lockseq_tas:
+ case lockseq_nested_tas:
+ return __kmp_get_tas_lock_owner((kmp_tas_lock_t *)lck);
#if KMP_USE_FUTEX
- case lockseq_futex:
- case lockseq_nested_futex:
- return __kmp_get_futex_lock_owner((kmp_futex_lock_t *)lck);
-#endif
- case lockseq_ticket:
- case lockseq_nested_ticket:
- return __kmp_get_ticket_lock_owner((kmp_ticket_lock_t *)lck);
- case lockseq_queuing:
- case lockseq_nested_queuing:
+ case lockseq_futex:
+ case lockseq_nested_futex:
+ return __kmp_get_futex_lock_owner((kmp_futex_lock_t *)lck);
+#endif
+ case lockseq_ticket:
+ case lockseq_nested_ticket:
+ return __kmp_get_ticket_lock_owner((kmp_ticket_lock_t *)lck);
+ case lockseq_queuing:
+ case lockseq_nested_queuing:
#if KMP_USE_ADAPTIVE_LOCKS
- case lockseq_adaptive:
+ case lockseq_adaptive:
#endif
- return __kmp_get_queuing_lock_owner((kmp_queuing_lock_t *)lck);
- case lockseq_drdpa:
- case lockseq_nested_drdpa:
- return __kmp_get_drdpa_lock_owner((kmp_drdpa_lock_t *)lck);
- default:
- return 0;
- }
+ return __kmp_get_queuing_lock_owner((kmp_queuing_lock_t *)lck);
+ case lockseq_drdpa:
+ case lockseq_nested_drdpa:
+ return __kmp_get_drdpa_lock_owner((kmp_drdpa_lock_t *)lck);
+ default:
+ return 0;
+ }
}
// Initializes data for dynamic user locks.
-void
-__kmp_init_dynamic_user_locks()
-{
- // Initialize jump table for the lock functions
- if (__kmp_env_consistency_check) {
- __kmp_direct_set = direct_set_check;
- __kmp_direct_unset = direct_unset_check;
- __kmp_direct_test = direct_test_check;
- __kmp_indirect_set = indirect_set_check;
- __kmp_indirect_unset = indirect_unset_check;
- __kmp_indirect_test = indirect_test_check;
- }
- else {
- __kmp_direct_set = direct_set;
- __kmp_direct_unset = direct_unset;
- __kmp_direct_test = direct_test;
- __kmp_indirect_set = indirect_set;
- __kmp_indirect_unset = indirect_unset;
- __kmp_indirect_test = indirect_test;
- }
- // If the user locks have already been initialized, then return.
- // Allow the switch between different KMP_CONSISTENCY_CHECK values,
- // but do not allocate new lock tables if they have already been
- // allocated.
- if (__kmp_init_user_locks)
- return;
-
- // Initialize lock index table
- __kmp_i_lock_table.size = KMP_I_LOCK_CHUNK;
- __kmp_i_lock_table.table = (kmp_indirect_lock_t **)__kmp_allocate(sizeof(kmp_indirect_lock_t *));
- *(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)
- __kmp_allocate(KMP_I_LOCK_CHUNK*sizeof(kmp_indirect_lock_t));
- __kmp_i_lock_table.next = 0;
-
- // Indirect lock size
- __kmp_indirect_lock_size[locktag_ticket] = sizeof(kmp_ticket_lock_t);
- __kmp_indirect_lock_size[locktag_queuing] = sizeof(kmp_queuing_lock_t);
+void __kmp_init_dynamic_user_locks() {
+ // Initialize jump table for the lock functions
+ if (__kmp_env_consistency_check) {
+ __kmp_direct_set = direct_set_check;
+ __kmp_direct_unset = direct_unset_check;
+ __kmp_direct_test = direct_test_check;
+ __kmp_indirect_set = indirect_set_check;
+ __kmp_indirect_unset = indirect_unset_check;
+ __kmp_indirect_test = indirect_test_check;
+ } else {
+ __kmp_direct_set = direct_set;
+ __kmp_direct_unset = direct_unset;
+ __kmp_direct_test = direct_test;
+ __kmp_indirect_set = indirect_set;
+ __kmp_indirect_unset = indirect_unset;
+ __kmp_indirect_test = indirect_test;
+ }
+ // If the user locks have already been initialized, then return. Allow the
+ // switch between different KMP_CONSISTENCY_CHECK values, but do not allocate
+ // new lock tables if they have already been allocated.
+ if (__kmp_init_user_locks)
+ return;
+
+ // Initialize lock index table
+ __kmp_i_lock_table.size = KMP_I_LOCK_CHUNK;
+ __kmp_i_lock_table.table =
+ (kmp_indirect_lock_t **)__kmp_allocate(sizeof(kmp_indirect_lock_t *));
+ *(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)__kmp_allocate(
+ KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
+ __kmp_i_lock_table.next = 0;
+
+ // Indirect lock size
+ __kmp_indirect_lock_size[locktag_ticket] = sizeof(kmp_ticket_lock_t);
+ __kmp_indirect_lock_size[locktag_queuing] = sizeof(kmp_queuing_lock_t);
#if KMP_USE_ADAPTIVE_LOCKS
- __kmp_indirect_lock_size[locktag_adaptive] = sizeof(kmp_adaptive_lock_t);
+ __kmp_indirect_lock_size[locktag_adaptive] = sizeof(kmp_adaptive_lock_t);
#endif
- __kmp_indirect_lock_size[locktag_drdpa] = sizeof(kmp_drdpa_lock_t);
+ __kmp_indirect_lock_size[locktag_drdpa] = sizeof(kmp_drdpa_lock_t);
#if KMP_USE_TSX
- __kmp_indirect_lock_size[locktag_rtm] = sizeof(kmp_queuing_lock_t);
+ __kmp_indirect_lock_size[locktag_rtm] = sizeof(kmp_queuing_lock_t);
#endif
- __kmp_indirect_lock_size[locktag_nested_tas] = sizeof(kmp_tas_lock_t);
+ __kmp_indirect_lock_size[locktag_nested_tas] = sizeof(kmp_tas_lock_t);
#if KMP_USE_FUTEX
- __kmp_indirect_lock_size[locktag_nested_futex] = sizeof(kmp_futex_lock_t);
+ __kmp_indirect_lock_size[locktag_nested_futex] = sizeof(kmp_futex_lock_t);
#endif
- __kmp_indirect_lock_size[locktag_nested_ticket] = sizeof(kmp_ticket_lock_t);
- __kmp_indirect_lock_size[locktag_nested_queuing] = sizeof(kmp_queuing_lock_t);
- __kmp_indirect_lock_size[locktag_nested_drdpa] = sizeof(kmp_drdpa_lock_t);
-
- // Initialize lock accessor/modifier
-#define fill_jumps(table, expand, sep) { \
- table[locktag##sep##ticket] = expand(ticket); \
- table[locktag##sep##queuing] = expand(queuing); \
- table[locktag##sep##drdpa] = expand(drdpa); \
-}
+ __kmp_indirect_lock_size[locktag_nested_ticket] = sizeof(kmp_ticket_lock_t);
+ __kmp_indirect_lock_size[locktag_nested_queuing] = sizeof(kmp_queuing_lock_t);
+ __kmp_indirect_lock_size[locktag_nested_drdpa] = sizeof(kmp_drdpa_lock_t);
+
+// Initialize lock accessor/modifier
+#define fill_jumps(table, expand, sep) \
+ { \
+ table[locktag##sep##ticket] = expand(ticket); \
+ table[locktag##sep##queuing] = expand(queuing); \
+ table[locktag##sep##drdpa] = expand(drdpa); \
+ }
#if KMP_USE_ADAPTIVE_LOCKS
-# define fill_table(table, expand) { \
- fill_jumps(table, expand, _); \
- table[locktag_adaptive] = expand(queuing); \
- fill_jumps(table, expand, _nested_); \
-}
+#define fill_table(table, expand) \
+ { \
+ fill_jumps(table, expand, _); \
+ table[locktag_adaptive] = expand(queuing); \
+ fill_jumps(table, expand, _nested_); \
+ }
#else
-# define fill_table(table, expand) { \
- fill_jumps(table, expand, _); \
- fill_jumps(table, expand, _nested_); \
-}
+#define fill_table(table, expand) \
+ { \
+ fill_jumps(table, expand, _); \
+ fill_jumps(table, expand, _nested_); \
+ }
#endif // KMP_USE_ADAPTIVE_LOCKS
-#define expand(l) (void (*)(kmp_user_lock_p, const ident_t *))__kmp_set_##l##_lock_location
- fill_table(__kmp_indirect_set_location, expand);
+#define expand(l) \
+ (void (*)(kmp_user_lock_p, const ident_t *)) __kmp_set_##l##_lock_location
+ fill_table(__kmp_indirect_set_location, expand);
#undef expand
-#define expand(l) (void (*)(kmp_user_lock_p, kmp_lock_flags_t))__kmp_set_##l##_lock_flags
- fill_table(__kmp_indirect_set_flags, expand);
+#define expand(l) \
+ (void (*)(kmp_user_lock_p, kmp_lock_flags_t)) __kmp_set_##l##_lock_flags
+ fill_table(__kmp_indirect_set_flags, expand);
#undef expand
-#define expand(l) (const ident_t * (*)(kmp_user_lock_p))__kmp_get_##l##_lock_location
- fill_table(__kmp_indirect_get_location, expand);
+#define expand(l) \
+ (const ident_t *(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_location
+ fill_table(__kmp_indirect_get_location, expand);
#undef expand
-#define expand(l) (kmp_lock_flags_t (*)(kmp_user_lock_p))__kmp_get_##l##_lock_flags
- fill_table(__kmp_indirect_get_flags, expand);
+#define expand(l) \
+ (kmp_lock_flags_t(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_flags
+ fill_table(__kmp_indirect_get_flags, expand);
#undef expand
- __kmp_init_user_locks = TRUE;
+ __kmp_init_user_locks = TRUE;
}
// Clean up the lock table.
-void
-__kmp_cleanup_indirect_user_locks()
-{
- kmp_lock_index_t i;
- int k;
-
- // Clean up locks in the pools first (they were already destroyed before going into the pools).
- for (k = 0; k < KMP_NUM_I_LOCKS; ++k) {
- kmp_indirect_lock_t *l = __kmp_indirect_lock_pool[k];
- while (l != NULL) {
- kmp_indirect_lock_t *ll = l;
- l = (kmp_indirect_lock_t *)l->lock->pool.next;
- KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: freeing %p from pool\n", ll));
- __kmp_free(ll->lock);
- ll->lock = NULL;
- }
- __kmp_indirect_lock_pool[k] = NULL;
- }
- // Clean up the remaining undestroyed locks.
- for (i = 0; i < __kmp_i_lock_table.next; i++) {
- kmp_indirect_lock_t *l = KMP_GET_I_LOCK(i);
- if (l->lock != NULL) {
- // Locks not destroyed explicitly need to be destroyed here.
- KMP_I_LOCK_FUNC(l, destroy)(l->lock);
- KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: destroy/freeing %p from table\n", l));
- __kmp_free(l->lock);
- }
- }
- // Free the table
- for (i = 0; i < __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK; i++)
- __kmp_free(__kmp_i_lock_table.table[i]);
- __kmp_free(__kmp_i_lock_table.table);
+void __kmp_cleanup_indirect_user_locks() {
+ kmp_lock_index_t i;
+ int k;
+
+ // Clean up locks in the pools first (they were already destroyed before going
+ // into the pools).
+ for (k = 0; k < KMP_NUM_I_LOCKS; ++k) {
+ kmp_indirect_lock_t *l = __kmp_indirect_lock_pool[k];
+ while (l != NULL) {
+ kmp_indirect_lock_t *ll = l;
+ l = (kmp_indirect_lock_t *)l->lock->pool.next;
+ KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: freeing %p from pool\n",
+ ll));
+ __kmp_free(ll->lock);
+ ll->lock = NULL;
+ }
+ __kmp_indirect_lock_pool[k] = NULL;
+ }
+ // Clean up the remaining undestroyed locks.
+ for (i = 0; i < __kmp_i_lock_table.next; i++) {
+ kmp_indirect_lock_t *l = KMP_GET_I_LOCK(i);
+ if (l->lock != NULL) {
+ // Locks not destroyed explicitly need to be destroyed here.
+ KMP_I_LOCK_FUNC(l, destroy)(l->lock);
+ KA_TRACE(
+ 20,
+ ("__kmp_cleanup_indirect_user_locks: destroy/freeing %p from table\n",
+ l));
+ __kmp_free(l->lock);
+ }
+ }
+ // Free the table
+ for (i = 0; i < __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK; i++)
+ __kmp_free(__kmp_i_lock_table.table[i]);
+ __kmp_free(__kmp_i_lock_table.table);
- __kmp_init_user_locks = FALSE;
+ __kmp_init_user_locks = FALSE;
}
enum kmp_lock_kind __kmp_user_lock_kind = lk_default;
-int __kmp_num_locks_in_block = 1; // FIXME - tune this value
+int __kmp_num_locks_in_block = 1; // FIXME - tune this value
#else // KMP_USE_DYNAMIC_LOCK
-/* ------------------------------------------------------------------------ */
/* user locks
- *
* They are implemented as a table of function pointers which are set to the
- * lock functions of the appropriate kind, once that has been determined.
- */
+ * lock functions of the appropriate kind, once that has been determined. */
enum kmp_lock_kind __kmp_user_lock_kind = lk_default;
size_t __kmp_base_user_lock_size = 0;
size_t __kmp_user_lock_size = 0;
-kmp_int32 ( *__kmp_get_user_lock_owner_ )( kmp_user_lock_p lck ) = NULL;
-int ( *__kmp_acquire_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
+kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck) = NULL;
+int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
+ kmp_int32 gtid) = NULL;
+
+int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
+ kmp_int32 gtid) = NULL;
+int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
+ kmp_int32 gtid) = NULL;
+void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
+void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck) = NULL;
+void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
+int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
+ kmp_int32 gtid) = NULL;
+
+int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
+ kmp_int32 gtid) = NULL;
+int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
+ kmp_int32 gtid) = NULL;
+void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
+void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
+
+int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck) = NULL;
+const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck) = NULL;
+void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
+ const ident_t *loc) = NULL;
+kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck) = NULL;
+void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
+ kmp_lock_flags_t flags) = NULL;
+
+void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind) {
+ switch (user_lock_kind) {
+ case lk_default:
+ default:
+ KMP_ASSERT(0);
+
+ case lk_tas: {
+ __kmp_base_user_lock_size = sizeof(kmp_base_tas_lock_t);
+ __kmp_user_lock_size = sizeof(kmp_tas_lock_t);
-int ( *__kmp_test_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
-int ( *__kmp_release_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
-void ( *__kmp_init_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;
-void ( *__kmp_destroy_user_lock_ )( kmp_user_lock_p lck ) = NULL;
-void ( *__kmp_destroy_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;
-int ( *__kmp_acquire_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
-
-int ( *__kmp_test_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
-int ( *__kmp_release_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
-void ( *__kmp_init_nested_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;
-void ( *__kmp_destroy_nested_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;
-
-int ( *__kmp_is_user_lock_initialized_ )( kmp_user_lock_p lck ) = NULL;
-const ident_t * ( *__kmp_get_user_lock_location_ )( kmp_user_lock_p lck ) = NULL;
-void ( *__kmp_set_user_lock_location_ )( kmp_user_lock_p lck, const ident_t *loc ) = NULL;
-kmp_lock_flags_t ( *__kmp_get_user_lock_flags_ )( kmp_user_lock_p lck ) = NULL;
-void ( *__kmp_set_user_lock_flags_ )( kmp_user_lock_p lck, kmp_lock_flags_t flags ) = NULL;
-
-void __kmp_set_user_lock_vptrs( kmp_lock_kind_t user_lock_kind )
-{
- switch ( user_lock_kind ) {
- case lk_default:
- default:
- KMP_ASSERT( 0 );
-
- case lk_tas: {
- __kmp_base_user_lock_size = sizeof( kmp_base_tas_lock_t );
- __kmp_user_lock_size = sizeof( kmp_tas_lock_t );
-
- __kmp_get_user_lock_owner_ =
- ( kmp_int32 ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_tas_lock_owner );
-
- if ( __kmp_env_consistency_check ) {
- KMP_BIND_USER_LOCK_WITH_CHECKS(tas);
- KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(tas);
- }
- else {
- KMP_BIND_USER_LOCK(tas);
- KMP_BIND_NESTED_USER_LOCK(tas);
- }
-
- __kmp_destroy_user_lock_ =
- ( void ( * )( kmp_user_lock_p ) )
- ( &__kmp_destroy_tas_lock );
-
- __kmp_is_user_lock_initialized_ =
- ( int ( * )( kmp_user_lock_p ) ) NULL;
-
- __kmp_get_user_lock_location_ =
- ( const ident_t * ( * )( kmp_user_lock_p ) ) NULL;
+ __kmp_get_user_lock_owner_ =
+ (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_tas_lock_owner);
- __kmp_set_user_lock_location_ =
- ( void ( * )( kmp_user_lock_p, const ident_t * ) ) NULL;
+ if (__kmp_env_consistency_check) {
+ KMP_BIND_USER_LOCK_WITH_CHECKS(tas);
+ KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(tas);
+ } else {
+ KMP_BIND_USER_LOCK(tas);
+ KMP_BIND_NESTED_USER_LOCK(tas);
+ }
- __kmp_get_user_lock_flags_ =
- ( kmp_lock_flags_t ( * )( kmp_user_lock_p ) ) NULL;
+ __kmp_destroy_user_lock_ =
+ (void (*)(kmp_user_lock_p))(&__kmp_destroy_tas_lock);
- __kmp_set_user_lock_flags_ =
- ( void ( * )( kmp_user_lock_p, kmp_lock_flags_t ) ) NULL;
- }
- break;
+ __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;
+
+ __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;
+
+ __kmp_set_user_lock_location_ =
+ (void (*)(kmp_user_lock_p, const ident_t *))NULL;
+
+ __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;
+
+ __kmp_set_user_lock_flags_ =
+ (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
+ } break;
#if KMP_USE_FUTEX
- case lk_futex: {
- __kmp_base_user_lock_size = sizeof( kmp_base_futex_lock_t );
- __kmp_user_lock_size = sizeof( kmp_futex_lock_t );
-
- __kmp_get_user_lock_owner_ =
- ( kmp_int32 ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_futex_lock_owner );
-
- if ( __kmp_env_consistency_check ) {
- KMP_BIND_USER_LOCK_WITH_CHECKS(futex);
- KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(futex);
- }
- else {
- KMP_BIND_USER_LOCK(futex);
- KMP_BIND_NESTED_USER_LOCK(futex);
- }
-
- __kmp_destroy_user_lock_ =
- ( void ( * )( kmp_user_lock_p ) )
- ( &__kmp_destroy_futex_lock );
-
- __kmp_is_user_lock_initialized_ =
- ( int ( * )( kmp_user_lock_p ) ) NULL;
-
- __kmp_get_user_lock_location_ =
- ( const ident_t * ( * )( kmp_user_lock_p ) ) NULL;
+ case lk_futex: {
+ __kmp_base_user_lock_size = sizeof(kmp_base_futex_lock_t);
+ __kmp_user_lock_size = sizeof(kmp_futex_lock_t);
- __kmp_set_user_lock_location_ =
- ( void ( * )( kmp_user_lock_p, const ident_t * ) ) NULL;
+ __kmp_get_user_lock_owner_ =
+ (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_futex_lock_owner);
- __kmp_get_user_lock_flags_ =
- ( kmp_lock_flags_t ( * )( kmp_user_lock_p ) ) NULL;
+ if (__kmp_env_consistency_check) {
+ KMP_BIND_USER_LOCK_WITH_CHECKS(futex);
+ KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(futex);
+ } else {
+ KMP_BIND_USER_LOCK(futex);
+ KMP_BIND_NESTED_USER_LOCK(futex);
+ }
- __kmp_set_user_lock_flags_ =
- ( void ( * )( kmp_user_lock_p, kmp_lock_flags_t ) ) NULL;
- }
- break;
+ __kmp_destroy_user_lock_ =
+ (void (*)(kmp_user_lock_p))(&__kmp_destroy_futex_lock);
+
+ __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;
+
+ __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;
+
+ __kmp_set_user_lock_location_ =
+ (void (*)(kmp_user_lock_p, const ident_t *))NULL;
+
+ __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;
+
+ __kmp_set_user_lock_flags_ =
+ (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
+ } break;
#endif // KMP_USE_FUTEX
- case lk_ticket: {
- __kmp_base_user_lock_size = sizeof( kmp_base_ticket_lock_t );
- __kmp_user_lock_size = sizeof( kmp_ticket_lock_t );
-
- __kmp_get_user_lock_owner_ =
- ( kmp_int32 ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_ticket_lock_owner );
-
- if ( __kmp_env_consistency_check ) {
- KMP_BIND_USER_LOCK_WITH_CHECKS(ticket);
- KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(ticket);
- }
- else {
- KMP_BIND_USER_LOCK(ticket);
- KMP_BIND_NESTED_USER_LOCK(ticket);
- }
-
- __kmp_destroy_user_lock_ =
- ( void ( * )( kmp_user_lock_p ) )
- ( &__kmp_destroy_ticket_lock );
-
- __kmp_is_user_lock_initialized_ =
- ( int ( * )( kmp_user_lock_p ) )
- ( &__kmp_is_ticket_lock_initialized );
-
- __kmp_get_user_lock_location_ =
- ( const ident_t * ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_ticket_lock_location );
-
- __kmp_set_user_lock_location_ =
- ( void ( * )( kmp_user_lock_p, const ident_t * ) )
- ( &__kmp_set_ticket_lock_location );
-
- __kmp_get_user_lock_flags_ =
- ( kmp_lock_flags_t ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_ticket_lock_flags );
-
- __kmp_set_user_lock_flags_ =
- ( void ( * )( kmp_user_lock_p, kmp_lock_flags_t ) )
- ( &__kmp_set_ticket_lock_flags );
- }
- break;
+ case lk_ticket: {
+ __kmp_base_user_lock_size = sizeof(kmp_base_ticket_lock_t);
+ __kmp_user_lock_size = sizeof(kmp_ticket_lock_t);
- case lk_queuing: {
- __kmp_base_user_lock_size = sizeof( kmp_base_queuing_lock_t );
- __kmp_user_lock_size = sizeof( kmp_queuing_lock_t );
-
- __kmp_get_user_lock_owner_ =
- ( kmp_int32 ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_queuing_lock_owner );
-
- if ( __kmp_env_consistency_check ) {
- KMP_BIND_USER_LOCK_WITH_CHECKS(queuing);
- KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(queuing);
- }
- else {
- KMP_BIND_USER_LOCK(queuing);
- KMP_BIND_NESTED_USER_LOCK(queuing);
- }
-
- __kmp_destroy_user_lock_ =
- ( void ( * )( kmp_user_lock_p ) )
- ( &__kmp_destroy_queuing_lock );
-
- __kmp_is_user_lock_initialized_ =
- ( int ( * )( kmp_user_lock_p ) )
- ( &__kmp_is_queuing_lock_initialized );
-
- __kmp_get_user_lock_location_ =
- ( const ident_t * ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_queuing_lock_location );
-
- __kmp_set_user_lock_location_ =
- ( void ( * )( kmp_user_lock_p, const ident_t * ) )
- ( &__kmp_set_queuing_lock_location );
-
- __kmp_get_user_lock_flags_ =
- ( kmp_lock_flags_t ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_queuing_lock_flags );
-
- __kmp_set_user_lock_flags_ =
- ( void ( * )( kmp_user_lock_p, kmp_lock_flags_t ) )
- ( &__kmp_set_queuing_lock_flags );
- }
- break;
+ __kmp_get_user_lock_owner_ =
+ (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_owner);
+
+ if (__kmp_env_consistency_check) {
+ KMP_BIND_USER_LOCK_WITH_CHECKS(ticket);
+ KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(ticket);
+ } else {
+ KMP_BIND_USER_LOCK(ticket);
+ KMP_BIND_NESTED_USER_LOCK(ticket);
+ }
+
+ __kmp_destroy_user_lock_ =
+ (void (*)(kmp_user_lock_p))(&__kmp_destroy_ticket_lock);
+
+ __kmp_is_user_lock_initialized_ =
+ (int (*)(kmp_user_lock_p))(&__kmp_is_ticket_lock_initialized);
+
+ __kmp_get_user_lock_location_ =
+ (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_location);
+
+ __kmp_set_user_lock_location_ = (void (*)(
+ kmp_user_lock_p, const ident_t *))(&__kmp_set_ticket_lock_location);
+
+ __kmp_get_user_lock_flags_ =
+ (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_flags);
+
+ __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
+ &__kmp_set_ticket_lock_flags);
+ } break;
+
+ case lk_queuing: {
+ __kmp_base_user_lock_size = sizeof(kmp_base_queuing_lock_t);
+ __kmp_user_lock_size = sizeof(kmp_queuing_lock_t);
+
+ __kmp_get_user_lock_owner_ =
+ (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);
+
+ if (__kmp_env_consistency_check) {
+ KMP_BIND_USER_LOCK_WITH_CHECKS(queuing);
+ KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(queuing);
+ } else {
+ KMP_BIND_USER_LOCK(queuing);
+ KMP_BIND_NESTED_USER_LOCK(queuing);
+ }
+
+ __kmp_destroy_user_lock_ =
+ (void (*)(kmp_user_lock_p))(&__kmp_destroy_queuing_lock);
+
+ __kmp_is_user_lock_initialized_ =
+ (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);
+
+ __kmp_get_user_lock_location_ =
+ (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);
+
+ __kmp_set_user_lock_location_ = (void (*)(
+ kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);
+
+ __kmp_get_user_lock_flags_ =
+ (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);
+
+ __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
+ &__kmp_set_queuing_lock_flags);
+ } break;
#if KMP_USE_ADAPTIVE_LOCKS
- case lk_adaptive: {
- __kmp_base_user_lock_size = sizeof( kmp_base_adaptive_lock_t );
- __kmp_user_lock_size = sizeof( kmp_adaptive_lock_t );
-
- __kmp_get_user_lock_owner_ =
- ( kmp_int32 ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_queuing_lock_owner );
-
- if ( __kmp_env_consistency_check ) {
- KMP_BIND_USER_LOCK_WITH_CHECKS(adaptive);
- }
- else {
- KMP_BIND_USER_LOCK(adaptive);
- }
-
- __kmp_destroy_user_lock_ =
- ( void ( * )( kmp_user_lock_p ) )
- ( &__kmp_destroy_adaptive_lock );
-
- __kmp_is_user_lock_initialized_ =
- ( int ( * )( kmp_user_lock_p ) )
- ( &__kmp_is_queuing_lock_initialized );
-
- __kmp_get_user_lock_location_ =
- ( const ident_t * ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_queuing_lock_location );
-
- __kmp_set_user_lock_location_ =
- ( void ( * )( kmp_user_lock_p, const ident_t * ) )
- ( &__kmp_set_queuing_lock_location );
-
- __kmp_get_user_lock_flags_ =
- ( kmp_lock_flags_t ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_queuing_lock_flags );
-
- __kmp_set_user_lock_flags_ =
- ( void ( * )( kmp_user_lock_p, kmp_lock_flags_t ) )
- ( &__kmp_set_queuing_lock_flags );
+ case lk_adaptive: {
+ __kmp_base_user_lock_size = sizeof(kmp_base_adaptive_lock_t);
+ __kmp_user_lock_size = sizeof(kmp_adaptive_lock_t);
- }
- break;
+ __kmp_get_user_lock_owner_ =
+ (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);
+
+ if (__kmp_env_consistency_check) {
+ KMP_BIND_USER_LOCK_WITH_CHECKS(adaptive);
+ } else {
+ KMP_BIND_USER_LOCK(adaptive);
+ }
+
+ __kmp_destroy_user_lock_ =
+ (void (*)(kmp_user_lock_p))(&__kmp_destroy_adaptive_lock);
+
+ __kmp_is_user_lock_initialized_ =
+ (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);
+
+ __kmp_get_user_lock_location_ =
+ (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);
+
+ __kmp_set_user_lock_location_ = (void (*)(
+ kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);
+
+ __kmp_get_user_lock_flags_ =
+ (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);
+
+ __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
+ &__kmp_set_queuing_lock_flags);
+
+ } break;
#endif // KMP_USE_ADAPTIVE_LOCKS
- case lk_drdpa: {
- __kmp_base_user_lock_size = sizeof( kmp_base_drdpa_lock_t );
- __kmp_user_lock_size = sizeof( kmp_drdpa_lock_t );
-
- __kmp_get_user_lock_owner_ =
- ( kmp_int32 ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_drdpa_lock_owner );
-
- if ( __kmp_env_consistency_check ) {
- KMP_BIND_USER_LOCK_WITH_CHECKS(drdpa);
- KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(drdpa);
- }
- else {
- KMP_BIND_USER_LOCK(drdpa);
- KMP_BIND_NESTED_USER_LOCK(drdpa);
- }
-
- __kmp_destroy_user_lock_ =
- ( void ( * )( kmp_user_lock_p ) )
- ( &__kmp_destroy_drdpa_lock );
-
- __kmp_is_user_lock_initialized_ =
- ( int ( * )( kmp_user_lock_p ) )
- ( &__kmp_is_drdpa_lock_initialized );
-
- __kmp_get_user_lock_location_ =
- ( const ident_t * ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_drdpa_lock_location );
-
- __kmp_set_user_lock_location_ =
- ( void ( * )( kmp_user_lock_p, const ident_t * ) )
- ( &__kmp_set_drdpa_lock_location );
-
- __kmp_get_user_lock_flags_ =
- ( kmp_lock_flags_t ( * )( kmp_user_lock_p ) )
- ( &__kmp_get_drdpa_lock_flags );
-
- __kmp_set_user_lock_flags_ =
- ( void ( * )( kmp_user_lock_p, kmp_lock_flags_t ) )
- ( &__kmp_set_drdpa_lock_flags );
- }
- break;
+ case lk_drdpa: {
+ __kmp_base_user_lock_size = sizeof(kmp_base_drdpa_lock_t);
+ __kmp_user_lock_size = sizeof(kmp_drdpa_lock_t);
+
+ __kmp_get_user_lock_owner_ =
+ (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_owner);
+
+ if (__kmp_env_consistency_check) {
+ KMP_BIND_USER_LOCK_WITH_CHECKS(drdpa);
+ KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(drdpa);
+ } else {
+ KMP_BIND_USER_LOCK(drdpa);
+ KMP_BIND_NESTED_USER_LOCK(drdpa);
}
-}
+ __kmp_destroy_user_lock_ =
+ (void (*)(kmp_user_lock_p))(&__kmp_destroy_drdpa_lock);
+
+ __kmp_is_user_lock_initialized_ =
+ (int (*)(kmp_user_lock_p))(&__kmp_is_drdpa_lock_initialized);
+
+ __kmp_get_user_lock_location_ =
+ (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_location);
+
+ __kmp_set_user_lock_location_ = (void (*)(
+ kmp_user_lock_p, const ident_t *))(&__kmp_set_drdpa_lock_location);
+
+ __kmp_get_user_lock_flags_ =
+ (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_flags);
+
+ __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
+ &__kmp_set_drdpa_lock_flags);
+ } break;
+ }
+}
// ----------------------------------------------------------------------------
// User lock table & lock allocation
-kmp_lock_table_t __kmp_user_lock_table = { 1, 0, NULL };
+kmp_lock_table_t __kmp_user_lock_table = {1, 0, NULL};
kmp_user_lock_p __kmp_lock_pool = NULL;
// Lock block-allocation support.
-kmp_block_of_locks* __kmp_lock_blocks = NULL;
-int __kmp_num_locks_in_block = 1; // FIXME - tune this value
+kmp_block_of_locks *__kmp_lock_blocks = NULL;
+int __kmp_num_locks_in_block = 1; // FIXME - tune this value
-static kmp_lock_index_t
-__kmp_lock_table_insert( kmp_user_lock_p lck )
-{
- // Assume that kmp_global_lock is held upon entry/exit.
- kmp_lock_index_t index;
- if ( __kmp_user_lock_table.used >= __kmp_user_lock_table.allocated ) {
- kmp_lock_index_t size;
- kmp_user_lock_p *table;
- // Reallocate lock table.
- if ( __kmp_user_lock_table.allocated == 0 ) {
- size = 1024;
- }
- else {
- size = __kmp_user_lock_table.allocated * 2;
- }
- table = (kmp_user_lock_p *)__kmp_allocate( sizeof( kmp_user_lock_p ) * size );
- KMP_MEMCPY( table + 1, __kmp_user_lock_table.table + 1, sizeof( kmp_user_lock_p ) * ( __kmp_user_lock_table.used - 1 ) );
- table[ 0 ] = (kmp_user_lock_p)__kmp_user_lock_table.table;
- // We cannot free the previous table now, since it may be in use by other
- // threads. So save the pointer to the previous table in in the first element of the
- // new table. All the tables will be organized into a list, and could be freed when
- // library shutting down.
- __kmp_user_lock_table.table = table;
- __kmp_user_lock_table.allocated = size;
- }
- KMP_DEBUG_ASSERT( __kmp_user_lock_table.used < __kmp_user_lock_table.allocated );
- index = __kmp_user_lock_table.used;
- __kmp_user_lock_table.table[ index ] = lck;
- ++ __kmp_user_lock_table.used;
- return index;
-}
-
-static kmp_user_lock_p
-__kmp_lock_block_allocate()
-{
- // Assume that kmp_global_lock is held upon entry/exit.
- static int last_index = 0;
- if ( ( last_index >= __kmp_num_locks_in_block )
- || ( __kmp_lock_blocks == NULL ) ) {
- // Restart the index.
- last_index = 0;
- // Need to allocate a new block.
- KMP_DEBUG_ASSERT( __kmp_user_lock_size > 0 );
- size_t space_for_locks = __kmp_user_lock_size * __kmp_num_locks_in_block;
- char* buffer = (char*)__kmp_allocate( space_for_locks + sizeof( kmp_block_of_locks ) );
- // Set up the new block.
- kmp_block_of_locks *new_block = (kmp_block_of_locks *)(& buffer[space_for_locks]);
- new_block->next_block = __kmp_lock_blocks;
- new_block->locks = (void *)buffer;
- // Publish the new block.
- KMP_MB();
- __kmp_lock_blocks = new_block;
+static kmp_lock_index_t __kmp_lock_table_insert(kmp_user_lock_p lck) {
+ // Assume that kmp_global_lock is held upon entry/exit.
+ kmp_lock_index_t index;
+ if (__kmp_user_lock_table.used >= __kmp_user_lock_table.allocated) {
+ kmp_lock_index_t size;
+ kmp_user_lock_p *table;
+ // Reallocate lock table.
+ if (__kmp_user_lock_table.allocated == 0) {
+ size = 1024;
+ } else {
+ size = __kmp_user_lock_table.allocated * 2;
}
- kmp_user_lock_p ret = (kmp_user_lock_p)(& ( ( (char *)( __kmp_lock_blocks->locks ) )
- [ last_index * __kmp_user_lock_size ] ) );
- last_index++;
- return ret;
+ table = (kmp_user_lock_p *)__kmp_allocate(sizeof(kmp_user_lock_p) * size);
+ KMP_MEMCPY(table + 1, __kmp_user_lock_table.table + 1,
+ sizeof(kmp_user_lock_p) * (__kmp_user_lock_table.used - 1));
+ table[0] = (kmp_user_lock_p)__kmp_user_lock_table.table;
+ // We cannot free the previous table now, since it may be in use by other
+ // threads. So save the pointer to the previous table in the first element
+ // of the new table. All the tables will be organized into a list, and can
+ // be freed when the library is shutting down.
+ __kmp_user_lock_table.table = table;
+ __kmp_user_lock_table.allocated = size;
+ }
+ KMP_DEBUG_ASSERT(__kmp_user_lock_table.used <
+ __kmp_user_lock_table.allocated);
+ index = __kmp_user_lock_table.used;
+ __kmp_user_lock_table.table[index] = lck;
+ ++__kmp_user_lock_table.used;
+ return index;
+}
+
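The reallocation strategy in __kmp_lock_table_insert, which stashes the outgrown table behind slot 0 of the new one so the whole chain can be freed at shutdown, can be sketched in isolation as follows. The simplified types and the 8-entry starting size are assumptions for illustration only.

#include <stdlib.h>
#include <string.h>

typedef struct {
  void **table;       /* slot 0 chains to the previous table; slots 1.. hold locks */
  unsigned allocated; /* capacity, including slot 0 */
  unsigned used;      /* next free slot; starts at 1 */
} table_t;

static unsigned insert(table_t *t, void *lck) {
  if (t->used >= t->allocated) {
    unsigned size = t->allocated ? t->allocated * 2 : 8;
    void **bigger = malloc(size * sizeof(void *));
    /* Copy the live entries. The old table may still be read by other threads,
       so it is not freed here; it stays reachable through slot 0 of the new
       table and the whole chain is released at shutdown. */
    if (t->used > 1)
      memcpy(bigger + 1, t->table + 1, (t->used - 1) * sizeof(void *));
    bigger[0] = t->table;
    t->table = bigger;
    t->allocated = size;
  }
  unsigned idx = t->used++;
  t->table[idx] = lck;
  return idx;
}

int main(void) {
  table_t t = {NULL, 0, 1}; /* same starting state as the runtime's table, field order aside */
  int dummy;
  for (int i = 0; i < 20; ++i)
    (void)insert(&t, &dummy);
  return 0;
}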
+static kmp_user_lock_p __kmp_lock_block_allocate() {
+ // Assume that kmp_global_lock is held upon entry/exit.
+ static int last_index = 0;
+ if ((last_index >= __kmp_num_locks_in_block) || (__kmp_lock_blocks == NULL)) {
+ // Restart the index.
+ last_index = 0;
+ // Need to allocate a new block.
+ KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
+ size_t space_for_locks = __kmp_user_lock_size * __kmp_num_locks_in_block;
+ char *buffer =
+ (char *)__kmp_allocate(space_for_locks + sizeof(kmp_block_of_locks));
+ // Set up the new block.
+ kmp_block_of_locks *new_block =
+ (kmp_block_of_locks *)(&buffer[space_for_locks]);
+ new_block->next_block = __kmp_lock_blocks;
+ new_block->locks = (void *)buffer;
+ // Publish the new block.
+ KMP_MB();
+ __kmp_lock_blocks = new_block;
+ }
+ kmp_user_lock_p ret = (kmp_user_lock_p)(&(
+ ((char *)(__kmp_lock_blocks->locks))[last_index * __kmp_user_lock_size]));
+ last_index++;
+ return ret;
}
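How a block of locks is laid out (locks_per_block lock-sized slots followed by the block header, all in one allocation) can likewise be sketched on its own. kmp_block_of_locks is the structure used above, but the field layout, sizes, and helper name below are illustrative assumptions.

#include <stdlib.h>

typedef struct block_of_locks {
  struct block_of_locks *next_block; /* chain of blocks, newest first */
  void *locks;                       /* start of the lock storage */
} block_of_locks_t;

/* One allocation holds the lock storage followed by the block header; the
   header sits just past the lock slots, and blocks are chained so everything
   can be walked and freed at shutdown. */
static block_of_locks_t *alloc_block(size_t lock_size, int locks_per_block,
                                     block_of_locks_t *head) {
  size_t space_for_locks = lock_size * locks_per_block;
  char *buffer = malloc(space_for_locks + sizeof(block_of_locks_t));
  block_of_locks_t *blk = (block_of_locks_t *)&buffer[space_for_locks];
  blk->next_block = head;
  blk->locks = buffer;
  return blk;
}

int main(void) {
  block_of_locks_t *blocks = NULL;
  blocks = alloc_block(64, 16, blocks); /* illustrative sizes */
  blocks = alloc_block(64, 16, blocks);
  return 0;
}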
-//
// Get memory for a lock. It may be freshly allocated memory or reused memory
// from lock pool.
-//
-kmp_user_lock_p
-__kmp_user_lock_allocate( void **user_lock, kmp_int32 gtid,
- kmp_lock_flags_t flags )
-{
- kmp_user_lock_p lck;
- kmp_lock_index_t index;
- KMP_DEBUG_ASSERT( user_lock );
-
- __kmp_acquire_lock( &__kmp_global_lock, gtid );
-
- if ( __kmp_lock_pool == NULL ) {
- // Lock pool is empty. Allocate new memory.
-
- // ANNOTATION: Found no good way to express the syncronisation
- // between allocation and usage, so ignore the allocation
- ANNOTATE_IGNORE_WRITES_BEGIN();
- if ( __kmp_num_locks_in_block <= 1 ) { // Tune this cutoff point.
- lck = (kmp_user_lock_p) __kmp_allocate( __kmp_user_lock_size );
- }
- else {
- lck = __kmp_lock_block_allocate();
- }
- ANNOTATE_IGNORE_WRITES_END();
-
- // Insert lock in the table so that it can be freed in __kmp_cleanup,
- // and debugger has info on all allocated locks.
- index = __kmp_lock_table_insert( lck );
- }
- else {
- // Pick up lock from pool.
- lck = __kmp_lock_pool;
- index = __kmp_lock_pool->pool.index;
- __kmp_lock_pool = __kmp_lock_pool->pool.next;
+kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock, kmp_int32 gtid,
+ kmp_lock_flags_t flags) {
+ kmp_user_lock_p lck;
+ kmp_lock_index_t index;
+ KMP_DEBUG_ASSERT(user_lock);
+
+ __kmp_acquire_lock(&__kmp_global_lock, gtid);
+
+ if (__kmp_lock_pool == NULL) {
+ // Lock pool is empty. Allocate new memory.
+
+    // ANNOTATION: Found no good way to express the synchronisation
+    // between allocation and usage, so ignore the allocation
+ ANNOTATE_IGNORE_WRITES_BEGIN();
+ if (__kmp_num_locks_in_block <= 1) { // Tune this cutoff point.
+ lck = (kmp_user_lock_p)__kmp_allocate(__kmp_user_lock_size);
+ } else {
+ lck = __kmp_lock_block_allocate();
}
+ ANNOTATE_IGNORE_WRITES_END();
- //
- // We could potentially differentiate between nested and regular locks
- // here, and do the lock table lookup for regular locks only.
- //
- if ( OMP_LOCK_T_SIZE < sizeof(void *) ) {
- * ( (kmp_lock_index_t *) user_lock ) = index;
- }
- else {
- * ( (kmp_user_lock_p *) user_lock ) = lck;
- }
+ // Insert lock in the table so that it can be freed in __kmp_cleanup,
+ // and debugger has info on all allocated locks.
+ index = __kmp_lock_table_insert(lck);
+ } else {
+ // Pick up lock from pool.
+ lck = __kmp_lock_pool;
+ index = __kmp_lock_pool->pool.index;
+ __kmp_lock_pool = __kmp_lock_pool->pool.next;
+ }
+
+ // We could potentially differentiate between nested and regular locks
+ // here, and do the lock table lookup for regular locks only.
+ if (OMP_LOCK_T_SIZE < sizeof(void *)) {
+ *((kmp_lock_index_t *)user_lock) = index;
+ } else {
+ *((kmp_user_lock_p *)user_lock) = lck;
+ }
- // mark the lock if it is critical section lock.
- __kmp_set_user_lock_flags( lck, flags );
+  // Mark the lock if it is a critical section lock.
+ __kmp_set_user_lock_flags(lck, flags);
- __kmp_release_lock( & __kmp_global_lock, gtid ); // AC: TODO: move this line upper
+  __kmp_release_lock(&__kmp_global_lock, gtid); // AC: TODO: move this line up
- return lck;
+ return lck;
}
// Put lock's memory to pool for reusing.
-void
-__kmp_user_lock_free( void **user_lock, kmp_int32 gtid, kmp_user_lock_p lck )
-{
- KMP_DEBUG_ASSERT( user_lock != NULL );
- KMP_DEBUG_ASSERT( lck != NULL );
-
- __kmp_acquire_lock( & __kmp_global_lock, gtid );
-
- lck->pool.next = __kmp_lock_pool;
- __kmp_lock_pool = lck;
- if ( OMP_LOCK_T_SIZE < sizeof(void *) ) {
- kmp_lock_index_t index = * ( (kmp_lock_index_t *) user_lock );
- KMP_DEBUG_ASSERT( 0 < index && index <= __kmp_user_lock_table.used );
- lck->pool.index = index;
- }
-
- __kmp_release_lock( & __kmp_global_lock, gtid );
-}
-
-kmp_user_lock_p
-__kmp_lookup_user_lock( void **user_lock, char const *func )
-{
- kmp_user_lock_p lck = NULL;
+void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
+ kmp_user_lock_p lck) {
+ KMP_DEBUG_ASSERT(user_lock != NULL);
+ KMP_DEBUG_ASSERT(lck != NULL);
- if ( __kmp_env_consistency_check ) {
- if ( user_lock == NULL ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- }
-
- if ( OMP_LOCK_T_SIZE < sizeof(void *) ) {
- kmp_lock_index_t index = *( (kmp_lock_index_t *)user_lock );
- if ( __kmp_env_consistency_check ) {
- if ( ! ( 0 < index && index < __kmp_user_lock_table.used ) ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- }
- KMP_DEBUG_ASSERT( 0 < index && index < __kmp_user_lock_table.used );
- KMP_DEBUG_ASSERT( __kmp_user_lock_size > 0 );
- lck = __kmp_user_lock_table.table[index];
- }
- else {
- lck = *( (kmp_user_lock_p *)user_lock );
- }
+ __kmp_acquire_lock(&__kmp_global_lock, gtid);
- if ( __kmp_env_consistency_check ) {
- if ( lck == NULL ) {
- KMP_FATAL( LockIsUninitialized, func );
- }
- }
+ lck->pool.next = __kmp_lock_pool;
+ __kmp_lock_pool = lck;
+ if (OMP_LOCK_T_SIZE < sizeof(void *)) {
+ kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
+ KMP_DEBUG_ASSERT(0 < index && index <= __kmp_user_lock_table.used);
+ lck->pool.index = index;
+ }
- return lck;
+ __kmp_release_lock(&__kmp_global_lock, gtid);
}
-void
-__kmp_cleanup_user_locks( void )
-{
- //
- // Reset lock pool. Do not worry about lock in the pool -- we will free
- // them when iterating through lock table (it includes all the locks,
- // dead or alive).
- //
- __kmp_lock_pool = NULL;
-
-#define IS_CRITICAL(lck) \
- ( ( __kmp_get_user_lock_flags_ != NULL ) && \
- ( ( *__kmp_get_user_lock_flags_ )( lck ) & kmp_lf_critical_section ) )
+kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock, char const *func) {
+ kmp_user_lock_p lck = NULL;
- //
- // Loop through lock table, free all locks.
- //
- // Do not free item [0], it is reserved for lock tables list.
- //
- // FIXME - we are iterating through a list of (pointers to) objects of
- // type union kmp_user_lock, but we have no way of knowing whether the
- // base type is currently "pool" or whatever the global user lock type
- // is.
- //
- // We are relying on the fact that for all of the user lock types
- // (except "tas"), the first field in the lock struct is the "initialized"
- // field, which is set to the address of the lock object itself when
- // the lock is initialized. When the union is of type "pool", the
- // first field is a pointer to the next object in the free list, which
- // will not be the same address as the object itself.
- //
- // This means that the check ( *__kmp_is_user_lock_initialized_ )( lck )
- // will fail for "pool" objects on the free list. This must happen as
- // the "location" field of real user locks overlaps the "index" field
- // of "pool" objects.
- //
- // It would be better to run through the free list, and remove all "pool"
- // objects from the lock table before executing this loop. However,
- // "pool" objects do not always have their index field set (only on
- // lin_32e), and I don't want to search the lock table for the address
- // of every "pool" object on the free list.
- //
- while ( __kmp_user_lock_table.used > 1 ) {
- const ident *loc;
+ if (__kmp_env_consistency_check) {
+ if (user_lock == NULL) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ }
- //
- // reduce __kmp_user_lock_table.used before freeing the lock,
- // so that state of locks is consistent
- //
- kmp_user_lock_p lck = __kmp_user_lock_table.table[
- --__kmp_user_lock_table.used ];
-
- if ( ( __kmp_is_user_lock_initialized_ != NULL ) &&
- ( *__kmp_is_user_lock_initialized_ )( lck ) ) {
- //
- // Issue a warning if: KMP_CONSISTENCY_CHECK AND lock is
- // initialized AND it is NOT a critical section (user is not
- // responsible for destroying criticals) AND we know source
- // location to report.
- //
- if ( __kmp_env_consistency_check && ( ! IS_CRITICAL( lck ) ) &&
- ( ( loc = __kmp_get_user_lock_location( lck ) ) != NULL ) &&
- ( loc->psource != NULL ) ) {
- kmp_str_loc_t str_loc = __kmp_str_loc_init( loc->psource, 0 );
- KMP_WARNING( CnsLockNotDestroyed, str_loc.file, str_loc.line );
- __kmp_str_loc_free( &str_loc);
- }
+ if (OMP_LOCK_T_SIZE < sizeof(void *)) {
+ kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
+ if (__kmp_env_consistency_check) {
+ if (!(0 < index && index < __kmp_user_lock_table.used)) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ }
+ KMP_DEBUG_ASSERT(0 < index && index < __kmp_user_lock_table.used);
+ KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
+ lck = __kmp_user_lock_table.table[index];
+ } else {
+ lck = *((kmp_user_lock_p *)user_lock);
+ }
+
+ if (__kmp_env_consistency_check) {
+ if (lck == NULL) {
+ KMP_FATAL(LockIsUninitialized, func);
+ }
+ }
+
+ return lck;
+}
+
+void __kmp_cleanup_user_locks(void) {
+  // Reset lock pool. Don't worry about locks in the pool--we will free them
+  // when iterating through the lock table (it includes all locks, dead or alive).
+ __kmp_lock_pool = NULL;
+
+#define IS_CRITICAL(lck) \
+ ((__kmp_get_user_lock_flags_ != NULL) && \
+ ((*__kmp_get_user_lock_flags_)(lck)&kmp_lf_critical_section))
+
+ // Loop through lock table, free all locks.
+ // Do not free item [0], it is reserved for lock tables list.
+ //
+ // FIXME - we are iterating through a list of (pointers to) objects of type
+ // union kmp_user_lock, but we have no way of knowing whether the base type is
+ // currently "pool" or whatever the global user lock type is.
+ //
+ // We are relying on the fact that for all of the user lock types
+ // (except "tas"), the first field in the lock struct is the "initialized"
+ // field, which is set to the address of the lock object itself when
+ // the lock is initialized. When the union is of type "pool", the
+ // first field is a pointer to the next object in the free list, which
+ // will not be the same address as the object itself.
+ //
+ // This means that the check (*__kmp_is_user_lock_initialized_)(lck) will fail
+ // for "pool" objects on the free list. This must happen as the "location"
+ // field of real user locks overlaps the "index" field of "pool" objects.
+ //
+ // It would be better to run through the free list, and remove all "pool"
+ // objects from the lock table before executing this loop. However,
+ // "pool" objects do not always have their index field set (only on
+ // lin_32e), and I don't want to search the lock table for the address
+ // of every "pool" object on the free list.
+ while (__kmp_user_lock_table.used > 1) {
+ const ident *loc;
+
+    // Reduce __kmp_user_lock_table.used before freeing the lock,
+    // so that the state of locks stays consistent.
+ kmp_user_lock_p lck =
+ __kmp_user_lock_table.table[--__kmp_user_lock_table.used];
+
+ if ((__kmp_is_user_lock_initialized_ != NULL) &&
+ (*__kmp_is_user_lock_initialized_)(lck)) {
+ // Issue a warning if: KMP_CONSISTENCY_CHECK AND lock is initialized AND
+ // it is NOT a critical section (user is not responsible for destroying
+ // criticals) AND we know source location to report.
+ if (__kmp_env_consistency_check && (!IS_CRITICAL(lck)) &&
+ ((loc = __kmp_get_user_lock_location(lck)) != NULL) &&
+ (loc->psource != NULL)) {
+ kmp_str_loc_t str_loc = __kmp_str_loc_init(loc->psource, 0);
+ KMP_WARNING(CnsLockNotDestroyed, str_loc.file, str_loc.line);
+ __kmp_str_loc_free(&str_loc);
+ }
#ifdef KMP_DEBUG
- if ( IS_CRITICAL( lck ) ) {
- KA_TRACE( 20, ("__kmp_cleanup_user_locks: free critical section lock %p (%p)\n", lck, *(void**)lck ) );
- }
- else {
- KA_TRACE( 20, ("__kmp_cleanup_user_locks: free lock %p (%p)\n", lck, *(void**)lck ) );
- }
+ if (IS_CRITICAL(lck)) {
+ KA_TRACE(
+ 20,
+ ("__kmp_cleanup_user_locks: free critical section lock %p (%p)\n",
+ lck, *(void **)lck));
+ } else {
+ KA_TRACE(20, ("__kmp_cleanup_user_locks: free lock %p (%p)\n", lck,
+ *(void **)lck));
+ }
#endif // KMP_DEBUG
- //
- // Cleanup internal lock dynamic resources
- // (for drdpa locks particularly).
- //
- __kmp_destroy_user_lock( lck );
- }
-
- //
- // Free the lock if block allocation of locks is not used.
- //
- if ( __kmp_lock_blocks == NULL ) {
- __kmp_free( lck );
- }
+ // Cleanup internal lock dynamic resources (for drdpa locks particularly).
+ __kmp_destroy_user_lock(lck);
}
-#undef IS_CRITICAL
-
- //
- // delete lock table(s).
- //
- kmp_user_lock_p *table_ptr = __kmp_user_lock_table.table;
- __kmp_user_lock_table.table = NULL;
- __kmp_user_lock_table.allocated = 0;
-
- while ( table_ptr != NULL ) {
- //
- // In the first element we saved the pointer to the previous
- // (smaller) lock table.
- //
- kmp_user_lock_p *next = (kmp_user_lock_p *)( table_ptr[ 0 ] );
- __kmp_free( table_ptr );
- table_ptr = next;
+ // Free the lock if block allocation of locks is not used.
+ if (__kmp_lock_blocks == NULL) {
+ __kmp_free(lck);
}
+ }
- //
- // Free buffers allocated for blocks of locks.
- //
- kmp_block_of_locks_t *block_ptr = __kmp_lock_blocks;
- __kmp_lock_blocks = NULL;
+#undef IS_CRITICAL
- while ( block_ptr != NULL ) {
- kmp_block_of_locks_t *next = block_ptr->next_block;
- __kmp_free( block_ptr->locks );
- //
- // *block_ptr itself was allocated at the end of the locks vector.
- //
- block_ptr = next;
- }
+ // delete lock table(s).
+ kmp_user_lock_p *table_ptr = __kmp_user_lock_table.table;
+ __kmp_user_lock_table.table = NULL;
+ __kmp_user_lock_table.allocated = 0;
+
+ while (table_ptr != NULL) {
+ // In the first element we saved the pointer to the previous
+ // (smaller) lock table.
+ kmp_user_lock_p *next = (kmp_user_lock_p *)(table_ptr[0]);
+ __kmp_free(table_ptr);
+ table_ptr = next;
+ }
+
+ // Free buffers allocated for blocks of locks.
+ kmp_block_of_locks_t *block_ptr = __kmp_lock_blocks;
+ __kmp_lock_blocks = NULL;
+
+ while (block_ptr != NULL) {
+ kmp_block_of_locks_t *next = block_ptr->next_block;
+ __kmp_free(block_ptr->locks);
+ // *block_ptr itself was allocated at the end of the locks vector.
+ block_ptr = next;
+ }
- TCW_4(__kmp_init_user_locks, FALSE);
+ TCW_4(__kmp_init_user_locks, FALSE);
}
#endif // KMP_USE_DYNAMIC_LOCK
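
Before moving on to kmp_lock.h: the table-growth scheme in
__kmp_lock_table_insert / __kmp_cleanup_user_locks above can be summarized by
the following standalone sketch (illustrative names only; the global lock that
guards the real table is omitted). Slot [0] of every new table points at the
previous, smaller table, so nothing is freed while other threads may still be
reading it, and the whole chain is reclaimed at shutdown:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    struct LockTable {
      void **table = nullptr; // table[0] chains to the previous table
      unsigned allocated = 0;
      unsigned used = 1;      // index 0 is reserved for the chain pointer
    };

    static unsigned table_insert(LockTable *t, void *lck) {
      if (t->used >= t->allocated) {
        unsigned size = t->allocated ? 2 * t->allocated : 8;
        void **bigger = (void **)std::malloc(size * sizeof(void *));
        if (t->allocated) // copy live entries, skipping reserved slot 0
          std::memcpy(bigger + 1, t->table + 1, (t->used - 1) * sizeof(void *));
        bigger[0] = t->table; // keep the old table alive, linked off slot 0
        t->table = bigger;
        t->allocated = size;
      }
      unsigned idx = t->used++;
      t->table[idx] = lck;
      return idx;
    }

    static void table_cleanup(LockTable *t) {
      void **p = t->table;
      t->table = nullptr;
      t->allocated = 0;
      while (p) { // walk the chain of older tables and free each one
        void **next = (void **)p[0];
        std::free(p);
        p = next;
      }
    }

    int main() {
      LockTable t;
      int dummy;
      for (int i = 0; i < 20; ++i)
        std::printf("lock %d -> index %u\n", i, table_insert(&t, &dummy));
      table_cleanup(&t);
      return 0;
    }
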
Modified: openmp/trunk/runtime/src/kmp_lock.h
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp_lock.h?rev=302929&r1=302928&r2=302929&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp_lock.h (original)
+++ openmp/trunk/runtime/src/kmp_lock.h Fri May 12 13:01:32 2017
@@ -16,11 +16,11 @@
#ifndef KMP_LOCK_H
#define KMP_LOCK_H
-#include <limits.h> // CHAR_BIT
-#include <stddef.h> // offsetof
+#include <limits.h> // CHAR_BIT
+#include <stddef.h> // offsetof
-#include "kmp_os.h"
#include "kmp_debug.h"
+#include "kmp_os.h"
#ifdef __cplusplus
#include <atomic>
@@ -32,7 +32,8 @@ extern "C" {
// Have to copy these definitions from kmp.h because kmp.h cannot be included
// due to circular dependencies. Will undef these at end of file.
-#define KMP_PAD(type, sz) (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
+#define KMP_PAD(type, sz) \
+ (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
#define KMP_GTID_DNE (-2)
// Forward declaration of ident and ident_t
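
The reformatted KMP_PAD macro above rounds sizeof(type) up to the next multiple
of sz; it is what pads the lock unions below out to CACHE_LINE. A quick hedged
check (macro copied from the hunk above, with 64 standing in for CACHE_LINE):

    #include <cstdio>

    #define KMP_PAD(type, sz)                                                  \
      (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))

    struct five { char c[5]; };
    struct full { char c[64]; };

    int main() {
      std::printf("KMP_PAD(five, 64) = %zu\n", KMP_PAD(five, 64)); // prints 64
      std::printf("KMP_PAD(full, 64) = %zu\n", KMP_PAD(full, 64)); // prints 64
      return 0;
    }
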
@@ -43,7 +44,6 @@ typedef struct ident ident_t;
// End of copied code.
// ----------------------------------------------------------------------------
-//
// We need to know the size of the area we can assume that the compiler(s)
// allocated for objects of type omp_lock_t and omp_nest_lock_t. The Intel
// compiler always allocates a pointer-sized area, as does visual studio.
@@ -52,77 +52,60 @@ typedef struct ident ident_t;
// intel archs. It allocates at least 8 bytes for nested lock (more on
// recent versions), but we are bounded by the pointer-sized chunks that
// the Intel compiler allocates.
-//
#if KMP_OS_LINUX && defined(KMP_GOMP_COMPAT)
-# define OMP_LOCK_T_SIZE sizeof(int)
-# define OMP_NEST_LOCK_T_SIZE sizeof(void *)
+#define OMP_LOCK_T_SIZE sizeof(int)
+#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#else
-# define OMP_LOCK_T_SIZE sizeof(void *)
-# define OMP_NEST_LOCK_T_SIZE sizeof(void *)
+#define OMP_LOCK_T_SIZE sizeof(void *)
+#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#endif
-//
// The Intel compiler allocates a 32-byte chunk for a critical section.
// Both gcc and visual studio only allocate enough space for a pointer.
// Sometimes we know that the space was allocated by the Intel compiler.
-//
-#define OMP_CRITICAL_SIZE sizeof(void *)
-#define INTEL_CRITICAL_SIZE 32
+#define OMP_CRITICAL_SIZE sizeof(void *)
+#define INTEL_CRITICAL_SIZE 32
-//
// lock flags
-//
typedef kmp_uint32 kmp_lock_flags_t;
#define kmp_lf_critical_section 1
-//
// When a lock table is used, the indices are of kmp_lock_index_t
-//
typedef kmp_uint32 kmp_lock_index_t;
-//
// When memory allocated for locks are on the lock pool (free list),
// it is treated as structs of this type.
-//
struct kmp_lock_pool {
- union kmp_user_lock *next;
- kmp_lock_index_t index;
+ union kmp_user_lock *next;
+ kmp_lock_index_t index;
};
typedef struct kmp_lock_pool kmp_lock_pool_t;
-
-extern void __kmp_validate_locks( void );
-
+extern void __kmp_validate_locks(void);
// ----------------------------------------------------------------------------
-//
// There are 5 lock implementations:
-//
// 1. Test and set locks.
-// 2. futex locks (Linux* OS on x86 and Intel(R) Many Integrated Core architecture)
+// 2. futex locks (Linux* OS on x86 and Intel(R) Many Integrated Core
+// architecture)
// 3. Ticket (Lamport bakery) locks.
// 4. Queuing locks (with separate spin fields).
// 5. DRPA (Dynamically Reconfigurable Distributed Polling Area) locks
//
// and 3 lock purposes:
-//
-// 1. Bootstrap locks -- Used for a few locks available at library startup-shutdown time.
+// 1. Bootstrap locks -- Used for a few locks available at library
+// startup-shutdown time.
// These do not require non-negative global thread ID's.
// 2. Internal RTL locks -- Used everywhere else in the RTL
// 3. User locks (includes critical sections)
-//
// ----------------------------------------------------------------------------
-
// ============================================================================
// Lock implementations.
-// ============================================================================
-
-
-// ----------------------------------------------------------------------------
+//
// Test and set locks.
//
// Non-nested test and set locks differ from the other lock kinds (except
@@ -133,52 +116,53 @@ extern void __kmp_validate_locks( void )
// bytes, so we have to use a lock table for nested locks, and avoid accessing
// the depth_locked field for non-nested locks.
//
-// Information normally available to the tools, such as lock location,
-// lock usage (normal lock vs. critical section), etc. is not available with
-// test and set locks.
+// Information normally available to the tools, such as lock location, lock
+// usage (normal lock vs. critical section), etc. is not available with test and
+// set locks.
// ----------------------------------------------------------------------------
struct kmp_base_tas_lock {
- volatile kmp_int32 poll; // 0 => unlocked
- // locked: (gtid+1) of owning thread
- kmp_int32 depth_locked; // depth locked, for nested locks only
+ volatile kmp_int32 poll; // 0 => unlocked; locked: (gtid+1) of owning thread
+ kmp_int32 depth_locked; // depth locked, for nested locks only
};
typedef struct kmp_base_tas_lock kmp_base_tas_lock_t;
union kmp_tas_lock {
- kmp_base_tas_lock_t lk;
- kmp_lock_pool_t pool; // make certain struct is large enough
- double lk_align; // use worst case alignment
- // no cache line padding
+ kmp_base_tas_lock_t lk;
+ kmp_lock_pool_t pool; // make certain struct is large enough
+ double lk_align; // use worst case alignment; no cache line padding
};
typedef union kmp_tas_lock kmp_tas_lock_t;
-//
// Static initializer for test and set lock variables. Usage:
// kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER( xlock );
-//
-#define KMP_TAS_LOCK_INITIALIZER( lock ) { { 0, 0 } }
+#define KMP_TAS_LOCK_INITIALIZER(lock) \
+ { \
+ { 0, 0 } \
+ }
+
+extern int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
+extern void __kmp_init_tas_lock(kmp_tas_lock_t *lck);
+extern void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck);
+
+extern int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
+extern void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck);
+extern void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck);
-extern int __kmp_acquire_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_test_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_init_tas_lock( kmp_tas_lock_t *lck );
-extern void __kmp_destroy_tas_lock( kmp_tas_lock_t *lck );
-
-extern int __kmp_acquire_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_test_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_init_nested_tas_lock( kmp_tas_lock_t *lck );
-extern void __kmp_destroy_nested_tas_lock( kmp_tas_lock_t *lck );
-
-#define KMP_LOCK_RELEASED 1
-#define KMP_LOCK_STILL_HELD 0
+#define KMP_LOCK_RELEASED 1
+#define KMP_LOCK_STILL_HELD 0
#define KMP_LOCK_ACQUIRED_FIRST 1
-#define KMP_LOCK_ACQUIRED_NEXT 0
+#define KMP_LOCK_ACQUIRED_NEXT 0
-#define KMP_USE_FUTEX (KMP_OS_LINUX && !KMP_OS_CNK && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64))
+#define KMP_USE_FUTEX \
+ (KMP_OS_LINUX && !KMP_OS_CNK && \
+ (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64))
#if KMP_USE_FUTEX
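
To make the test-and-set declarations above easier to follow, here is a
minimal standalone sketch of the poll-field protocol they describe (0 means
unlocked, gtid+1 identifies the owner). This is not the runtime's
implementation -- the real lock uses KMP_COMPARE_AND_STORE_ACQ32 plus the
yielding shown later in this patch -- just the core idea in portable C++:

    #include <atomic>
    #include <cassert>

    struct tas_lock { std::atomic<int> poll{0}; };

    static bool tas_try(tas_lock *l, int gtid) {
      int expected = 0; // CAS succeeds only if the lock is currently free
      return l->poll.compare_exchange_strong(expected, gtid + 1,
                                             std::memory_order_acquire);
    }

    static void tas_acquire(tas_lock *l, int gtid) {
      while (!tas_try(l, gtid)) { /* spin; the runtime yields here */ }
    }

    static void tas_release(tas_lock *l) {
      l->poll.store(0, std::memory_order_release);
    }

    int main() {
      tas_lock l;
      tas_acquire(&l, 3);
      assert(l.poll.load() == 4); // owner recorded as gtid + 1
      assert(!tas_try(&l, 5));    // a second thread's CAS would fail
      tas_release(&l);
      return 0;
    }
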
@@ -188,82 +172,86 @@ extern void __kmp_destroy_nested_tas_loc
// Like non-nested test and set lock, non-nested futex locks use the memory
// allocated by the compiler for the lock, rather than a pointer to it.
//
-// Information normally available to the tools, such as lock location,
-// lock usage (normal lock vs. critical section), etc. is not available with
-// test and set locks. With non-nested futex locks, the lock owner is not
-// even available.
+// Information normally available to the tools, such as lock location, lock
+// usage (normal lock vs. critical section), etc. is not available with test and
+// set locks. With non-nested futex locks, the lock owner is not even available.
// ----------------------------------------------------------------------------
struct kmp_base_futex_lock {
- volatile kmp_int32 poll; // 0 => unlocked
- // 2*(gtid+1) of owning thread, 0 if unlocked
- // locked: (gtid+1) of owning thread
- kmp_int32 depth_locked; // depth locked, for nested locks only
+ volatile kmp_int32 poll; // 0 => unlocked
+ // 2*(gtid+1) of owning thread, 0 if unlocked
+ // locked: (gtid+1) of owning thread
+ kmp_int32 depth_locked; // depth locked, for nested locks only
};
typedef struct kmp_base_futex_lock kmp_base_futex_lock_t;
union kmp_futex_lock {
- kmp_base_futex_lock_t lk;
- kmp_lock_pool_t pool; // make certain struct is large enough
- double lk_align; // use worst case alignment
- // no cache line padding
+ kmp_base_futex_lock_t lk;
+ kmp_lock_pool_t pool; // make certain struct is large enough
+ double lk_align; // use worst case alignment
+ // no cache line padding
};
typedef union kmp_futex_lock kmp_futex_lock_t;
-//
// Static initializer for futex lock variables. Usage:
// kmp_futex_lock_t xlock = KMP_FUTEX_LOCK_INITIALIZER( xlock );
-//
-#define KMP_FUTEX_LOCK_INITIALIZER( lock ) { { 0, 0 } }
-
-extern int __kmp_acquire_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_test_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_init_futex_lock( kmp_futex_lock_t *lck );
-extern void __kmp_destroy_futex_lock( kmp_futex_lock_t *lck );
-
-extern int __kmp_acquire_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_test_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_init_nested_futex_lock( kmp_futex_lock_t *lck );
-extern void __kmp_destroy_nested_futex_lock( kmp_futex_lock_t *lck );
+#define KMP_FUTEX_LOCK_INITIALIZER(lock) \
+ { \
+ { 0, 0 } \
+ }
+
+extern int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
+extern void __kmp_init_futex_lock(kmp_futex_lock_t *lck);
+extern void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck);
+
+extern int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck,
+ kmp_int32 gtid);
+extern int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck,
+ kmp_int32 gtid);
+extern void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck);
+extern void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck);
#endif // KMP_USE_FUTEX
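
For context on the futex section above: FUTEX_WAIT sleeps in the kernel while
the word still holds an expected value, and FUTEX_WAKE releases waiters. The
toy wrapper below shows only those two calls (Linux-only, GCC/Clang atomic
builtins); it is not the kmp futex lock, whose poll word encodes 2*(gtid+1) of
the owner as described above:

    #include <climits>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long futex(int *uaddr, int op, int val) {
      return syscall(SYS_futex, uaddr, op, val, nullptr, nullptr, 0);
    }

    static void wait_until_nonzero(int *flag) {
      // FUTEX_WAIT returns immediately unless *flag still equals the expected 0.
      while (__atomic_load_n(flag, __ATOMIC_ACQUIRE) == 0)
        futex(flag, FUTEX_WAIT, 0);
    }

    static void set_and_wake(int *flag) {
      __atomic_store_n(flag, 1, __ATOMIC_RELEASE);
      futex(flag, FUTEX_WAKE, INT_MAX); // wake every waiter
    }

    int main() {
      int flag = 0;
      set_and_wake(&flag);       // no waiter yet, so the wake is a no-op
      wait_until_nonzero(&flag); // returns at once because flag is already 1
      return 0;
    }
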
-
// ----------------------------------------------------------------------------
// Ticket locks.
-// ----------------------------------------------------------------------------
#ifdef __cplusplus
#ifdef _MSC_VER
-// MSVC won't allow use of std::atomic<> in a union since it has non-trivial copy constructor.
+// MSVC won't allow use of std::atomic<> in a union since it has non-trivial
+// copy constructor.
struct kmp_base_ticket_lock {
- // `initialized' must be the first entry in the lock data structure!
- std::atomic_bool initialized;
- volatile union kmp_ticket_lock *self; // points to the lock union
- ident_t const * location; // Source code location of omp_init_lock().
- std::atomic_uint next_ticket; // ticket number to give to next thread which acquires
- std::atomic_uint now_serving; // ticket number for thread which holds the lock
- std::atomic_int owner_id; // (gtid+1) of owning thread, 0 if unlocked
- std::atomic_int depth_locked; // depth locked, for nested locks only
- kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
+ // `initialized' must be the first entry in the lock data structure!
+ std::atomic_bool initialized;
+ volatile union kmp_ticket_lock *self; // points to the lock union
+ ident_t const *location; // Source code location of omp_init_lock().
+ std::atomic_uint
+ next_ticket; // ticket number to give to next thread which acquires
+ std::atomic_uint now_serving; // ticket number for thread which holds the lock
+ std::atomic_int owner_id; // (gtid+1) of owning thread, 0 if unlocked
+ std::atomic_int depth_locked; // depth locked, for nested locks only
+ kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
#else
struct kmp_base_ticket_lock {
- // `initialized' must be the first entry in the lock data structure!
- std::atomic<bool> initialized;
- volatile union kmp_ticket_lock *self; // points to the lock union
- ident_t const * location; // Source code location of omp_init_lock().
- std::atomic<unsigned> next_ticket; // ticket number to give to next thread which acquires
- std::atomic<unsigned> now_serving; // ticket number for thread which holds the lock
- std::atomic<int> owner_id; // (gtid+1) of owning thread, 0 if unlocked
- std::atomic<int> depth_locked; // depth locked, for nested locks only
- kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
+ // `initialized' must be the first entry in the lock data structure!
+ std::atomic<bool> initialized;
+ volatile union kmp_ticket_lock *self; // points to the lock union
+ ident_t const *location; // Source code location of omp_init_lock().
+ std::atomic<unsigned>
+ next_ticket; // ticket number to give to next thread which acquires
+ std::atomic<unsigned>
+ now_serving; // ticket number for thread which holds the lock
+ std::atomic<int> owner_id; // (gtid+1) of owning thread, 0 if unlocked
+ std::atomic<int> depth_locked; // depth locked, for nested locks only
+ kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
#endif
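
A minimal sketch of the next_ticket / now_serving protocol behind the
ticket-lock structs above (take a ticket with fetch-and-increment, then spin
until served); the runtime versions add owner tracking, nesting depth and
source-location bookkeeping for the tools:

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct ticket_lock {
      std::atomic<unsigned> next_ticket{0};
      std::atomic<unsigned> now_serving{0};
    };

    static void acquire(ticket_lock *l) {
      unsigned my = l->next_ticket.fetch_add(1, std::memory_order_relaxed);
      while (l->now_serving.load(std::memory_order_acquire) != my) {
        // spin; the real lock yields / backs off here
      }
    }

    static void release(ticket_lock *l) {
      l->now_serving.fetch_add(1, std::memory_order_release);
    }

    int main() {
      ticket_lock l;
      long counter = 0;
      std::vector<std::thread> ts;
      for (int i = 0; i < 4; ++i)
        ts.emplace_back([&] {
          for (int j = 0; j < 100000; ++j) {
            acquire(&l);
            ++counter;
            release(&l);
          }
        });
      for (auto &t : ts) t.join();
      std::printf("counter = %ld (expect 400000)\n", counter);
      return 0;
    }
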
@@ -276,44 +264,46 @@ struct kmp_base_ticket_lock;
typedef struct kmp_base_ticket_lock kmp_base_ticket_lock_t;
union KMP_ALIGN_CACHE kmp_ticket_lock {
- kmp_base_ticket_lock_t lk; // This field must be first to allow static initializing.
- kmp_lock_pool_t pool;
- double lk_align; // use worst case alignment
- char lk_pad[ KMP_PAD( kmp_base_ticket_lock_t, CACHE_LINE ) ];
+ kmp_base_ticket_lock_t
+ lk; // This field must be first to allow static initializing.
+ kmp_lock_pool_t pool;
+ double lk_align; // use worst case alignment
+ char lk_pad[KMP_PAD(kmp_base_ticket_lock_t, CACHE_LINE)];
};
typedef union kmp_ticket_lock kmp_ticket_lock_t;
-//
// Static initializer for simple ticket lock variables. Usage:
// kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER( xlock );
// Note the macro argument. It is important to make var properly initialized.
-//
-#define KMP_TICKET_LOCK_INITIALIZER( lock ) { { ATOMIC_VAR_INIT(true), \
- &(lock), \
- NULL, \
- ATOMIC_VAR_INIT(0U), \
- ATOMIC_VAR_INIT(0U), \
- ATOMIC_VAR_INIT(0), \
- ATOMIC_VAR_INIT(-1) } }
-
-extern int __kmp_acquire_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_test_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_test_ticket_lock_with_cheks( kmp_ticket_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_init_ticket_lock( kmp_ticket_lock_t *lck );
-extern void __kmp_destroy_ticket_lock( kmp_ticket_lock_t *lck );
-
-extern int __kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_test_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_init_nested_ticket_lock( kmp_ticket_lock_t *lck );
-extern void __kmp_destroy_nested_ticket_lock( kmp_ticket_lock_t *lck );
-
+#define KMP_TICKET_LOCK_INITIALIZER(lock) \
+ { \
+ { \
+ ATOMIC_VAR_INIT(true) \
+ , &(lock), NULL, ATOMIC_VAR_INIT(0U), ATOMIC_VAR_INIT(0U), \
+ ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(-1) \
+ } \
+ }
+
+extern int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_test_ticket_lock_with_cheks(kmp_ticket_lock_t *lck,
+ kmp_int32 gtid);
+extern int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
+extern void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck);
+extern void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck);
+
+extern int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck,
+ kmp_int32 gtid);
+extern int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck,
+ kmp_int32 gtid);
+extern int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck,
+ kmp_int32 gtid);
+extern void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck);
+extern void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck);
// ----------------------------------------------------------------------------
// Queuing locks.
-// ----------------------------------------------------------------------------
#if KMP_USE_ADAPTIVE_LOCKS
@@ -324,17 +314,17 @@ typedef struct kmp_adaptive_lock_info km
#if KMP_DEBUG_ADAPTIVE_LOCKS
struct kmp_adaptive_lock_statistics {
- /* So we can get stats from locks that haven't been destroyed. */
- kmp_adaptive_lock_info_t * next;
- kmp_adaptive_lock_info_t * prev;
-
- /* Other statistics */
- kmp_uint32 successfulSpeculations;
- kmp_uint32 hardFailedSpeculations;
- kmp_uint32 softFailedSpeculations;
- kmp_uint32 nonSpeculativeAcquires;
- kmp_uint32 nonSpeculativeAcquireAttempts;
- kmp_uint32 lemmingYields;
+ /* So we can get stats from locks that haven't been destroyed. */
+ kmp_adaptive_lock_info_t *next;
+ kmp_adaptive_lock_info_t *prev;
+
+ /* Other statistics */
+ kmp_uint32 successfulSpeculations;
+ kmp_uint32 hardFailedSpeculations;
+ kmp_uint32 softFailedSpeculations;
+ kmp_uint32 nonSpeculativeAcquires;
+ kmp_uint32 nonSpeculativeAcquireAttempts;
+ kmp_uint32 lemmingYields;
};
typedef struct kmp_adaptive_lock_statistics kmp_adaptive_lock_statistics_t;
@@ -344,188 +334,182 @@ extern void __kmp_init_speculative_stats
#endif // KMP_DEBUG_ADAPTIVE_LOCKS
-struct kmp_adaptive_lock_info
-{
- /* Values used for adaptivity.
- * Although these are accessed from multiple threads we don't access them atomically,
- * because if we miss updates it probably doesn't matter much. (It just affects our
- * decision about whether to try speculation on the lock).
- */
- kmp_uint32 volatile badness;
- kmp_uint32 volatile acquire_attempts;
- /* Parameters of the lock. */
- kmp_uint32 max_badness;
- kmp_uint32 max_soft_retries;
+struct kmp_adaptive_lock_info {
+ /* Values used for adaptivity.
+ Although these are accessed from multiple threads we don't access them
+ atomically, because if we miss updates it probably doesn't matter much. (It
+ just affects our decision about whether to try speculation on the lock). */
+ kmp_uint32 volatile badness;
+ kmp_uint32 volatile acquire_attempts;
+ /* Parameters of the lock. */
+ kmp_uint32 max_badness;
+ kmp_uint32 max_soft_retries;
#if KMP_DEBUG_ADAPTIVE_LOCKS
- kmp_adaptive_lock_statistics_t volatile stats;
+ kmp_adaptive_lock_statistics_t volatile stats;
#endif
};
#endif // KMP_USE_ADAPTIVE_LOCKS
-
struct kmp_base_queuing_lock {
- // `initialized' must be the first entry in the lock data structure!
- volatile union kmp_queuing_lock *initialized; // Points to the lock union if in initialized state.
-
- ident_t const * location; // Source code location of omp_init_lock().
-
- KMP_ALIGN( 8 ) // tail_id must be 8-byte aligned!
-
- volatile kmp_int32 tail_id; // (gtid+1) of thread at tail of wait queue, 0 if empty
- // Must be no padding here since head/tail used in 8-byte CAS
- volatile kmp_int32 head_id; // (gtid+1) of thread at head of wait queue, 0 if empty
- // Decl order assumes little endian
- // bakery-style lock
- volatile kmp_uint32 next_ticket; // ticket number to give to next thread which acquires
- volatile kmp_uint32 now_serving; // ticket number for thread which holds the lock
- volatile kmp_int32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
- kmp_int32 depth_locked; // depth locked, for nested locks only
+ // `initialized' must be the first entry in the lock data structure!
+ volatile union kmp_queuing_lock
+ *initialized; // Points to the lock union if in initialized state.
+
+ ident_t const *location; // Source code location of omp_init_lock().
+
+ KMP_ALIGN(8) // tail_id must be 8-byte aligned!
+
+ volatile kmp_int32
+ tail_id; // (gtid+1) of thread at tail of wait queue, 0 if empty
+ // Must be no padding here since head/tail used in 8-byte CAS
+ volatile kmp_int32
+ head_id; // (gtid+1) of thread at head of wait queue, 0 if empty
+ // Decl order assumes little endian
+ // bakery-style lock
+ volatile kmp_uint32
+ next_ticket; // ticket number to give to next thread which acquires
+ volatile kmp_uint32
+ now_serving; // ticket number for thread which holds the lock
+ volatile kmp_int32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
+ kmp_int32 depth_locked; // depth locked, for nested locks only
- kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
+ kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
typedef struct kmp_base_queuing_lock kmp_base_queuing_lock_t;
-KMP_BUILD_ASSERT( offsetof( kmp_base_queuing_lock_t, tail_id ) % 8 == 0 );
+KMP_BUILD_ASSERT(offsetof(kmp_base_queuing_lock_t, tail_id) % 8 == 0);
union KMP_ALIGN_CACHE kmp_queuing_lock {
- kmp_base_queuing_lock_t lk; // This field must be first to allow static initializing.
- kmp_lock_pool_t pool;
- double lk_align; // use worst case alignment
- char lk_pad[ KMP_PAD( kmp_base_queuing_lock_t, CACHE_LINE ) ];
+ kmp_base_queuing_lock_t
+ lk; // This field must be first to allow static initializing.
+ kmp_lock_pool_t pool;
+ double lk_align; // use worst case alignment
+ char lk_pad[KMP_PAD(kmp_base_queuing_lock_t, CACHE_LINE)];
};
typedef union kmp_queuing_lock kmp_queuing_lock_t;
-extern int __kmp_acquire_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_test_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_init_queuing_lock( kmp_queuing_lock_t *lck );
-extern void __kmp_destroy_queuing_lock( kmp_queuing_lock_t *lck );
-
-extern int __kmp_acquire_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_test_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_init_nested_queuing_lock( kmp_queuing_lock_t *lck );
-extern void __kmp_destroy_nested_queuing_lock( kmp_queuing_lock_t *lck );
+extern int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
+extern void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck);
+extern void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck);
+
+extern int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck,
+ kmp_int32 gtid);
+extern int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck,
+ kmp_int32 gtid);
+extern int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck,
+ kmp_int32 gtid);
+extern void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck);
+extern void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck);
#if KMP_USE_ADAPTIVE_LOCKS
// ----------------------------------------------------------------------------
// Adaptive locks.
-// ----------------------------------------------------------------------------
struct kmp_base_adaptive_lock {
- kmp_base_queuing_lock qlk;
- KMP_ALIGN(CACHE_LINE)
- kmp_adaptive_lock_info_t adaptive; // Information for the speculative adaptive lock
+ kmp_base_queuing_lock qlk;
+ KMP_ALIGN(CACHE_LINE)
+ kmp_adaptive_lock_info_t
+ adaptive; // Information for the speculative adaptive lock
};
typedef struct kmp_base_adaptive_lock kmp_base_adaptive_lock_t;
union KMP_ALIGN_CACHE kmp_adaptive_lock {
- kmp_base_adaptive_lock_t lk;
- kmp_lock_pool_t pool;
- double lk_align;
- char lk_pad[ KMP_PAD(kmp_base_adaptive_lock_t, CACHE_LINE) ];
+ kmp_base_adaptive_lock_t lk;
+ kmp_lock_pool_t pool;
+ double lk_align;
+ char lk_pad[KMP_PAD(kmp_base_adaptive_lock_t, CACHE_LINE)];
};
typedef union kmp_adaptive_lock kmp_adaptive_lock_t;
-# define GET_QLK_PTR(l) ((kmp_queuing_lock_t *) & (l)->lk.qlk)
+#define GET_QLK_PTR(l) ((kmp_queuing_lock_t *)&(l)->lk.qlk)
#endif // KMP_USE_ADAPTIVE_LOCKS
// ----------------------------------------------------------------------------
// DRDPA ticket locks.
-// ----------------------------------------------------------------------------
-
struct kmp_base_drdpa_lock {
- //
- // All of the fields on the first cache line are only written when
- // initializing or reconfiguring the lock. These are relatively rare
- // operations, so data from the first cache line will usually stay
- // resident in the cache of each thread trying to acquire the lock.
- //
- // initialized must be the first entry in the lock data structure!
- //
- KMP_ALIGN_CACHE
-
- volatile union kmp_drdpa_lock * initialized; // points to the lock union if in initialized state
- ident_t const * location; // Source code location of omp_init_lock().
- volatile struct kmp_lock_poll {
- kmp_uint64 poll;
- } * volatile polls;
- volatile kmp_uint64 mask; // is 2**num_polls-1 for mod op
- kmp_uint64 cleanup_ticket; // thread with cleanup ticket
- volatile struct kmp_lock_poll * old_polls; // will deallocate old_polls
- kmp_uint32 num_polls; // must be power of 2
-
- //
- // next_ticket it needs to exist in a separate cache line, as it is
- // invalidated every time a thread takes a new ticket.
- //
- KMP_ALIGN_CACHE
-
- volatile kmp_uint64 next_ticket;
-
- //
- // now_serving is used to store our ticket value while we hold the lock.
- // It has a slightly different meaning in the DRDPA ticket locks (where
- // it is written by the acquiring thread) than it does in the simple
- // ticket locks (where it is written by the releasing thread).
- //
- // Since now_serving is only read an written in the critical section,
- // it is non-volatile, but it needs to exist on a separate cache line,
- // as it is invalidated at every lock acquire.
- //
- // Likewise, the vars used for nested locks (owner_id and depth_locked)
- // are only written by the thread owning the lock, so they are put in
- // this cache line. owner_id is read by other threads, so it must be
- // declared volatile.
- //
- KMP_ALIGN_CACHE
-
- kmp_uint64 now_serving; // doesn't have to be volatile
- volatile kmp_uint32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
- kmp_int32 depth_locked; // depth locked
- kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
+ // All of the fields on the first cache line are only written when
+ // initializing or reconfiguring the lock. These are relatively rare
+ // operations, so data from the first cache line will usually stay resident in
+ // the cache of each thread trying to acquire the lock.
+ //
+ // initialized must be the first entry in the lock data structure!
+ KMP_ALIGN_CACHE
+
+ volatile union kmp_drdpa_lock
+ *initialized; // points to the lock union if in initialized state
+ ident_t const *location; // Source code location of omp_init_lock().
+ volatile struct kmp_lock_poll { kmp_uint64 poll; } * volatile polls;
+ volatile kmp_uint64 mask; // is 2**num_polls-1 for mod op
+ kmp_uint64 cleanup_ticket; // thread with cleanup ticket
+ volatile struct kmp_lock_poll *old_polls; // will deallocate old_polls
+ kmp_uint32 num_polls; // must be power of 2
+
+  // next_ticket needs to exist in a separate cache line, as it is
+ // invalidated every time a thread takes a new ticket.
+ KMP_ALIGN_CACHE
+
+ volatile kmp_uint64 next_ticket;
+
+ // now_serving is used to store our ticket value while we hold the lock. It
+ // has a slightly different meaning in the DRDPA ticket locks (where it is
+ // written by the acquiring thread) than it does in the simple ticket locks
+ // (where it is written by the releasing thread).
+ //
+  // Since now_serving is only read and written in the critical section,
+ // it is non-volatile, but it needs to exist on a separate cache line,
+ // as it is invalidated at every lock acquire.
+ //
+ // Likewise, the vars used for nested locks (owner_id and depth_locked) are
+ // only written by the thread owning the lock, so they are put in this cache
+ // line. owner_id is read by other threads, so it must be declared volatile.
+ KMP_ALIGN_CACHE
+ kmp_uint64 now_serving; // doesn't have to be volatile
+ volatile kmp_uint32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
+ kmp_int32 depth_locked; // depth locked
+ kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
typedef struct kmp_base_drdpa_lock kmp_base_drdpa_lock_t;
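
The comments above motivate the DRDPA field layout: rarely-written
configuration fields share the first cache line, next_ticket (bumped by every
acquirer) gets its own, and now_serving / owner_id (written by the owner) sit
on a third. A small illustrative layout check, with alignas(64) standing in
for KMP_ALIGN_CACHE:

    #include <cstddef>
    #include <cstdio>

    struct drdpa_like {
      alignas(64) void *initialized; // first cache line: rarely written
      const void *location;
      unsigned num_polls;

      alignas(64) unsigned long long next_ticket; // invalidated on every acquire

      alignas(64) unsigned long long now_serving; // written only by the owner
      unsigned owner_id;
    };

    int main() {
      std::printf("initialized @ %zu, next_ticket @ %zu, now_serving @ %zu\n",
                  offsetof(drdpa_like, initialized),
                  offsetof(drdpa_like, next_ticket),
                  offsetof(drdpa_like, now_serving));
      return 0;
    }
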
union KMP_ALIGN_CACHE kmp_drdpa_lock {
- kmp_base_drdpa_lock_t lk; // This field must be first to allow static initializing. */
- kmp_lock_pool_t pool;
- double lk_align; // use worst case alignment
- char lk_pad[ KMP_PAD( kmp_base_drdpa_lock_t, CACHE_LINE ) ];
+ kmp_base_drdpa_lock_t
+      lk; // This field must be first to allow static initializing.
+ kmp_lock_pool_t pool;
+ double lk_align; // use worst case alignment
+ char lk_pad[KMP_PAD(kmp_base_drdpa_lock_t, CACHE_LINE)];
};
typedef union kmp_drdpa_lock kmp_drdpa_lock_t;
-extern int __kmp_acquire_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_test_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_init_drdpa_lock( kmp_drdpa_lock_t *lck );
-extern void __kmp_destroy_drdpa_lock( kmp_drdpa_lock_t *lck );
-
-extern int __kmp_acquire_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_test_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
-extern int __kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_init_nested_drdpa_lock( kmp_drdpa_lock_t *lck );
-extern void __kmp_destroy_nested_drdpa_lock( kmp_drdpa_lock_t *lck );
-
+extern int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
+extern void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck);
+extern void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck);
+
+extern int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
+ kmp_int32 gtid);
+extern int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
+extern int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
+ kmp_int32 gtid);
+extern void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck);
+extern void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck);
// ============================================================================
// Lock purposes.
// ============================================================================
-
-// ----------------------------------------------------------------------------
// Bootstrap locks.
-// ----------------------------------------------------------------------------
-
+//
// Bootstrap locks -- very few locks used at library initialization time.
// Bootstrap locks are currently implemented as ticket locks.
// They could also be implemented as test and set lock, but cannot be
@@ -534,111 +518,80 @@ extern void __kmp_destroy_nested_drdpa_l
typedef kmp_ticket_lock_t kmp_bootstrap_lock_t;
-#define KMP_BOOTSTRAP_LOCK_INITIALIZER( lock ) KMP_TICKET_LOCK_INITIALIZER( (lock) )
+#define KMP_BOOTSTRAP_LOCK_INITIALIZER(lock) KMP_TICKET_LOCK_INITIALIZER((lock))
-static inline int
-__kmp_acquire_bootstrap_lock( kmp_bootstrap_lock_t *lck )
-{
- return __kmp_acquire_ticket_lock( lck, KMP_GTID_DNE );
+static inline int __kmp_acquire_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
+ return __kmp_acquire_ticket_lock(lck, KMP_GTID_DNE);
}
-static inline int
-__kmp_test_bootstrap_lock( kmp_bootstrap_lock_t *lck )
-{
- return __kmp_test_ticket_lock( lck, KMP_GTID_DNE );
+static inline int __kmp_test_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
+ return __kmp_test_ticket_lock(lck, KMP_GTID_DNE);
}
-static inline void
-__kmp_release_bootstrap_lock( kmp_bootstrap_lock_t *lck )
-{
- __kmp_release_ticket_lock( lck, KMP_GTID_DNE );
+static inline void __kmp_release_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
+ __kmp_release_ticket_lock(lck, KMP_GTID_DNE);
}
-static inline void
-__kmp_init_bootstrap_lock( kmp_bootstrap_lock_t *lck )
-{
- __kmp_init_ticket_lock( lck );
+static inline void __kmp_init_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
+ __kmp_init_ticket_lock(lck);
}
-static inline void
-__kmp_destroy_bootstrap_lock( kmp_bootstrap_lock_t *lck )
-{
- __kmp_destroy_ticket_lock( lck );
+static inline void __kmp_destroy_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
+ __kmp_destroy_ticket_lock(lck);
}
-
-// ----------------------------------------------------------------------------
// Internal RTL locks.
-// ----------------------------------------------------------------------------
-
//
// Internal RTL locks are also implemented as ticket locks, for now.
//
// FIXME - We should go through and figure out which lock kind works best for
// each internal lock, and use the type declaration and function calls for
// that explicit lock kind (and get rid of this section).
-//
typedef kmp_ticket_lock_t kmp_lock_t;
-static inline int
-__kmp_acquire_lock( kmp_lock_t *lck, kmp_int32 gtid )
-{
- return __kmp_acquire_ticket_lock( lck, gtid );
+static inline int __kmp_acquire_lock(kmp_lock_t *lck, kmp_int32 gtid) {
+ return __kmp_acquire_ticket_lock(lck, gtid);
}
-static inline int
-__kmp_test_lock( kmp_lock_t *lck, kmp_int32 gtid )
-{
- return __kmp_test_ticket_lock( lck, gtid );
+static inline int __kmp_test_lock(kmp_lock_t *lck, kmp_int32 gtid) {
+ return __kmp_test_ticket_lock(lck, gtid);
}
-static inline void
-__kmp_release_lock( kmp_lock_t *lck, kmp_int32 gtid )
-{
- __kmp_release_ticket_lock( lck, gtid );
+static inline void __kmp_release_lock(kmp_lock_t *lck, kmp_int32 gtid) {
+ __kmp_release_ticket_lock(lck, gtid);
}
-static inline void
-__kmp_init_lock( kmp_lock_t *lck )
-{
- __kmp_init_ticket_lock( lck );
+static inline void __kmp_init_lock(kmp_lock_t *lck) {
+ __kmp_init_ticket_lock(lck);
}
-static inline void
-__kmp_destroy_lock( kmp_lock_t *lck )
-{
- __kmp_destroy_ticket_lock( lck );
+static inline void __kmp_destroy_lock(kmp_lock_t *lck) {
+ __kmp_destroy_ticket_lock(lck);
}
-
-// ----------------------------------------------------------------------------
// User locks.
-// ----------------------------------------------------------------------------
-
-//
-// Do not allocate objects of type union kmp_user_lock!!!
-// This will waste space unless __kmp_user_lock_kind == lk_drdpa.
-// Instead, check the value of __kmp_user_lock_kind and allocate objects of
-// the type of the appropriate union member, and cast their addresses to
-// kmp_user_lock_p.
//
+// Do not allocate objects of type union kmp_user_lock!!! This will waste space
+// unless __kmp_user_lock_kind == lk_drdpa. Instead, check the value of
+// __kmp_user_lock_kind and allocate objects of the type of the appropriate
+// union member, and cast their addresses to kmp_user_lock_p.
enum kmp_lock_kind {
- lk_default = 0,
- lk_tas,
+ lk_default = 0,
+ lk_tas,
#if KMP_USE_FUTEX
- lk_futex,
+ lk_futex,
#endif
#if KMP_USE_DYNAMIC_LOCK && KMP_USE_TSX
- lk_hle,
- lk_rtm,
+ lk_hle,
+ lk_rtm,
#endif
- lk_ticket,
- lk_queuing,
- lk_drdpa,
+ lk_ticket,
+ lk_queuing,
+ lk_drdpa,
#if KMP_USE_ADAPTIVE_LOCKS
- lk_adaptive
+ lk_adaptive
#endif // KMP_USE_ADAPTIVE_LOCKS
};
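
The user-lock entry points declared in the next hunk dispatch through function
pointers selected according to __kmp_user_lock_kind (with an inlined fast path
for TAS on some targets). A toy standalone analogue of that dispatch, with
illustrative names rather than the runtime's:

    #include <cassert>

    struct user_lock { int owner; };

    static int get_owner_direct(user_lock *l) { return l->owner; }
    static int get_owner_stripped(user_lock *l) { return l->owner - 1; }

    // Generic entry point calls through a pointer chosen at setup time,
    // mirroring __kmp_get_user_lock_owner_ and friends below.
    static int (*get_owner_)(user_lock *) = nullptr;

    static void set_lock_kind(bool direct) {
      get_owner_ = direct ? get_owner_direct : get_owner_stripped;
    }

    static int get_owner(user_lock *l) {
      assert(get_owner_ != nullptr); // mirrors the KMP_DEBUG_ASSERTs below
      return (*get_owner_)(l);
    }

    int main() {
      user_lock l{5};
      set_lock_kind(true);
      assert(get_owner(&l) == 5);
      set_lock_kind(false);
      assert(get_owner(&l) == 4);
      return 0;
    }
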
@@ -647,279 +600,276 @@ typedef enum kmp_lock_kind kmp_lock_kind
extern kmp_lock_kind_t __kmp_user_lock_kind;
union kmp_user_lock {
- kmp_tas_lock_t tas;
+ kmp_tas_lock_t tas;
#if KMP_USE_FUTEX
- kmp_futex_lock_t futex;
+ kmp_futex_lock_t futex;
#endif
- kmp_ticket_lock_t ticket;
- kmp_queuing_lock_t queuing;
- kmp_drdpa_lock_t drdpa;
+ kmp_ticket_lock_t ticket;
+ kmp_queuing_lock_t queuing;
+ kmp_drdpa_lock_t drdpa;
#if KMP_USE_ADAPTIVE_LOCKS
- kmp_adaptive_lock_t adaptive;
+ kmp_adaptive_lock_t adaptive;
#endif // KMP_USE_ADAPTIVE_LOCKS
- kmp_lock_pool_t pool;
+ kmp_lock_pool_t pool;
};
typedef union kmp_user_lock *kmp_user_lock_p;
-#if ! KMP_USE_DYNAMIC_LOCK
+#if !KMP_USE_DYNAMIC_LOCK
extern size_t __kmp_base_user_lock_size;
extern size_t __kmp_user_lock_size;
-extern kmp_int32 ( *__kmp_get_user_lock_owner_ )( kmp_user_lock_p lck );
+extern kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck);
-static inline kmp_int32
-__kmp_get_user_lock_owner( kmp_user_lock_p lck )
-{
- KMP_DEBUG_ASSERT( __kmp_get_user_lock_owner_ != NULL );
- return ( *__kmp_get_user_lock_owner_ )( lck );
-}
-
-extern int ( *__kmp_acquire_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );
-
-#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
-
-#define __kmp_acquire_user_lock_with_checks(lck,gtid) \
- if (__kmp_user_lock_kind == lk_tas) { \
- if ( __kmp_env_consistency_check ) { \
- char const * const func = "omp_set_lock"; \
- if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE ) \
- && lck->tas.lk.depth_locked != -1 ) { \
- KMP_FATAL( LockNestableUsedAsSimple, func ); \
- } \
- if ( ( gtid >= 0 ) && ( lck->tas.lk.poll - 1 == gtid ) ) { \
- KMP_FATAL( LockIsAlreadyOwned, func ); \
- } \
- } \
- if ( ( lck->tas.lk.poll != 0 ) || \
- ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) { \
- kmp_uint32 spins; \
- KMP_FSYNC_PREPARE( lck ); \
- KMP_INIT_YIELD( spins ); \
- if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
- KMP_YIELD( TRUE ); \
- } else { \
- KMP_YIELD_SPIN( spins ); \
- } \
- while ( ( lck->tas.lk.poll != 0 ) || \
- ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) { \
- if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
- KMP_YIELD( TRUE ); \
- } else { \
- KMP_YIELD_SPIN( spins ); \
- } \
- } \
- } \
- KMP_FSYNC_ACQUIRED( lck ); \
- } else { \
- KMP_DEBUG_ASSERT( __kmp_acquire_user_lock_with_checks_ != NULL ); \
- ( *__kmp_acquire_user_lock_with_checks_ )( lck, gtid ); \
- }
+static inline kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck) {
+ KMP_DEBUG_ASSERT(__kmp_get_user_lock_owner_ != NULL);
+ return (*__kmp_get_user_lock_owner_)(lck);
+}
+
+extern int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
+ kmp_int32 gtid);
+
+#if KMP_OS_LINUX && \
+ (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
+
+#define __kmp_acquire_user_lock_with_checks(lck, gtid) \
+ if (__kmp_user_lock_kind == lk_tas) { \
+ if (__kmp_env_consistency_check) { \
+ char const *const func = "omp_set_lock"; \
+ if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) && \
+ lck->tas.lk.depth_locked != -1) { \
+ KMP_FATAL(LockNestableUsedAsSimple, func); \
+ } \
+ if ((gtid >= 0) && (lck->tas.lk.poll - 1 == gtid)) { \
+ KMP_FATAL(LockIsAlreadyOwned, func); \
+ } \
+ } \
+ if ((lck->tas.lk.poll != 0) || \
+ (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1))) { \
+ kmp_uint32 spins; \
+ KMP_FSYNC_PREPARE(lck); \
+ KMP_INIT_YIELD(spins); \
+ if (TCR_4(__kmp_nth) > \
+ (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
+ KMP_YIELD(TRUE); \
+ } else { \
+ KMP_YIELD_SPIN(spins); \
+ } \
+ while ( \
+ (lck->tas.lk.poll != 0) || \
+ (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1))) { \
+ if (TCR_4(__kmp_nth) > \
+ (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
+ KMP_YIELD(TRUE); \
+ } else { \
+ KMP_YIELD_SPIN(spins); \
+ } \
+ } \
+ } \
+ KMP_FSYNC_ACQUIRED(lck); \
+ } else { \
+ KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL); \
+ (*__kmp_acquire_user_lock_with_checks_)(lck, gtid); \
+ }
#else
-static inline int
-__kmp_acquire_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( __kmp_acquire_user_lock_with_checks_ != NULL );
- return ( *__kmp_acquire_user_lock_with_checks_ )( lck, gtid );
+static inline int __kmp_acquire_user_lock_with_checks(kmp_user_lock_p lck,
+ kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);
+ return (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);
}
#endif
-extern int ( *__kmp_test_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );
+extern int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
+ kmp_int32 gtid);
-#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
+#if KMP_OS_LINUX && \
+ (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
-#include "kmp_i18n.h" /* AC: KMP_FATAL definition */
-extern int __kmp_env_consistency_check; /* AC: copy from kmp.h here */
-static inline int
-__kmp_test_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
-{
- if ( __kmp_user_lock_kind == lk_tas ) {
- if ( __kmp_env_consistency_check ) {
- char const * const func = "omp_test_lock";
- if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE )
- && lck->tas.lk.depth_locked != -1 ) {
- KMP_FATAL( LockNestableUsedAsSimple, func );
- }
- }
- return ( ( lck->tas.lk.poll == 0 ) &&
- KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) );
- } else {
- KMP_DEBUG_ASSERT( __kmp_test_user_lock_with_checks_ != NULL );
- return ( *__kmp_test_user_lock_with_checks_ )( lck, gtid );
+#include "kmp_i18n.h" /* AC: KMP_FATAL definition */
+extern int __kmp_env_consistency_check; /* AC: copy from kmp.h here */
+static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
+ kmp_int32 gtid) {
+ if (__kmp_user_lock_kind == lk_tas) {
+ if (__kmp_env_consistency_check) {
+ char const *const func = "omp_test_lock";
+ if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
+ lck->tas.lk.depth_locked != -1) {
+ KMP_FATAL(LockNestableUsedAsSimple, func);
+ }
}
+ return ((lck->tas.lk.poll == 0) &&
+ KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1));
+ } else {
+ KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
+ return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
+ }
}
#else
-static inline int
-__kmp_test_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( __kmp_test_user_lock_with_checks_ != NULL );
- return ( *__kmp_test_user_lock_with_checks_ )( lck, gtid );
+static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
+ kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
+ return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
}
#endif
-extern int ( *__kmp_release_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );
+extern int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
+ kmp_int32 gtid);
-static inline void
-__kmp_release_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( __kmp_release_user_lock_with_checks_ != NULL );
- ( *__kmp_release_user_lock_with_checks_ ) ( lck, gtid );
+static inline void __kmp_release_user_lock_with_checks(kmp_user_lock_p lck,
+ kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(__kmp_release_user_lock_with_checks_ != NULL);
+ (*__kmp_release_user_lock_with_checks_)(lck, gtid);
}
-extern void ( *__kmp_init_user_lock_with_checks_ )( kmp_user_lock_p lck );
+extern void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck);
-static inline void
-__kmp_init_user_lock_with_checks( kmp_user_lock_p lck )
-{
- KMP_DEBUG_ASSERT( __kmp_init_user_lock_with_checks_ != NULL );
- ( *__kmp_init_user_lock_with_checks_ )( lck );
+static inline void __kmp_init_user_lock_with_checks(kmp_user_lock_p lck) {
+ KMP_DEBUG_ASSERT(__kmp_init_user_lock_with_checks_ != NULL);
+ (*__kmp_init_user_lock_with_checks_)(lck);
}
-//
// We need a non-checking version of destroy lock for when the RTL is
// doing the cleanup as it can't always tell if the lock is nested or not.
-//
-extern void ( *__kmp_destroy_user_lock_ )( kmp_user_lock_p lck );
+extern void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck);
-static inline void
-__kmp_destroy_user_lock( kmp_user_lock_p lck )
-{
- KMP_DEBUG_ASSERT( __kmp_destroy_user_lock_ != NULL );
- ( *__kmp_destroy_user_lock_ )( lck );
+static inline void __kmp_destroy_user_lock(kmp_user_lock_p lck) {
+ KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_ != NULL);
+ (*__kmp_destroy_user_lock_)(lck);
}
-extern void ( *__kmp_destroy_user_lock_with_checks_ )( kmp_user_lock_p lck );
+extern void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck);
-static inline void
-__kmp_destroy_user_lock_with_checks( kmp_user_lock_p lck )
-{
- KMP_DEBUG_ASSERT( __kmp_destroy_user_lock_with_checks_ != NULL );
- ( *__kmp_destroy_user_lock_with_checks_ )( lck );
+static inline void __kmp_destroy_user_lock_with_checks(kmp_user_lock_p lck) {
+ KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_with_checks_ != NULL);
+ (*__kmp_destroy_user_lock_with_checks_)(lck);
}
-extern int ( *__kmp_acquire_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );
+extern int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
+ kmp_int32 gtid);
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
-#define __kmp_acquire_nested_user_lock_with_checks(lck,gtid,depth) \
- if (__kmp_user_lock_kind == lk_tas) { \
- if ( __kmp_env_consistency_check ) { \
- char const * const func = "omp_set_nest_lock"; \
- if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_NEST_LOCK_T_SIZE ) \
- && lck->tas.lk.depth_locked == -1 ) { \
- KMP_FATAL( LockSimpleUsedAsNestable, func ); \
- } \
- } \
- if ( lck->tas.lk.poll - 1 == gtid ) { \
- lck->tas.lk.depth_locked += 1; \
- *depth = KMP_LOCK_ACQUIRED_NEXT; \
- } else { \
- if ( ( lck->tas.lk.poll != 0 ) || \
- ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) { \
- kmp_uint32 spins; \
- KMP_FSYNC_PREPARE( lck ); \
- KMP_INIT_YIELD( spins ); \
- if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
- KMP_YIELD( TRUE ); \
- } else { \
- KMP_YIELD_SPIN( spins ); \
- } \
- while ( ( lck->tas.lk.poll != 0 ) || \
- ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) { \
- if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
- KMP_YIELD( TRUE ); \
- } else { \
- KMP_YIELD_SPIN( spins ); \
- } \
- } \
- } \
- lck->tas.lk.depth_locked = 1; \
- *depth = KMP_LOCK_ACQUIRED_FIRST; \
- } \
- KMP_FSYNC_ACQUIRED( lck ); \
- } else { \
- KMP_DEBUG_ASSERT( __kmp_acquire_nested_user_lock_with_checks_ != NULL ); \
- *depth = ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid ); \
- }
+#define __kmp_acquire_nested_user_lock_with_checks(lck, gtid, depth) \
+ if (__kmp_user_lock_kind == lk_tas) { \
+ if (__kmp_env_consistency_check) { \
+ char const *const func = "omp_set_nest_lock"; \
+ if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) && \
+ lck->tas.lk.depth_locked == -1) { \
+ KMP_FATAL(LockSimpleUsedAsNestable, func); \
+ } \
+ } \
+ if (lck->tas.lk.poll - 1 == gtid) { \
+ lck->tas.lk.depth_locked += 1; \
+ *depth = KMP_LOCK_ACQUIRED_NEXT; \
+ } else { \
+ if ((lck->tas.lk.poll != 0) || \
+ (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1))) { \
+ kmp_uint32 spins; \
+ KMP_FSYNC_PREPARE(lck); \
+ KMP_INIT_YIELD(spins); \
+ if (TCR_4(__kmp_nth) > \
+ (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
+ KMP_YIELD(TRUE); \
+ } else { \
+ KMP_YIELD_SPIN(spins); \
+ } \
+ while ((lck->tas.lk.poll != 0) || \
+ (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, \
+ gtid + 1))) { \
+ if (TCR_4(__kmp_nth) > \
+ (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
+ KMP_YIELD(TRUE); \
+ } else { \
+ KMP_YIELD_SPIN(spins); \
+ } \
+ } \
+ } \
+ lck->tas.lk.depth_locked = 1; \
+ *depth = KMP_LOCK_ACQUIRED_FIRST; \
+ } \
+ KMP_FSYNC_ACQUIRED(lck); \
+ } else { \
+ KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL); \
+ *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid); \
+ }
#else
static inline void
-__kmp_acquire_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid, int* depth )
-{
- KMP_DEBUG_ASSERT( __kmp_acquire_nested_user_lock_with_checks_ != NULL );
- *depth = ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid );
+__kmp_acquire_nested_user_lock_with_checks(kmp_user_lock_p lck, kmp_int32 gtid,
+ int *depth) {
+ KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL);
+ *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid);
}
#endif
-extern int ( *__kmp_test_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );
+extern int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
+ kmp_int32 gtid);
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
-static inline int
-__kmp_test_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
-{
- if ( __kmp_user_lock_kind == lk_tas ) {
- int retval;
- if ( __kmp_env_consistency_check ) {
- char const * const func = "omp_test_nest_lock";
- if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_NEST_LOCK_T_SIZE )
- && lck->tas.lk.depth_locked == -1 ) {
- KMP_FATAL( LockSimpleUsedAsNestable, func );
- }
- }
- KMP_DEBUG_ASSERT( gtid >= 0 );
- if ( lck->tas.lk.poll - 1 == gtid ) { /* __kmp_get_tas_lock_owner( lck ) == gtid */
- return ++lck->tas.lk.depth_locked; /* same owner, depth increased */
- }
- retval = ( ( lck->tas.lk.poll == 0 ) &&
- KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) );
- if ( retval ) {
- KMP_MB();
- lck->tas.lk.depth_locked = 1;
- }
- return retval;
- } else {
- KMP_DEBUG_ASSERT( __kmp_test_nested_user_lock_with_checks_ != NULL );
- return ( *__kmp_test_nested_user_lock_with_checks_ )( lck, gtid );
+static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
+ kmp_int32 gtid) {
+ if (__kmp_user_lock_kind == lk_tas) {
+ int retval;
+ if (__kmp_env_consistency_check) {
+ char const *const func = "omp_test_nest_lock";
+ if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) &&
+ lck->tas.lk.depth_locked == -1) {
+ KMP_FATAL(LockSimpleUsedAsNestable, func);
+ }
+ }
+ KMP_DEBUG_ASSERT(gtid >= 0);
+ if (lck->tas.lk.poll - 1 ==
+ gtid) { /* __kmp_get_tas_lock_owner( lck ) == gtid */
+ return ++lck->tas.lk.depth_locked; /* same owner, depth increased */
}
+ retval = ((lck->tas.lk.poll == 0) &&
+ KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1));
+ if (retval) {
+ KMP_MB();
+ lck->tas.lk.depth_locked = 1;
+ }
+ return retval;
+ } else {
+ KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
+ return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
+ }
}
#else
-static inline int
-__kmp_test_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( __kmp_test_nested_user_lock_with_checks_ != NULL );
- return ( *__kmp_test_nested_user_lock_with_checks_ )( lck, gtid );
+static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
+ kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
+ return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
}
#endif
-extern int ( *__kmp_release_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );
+extern int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
+ kmp_int32 gtid);
static inline int
-__kmp_release_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
-{
- KMP_DEBUG_ASSERT( __kmp_release_nested_user_lock_with_checks_ != NULL );
- return ( *__kmp_release_nested_user_lock_with_checks_ )( lck, gtid );
+__kmp_release_nested_user_lock_with_checks(kmp_user_lock_p lck,
+ kmp_int32 gtid) {
+ KMP_DEBUG_ASSERT(__kmp_release_nested_user_lock_with_checks_ != NULL);
+ return (*__kmp_release_nested_user_lock_with_checks_)(lck, gtid);
}
-extern void ( *__kmp_init_nested_user_lock_with_checks_ )( kmp_user_lock_p lck );
+extern void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck);
-static inline void __kmp_init_nested_user_lock_with_checks( kmp_user_lock_p lck )
-{
- KMP_DEBUG_ASSERT( __kmp_init_nested_user_lock_with_checks_ != NULL );
- ( *__kmp_init_nested_user_lock_with_checks_ )( lck );
+static inline void
+__kmp_init_nested_user_lock_with_checks(kmp_user_lock_p lck) {
+ KMP_DEBUG_ASSERT(__kmp_init_nested_user_lock_with_checks_ != NULL);
+ (*__kmp_init_nested_user_lock_with_checks_)(lck);
}
-extern void ( *__kmp_destroy_nested_user_lock_with_checks_ )( kmp_user_lock_p lck );
+extern void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck);
static inline void
-__kmp_destroy_nested_user_lock_with_checks( kmp_user_lock_p lck )
-{
- KMP_DEBUG_ASSERT( __kmp_destroy_nested_user_lock_with_checks_ != NULL );
- ( *__kmp_destroy_nested_user_lock_with_checks_ )( lck );
+__kmp_destroy_nested_user_lock_with_checks(kmp_user_lock_p lck) {
+ KMP_DEBUG_ASSERT(__kmp_destroy_nested_user_lock_with_checks_ != NULL);
+ (*__kmp_destroy_nested_user_lock_with_checks_)(lck);
}
-//
// user lock functions which do not necessarily exist for all lock kinds.
//
// The "set" functions usually have wrapper routines that check for a NULL set
@@ -932,103 +882,96 @@ __kmp_destroy_nested_user_lock_with_chec
// In other cases, the calling code really should differentiate between an
// unimplemented function and one that is implemented but returning NULL /
// invalid value. If this is the case, no get function wrapper exists.
-//
-extern int ( *__kmp_is_user_lock_initialized_ )( kmp_user_lock_p lck );
+extern int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck);
// no set function; fields set during local allocation
-extern const ident_t * ( *__kmp_get_user_lock_location_ )( kmp_user_lock_p lck );
+extern const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck);
-static inline const ident_t *
-__kmp_get_user_lock_location( kmp_user_lock_p lck )
-{
- if ( __kmp_get_user_lock_location_ != NULL ) {
- return ( *__kmp_get_user_lock_location_ )( lck );
- }
- else {
- return NULL;
- }
+static inline const ident_t *__kmp_get_user_lock_location(kmp_user_lock_p lck) {
+ if (__kmp_get_user_lock_location_ != NULL) {
+ return (*__kmp_get_user_lock_location_)(lck);
+ } else {
+ return NULL;
+ }
}
-extern void ( *__kmp_set_user_lock_location_ )( kmp_user_lock_p lck, const ident_t *loc );
+extern void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
+ const ident_t *loc);
-static inline void
-__kmp_set_user_lock_location( kmp_user_lock_p lck, const ident_t *loc )
-{
- if ( __kmp_set_user_lock_location_ != NULL ) {
- ( *__kmp_set_user_lock_location_ )( lck, loc );
- }
+static inline void __kmp_set_user_lock_location(kmp_user_lock_p lck,
+ const ident_t *loc) {
+ if (__kmp_set_user_lock_location_ != NULL) {
+ (*__kmp_set_user_lock_location_)(lck, loc);
+ }
}
-extern kmp_lock_flags_t ( *__kmp_get_user_lock_flags_ )( kmp_user_lock_p lck );
+extern kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck);
-extern void ( *__kmp_set_user_lock_flags_ )( kmp_user_lock_p lck, kmp_lock_flags_t flags );
+extern void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
+ kmp_lock_flags_t flags);
-static inline void
-__kmp_set_user_lock_flags( kmp_user_lock_p lck, kmp_lock_flags_t flags )
-{
- if ( __kmp_set_user_lock_flags_ != NULL ) {
- ( *__kmp_set_user_lock_flags_ )( lck, flags );
- }
+static inline void __kmp_set_user_lock_flags(kmp_user_lock_p lck,
+ kmp_lock_flags_t flags) {
+ if (__kmp_set_user_lock_flags_ != NULL) {
+ (*__kmp_set_user_lock_flags_)(lck, flags);
+ }
}
-//
+// The function which sets up all of the vtbl pointers for kmp_user_lock_t.
-//
-extern void __kmp_set_user_lock_vptrs( kmp_lock_kind_t user_lock_kind );
+extern void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind);
-//
// Macros for binding user lock functions.
-//
-#define KMP_BIND_USER_LOCK_TEMPLATE(nest, kind, suffix) { \
- __kmp_acquire##nest##user_lock_with_checks_ = ( int (*)( kmp_user_lock_p, kmp_int32 ) ) \
- __kmp_acquire##nest##kind##_##suffix; \
- __kmp_release##nest##user_lock_with_checks_ = ( int (*)( kmp_user_lock_p, kmp_int32 ) ) \
- __kmp_release##nest##kind##_##suffix; \
- __kmp_test##nest##user_lock_with_checks_ = ( int (*)( kmp_user_lock_p, kmp_int32 ) ) \
- __kmp_test##nest##kind##_##suffix; \
- __kmp_init##nest##user_lock_with_checks_ = ( void (*)( kmp_user_lock_p ) ) \
- __kmp_init##nest##kind##_##suffix; \
- __kmp_destroy##nest##user_lock_with_checks_ = ( void (*)( kmp_user_lock_p ) ) \
- __kmp_destroy##nest##kind##_##suffix; \
-}
-
-#define KMP_BIND_USER_LOCK(kind) KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock)
-#define KMP_BIND_USER_LOCK_WITH_CHECKS(kind) KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock_with_checks)
-#define KMP_BIND_NESTED_USER_LOCK(kind) KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock)
-#define KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(kind) KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock_with_checks)
+#define KMP_BIND_USER_LOCK_TEMPLATE(nest, kind, suffix) \
+ { \
+ __kmp_acquire##nest##user_lock_with_checks_ = (int (*)( \
+ kmp_user_lock_p, kmp_int32))__kmp_acquire##nest##kind##_##suffix; \
+ __kmp_release##nest##user_lock_with_checks_ = (int (*)( \
+ kmp_user_lock_p, kmp_int32))__kmp_release##nest##kind##_##suffix; \
+ __kmp_test##nest##user_lock_with_checks_ = (int (*)( \
+ kmp_user_lock_p, kmp_int32))__kmp_test##nest##kind##_##suffix; \
+ __kmp_init##nest##user_lock_with_checks_ = \
+ (void (*)(kmp_user_lock_p))__kmp_init##nest##kind##_##suffix; \
+ __kmp_destroy##nest##user_lock_with_checks_ = \
+ (void (*)(kmp_user_lock_p))__kmp_destroy##nest##kind##_##suffix; \
+ }
+
+#define KMP_BIND_USER_LOCK(kind) KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock)
+#define KMP_BIND_USER_LOCK_WITH_CHECKS(kind) \
+ KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock_with_checks)
+#define KMP_BIND_NESTED_USER_LOCK(kind) \
+ KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock)
+#define KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(kind) \
+ KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock_with_checks)
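To make the token pasting concrete: KMP_BIND_USER_LOCK(ticket), for example, expands (ignoring line breaks) to assignments that point the generic with-checks slots at the ticket-lock entry points, roughly:

  __kmp_acquire_user_lock_with_checks_ =
      (int (*)(kmp_user_lock_p, kmp_int32))__kmp_acquire_ticket_lock;
  __kmp_release_user_lock_with_checks_ =
      (int (*)(kmp_user_lock_p, kmp_int32))__kmp_release_ticket_lock;
  __kmp_test_user_lock_with_checks_ =
      (int (*)(kmp_user_lock_p, kmp_int32))__kmp_test_ticket_lock;
  __kmp_init_user_lock_with_checks_ =
      (void (*)(kmp_user_lock_p))__kmp_init_ticket_lock;
  __kmp_destroy_user_lock_with_checks_ =
      (void (*)(kmp_user_lock_p))__kmp_destroy_ticket_lock;

The nested and _WITH_CHECKS variants differ only in the pasted nest infix and suffix.
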
-// ----------------------------------------------------------------------------
// User lock table & lock allocation
-// ----------------------------------------------------------------------------
-
-/*
- On 64-bit Linux* OS (and OS X*) GNU compiler allocates only 4 bytems memory for lock variable, which
- is not enough to store a pointer, so we have to use lock indexes instead of pointers and
- maintain lock table to map indexes to pointers.
-
-
- Note: The first element of the table is not a pointer to lock! It is a pointer to previously
- allocated table (or NULL if it is the first table).
-
- Usage:
-
- if ( OMP_LOCK_T_SIZE < sizeof( <lock> ) ) { // or OMP_NEST_LOCK_T_SIZE
- Lock table is fully utilized. User locks are indexes, so table is
- used on user lock operation.
- Note: it may be the case (lin_32) that we don't need to use a lock
- table for regular locks, but do need the table for nested locks.
- }
- else {
- Lock table initialized but not actually used.
- }
+/* On 64-bit Linux* OS (and OS X*) the GNU compiler allocates only 4 bytes of
+ memory for a lock variable, which is not enough to store a pointer, so we
+ have to use lock indexes instead of pointers and maintain a lock table to
+ map indexes to pointers.
+
+
+ Note: The first element of the table is not a pointer to lock! It is a
+ pointer to previously allocated table (or NULL if it is the first table).
+
+ Usage:
+
+ if ( OMP_LOCK_T_SIZE < sizeof( <lock> ) ) { // or OMP_NEST_LOCK_T_SIZE
+ Lock table is fully utilized. User locks are indexes, so table is used on
+ user lock operation.
+ Note: it may be the case (lin_32) that we don't need to use a lock
+ table for regular locks, but do need the table for nested locks.
+ }
+ else {
+ Lock table initialized but not actually used.
+ }
*/
struct kmp_lock_table {
- kmp_lock_index_t used; // Number of used elements
- kmp_lock_index_t allocated; // Number of allocated elements
- kmp_user_lock_p * table; // Lock table.
+ kmp_lock_index_t used; // Number of used elements
+ kmp_lock_index_t allocated; // Number of allocated elements
+ kmp_user_lock_p *table; // Lock table.
};
typedef struct kmp_lock_table kmp_lock_table_t;
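With that layout, looking up a lock that is stored as an index is just an array access into the current table, remembering that element 0 is the link to the previously allocated table and not a lock. A minimal sketch, leaving out whatever validation the real __kmp_lookup_user_lock does:

/* Sketch only, not the runtime's code. */
static kmp_user_lock_p lookup_sketch(kmp_lock_table_t const *tbl,
                                     kmp_lock_index_t index) {
  /* index 0 is the link to the previous table, so real locks start at 1 */
  if (index == 0 || index >= tbl->used)
    return NULL;
  return tbl->table[index];
}
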
@@ -1037,8 +980,8 @@ extern kmp_lock_table_t __kmp_user_lock_
extern kmp_user_lock_p __kmp_lock_pool;
struct kmp_block_of_locks {
- struct kmp_block_of_locks * next_block;
- void * locks;
+ struct kmp_block_of_locks *next_block;
+ void *locks;
};
typedef struct kmp_block_of_locks kmp_block_of_locks_t;
@@ -1046,21 +989,25 @@ typedef struct kmp_block_of_locks kmp_bl
extern kmp_block_of_locks_t *__kmp_lock_blocks;
extern int __kmp_num_locks_in_block;
-extern kmp_user_lock_p __kmp_user_lock_allocate( void **user_lock, kmp_int32 gtid, kmp_lock_flags_t flags );
-extern void __kmp_user_lock_free( void **user_lock, kmp_int32 gtid, kmp_user_lock_p lck );
-extern kmp_user_lock_p __kmp_lookup_user_lock( void **user_lock, char const *func );
+extern kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock,
+ kmp_int32 gtid,
+ kmp_lock_flags_t flags);
+extern void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
+ kmp_user_lock_p lck);
+extern kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock,
+ char const *func);
extern void __kmp_cleanup_user_locks();
-#define KMP_CHECK_USER_LOCK_INIT() \
- { \
- if ( ! TCR_4( __kmp_init_user_locks ) ) { \
- __kmp_acquire_bootstrap_lock( &__kmp_initz_lock ); \
- if ( ! TCR_4( __kmp_init_user_locks ) ) { \
- TCW_4( __kmp_init_user_locks, TRUE ); \
- } \
- __kmp_release_bootstrap_lock( &__kmp_initz_lock ); \
- } \
- }
+#define KMP_CHECK_USER_LOCK_INIT() \
+ { \
+ if (!TCR_4(__kmp_init_user_locks)) { \
+ __kmp_acquire_bootstrap_lock(&__kmp_initz_lock); \
+ if (!TCR_4(__kmp_init_user_locks)) { \
+ TCW_4(__kmp_init_user_locks, TRUE); \
+ } \
+ __kmp_release_bootstrap_lock(&__kmp_initz_lock); \
+ } \
+ }
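The KMP_CHECK_USER_LOCK_INIT macro above is the standard double-checked initialization pattern: a cheap unsynchronized read of __kmp_init_user_locks first, and the flag only gets set under the bootstrap lock after a second check. The same shape in portable form, as a sketch with standard primitives standing in for TCR_4/TCW_4 and the bootstrap lock (not the runtime's code):

#include <pthread.h>
#include <stdatomic.h>

static atomic_int init_done;
static pthread_mutex_t init_mtx = PTHREAD_MUTEX_INITIALIZER;

static void check_init_sketch(void (*do_init)(void)) {
  if (!atomic_load(&init_done)) {   /* unsynchronized fast-path test */
    pthread_mutex_lock(&init_mtx);
    if (!atomic_load(&init_done)) { /* re-check while holding the lock */
      do_init();                    /* hypothetical one-time setup */
      atomic_store(&init_done, 1);
    }
    pthread_mutex_unlock(&init_mtx);
  }
}
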
#endif // KMP_USE_DYNAMIC_LOCK
@@ -1068,168 +1015,187 @@ extern void __kmp_cleanup_user_locks();
#undef KMP_GTID_DNE
#if KMP_USE_DYNAMIC_LOCK
-
-//
-// KMP_USE_DYNAMIC_LOCK enables dynamic dispatch of lock functions without breaking the current
-// compatibility. Essential functionality of this new code is dynamic dispatch, but it also
-// implements (or enables implementation of) hinted user lock and critical section which will be
-// part of OMP 4.5 soon.
-//
-// Lock type can be decided at creation time (i.e., lock initialization), and subsequent lock
-// function call on the created lock object requires type extraction and call through jump table
-// using the extracted type. This type information is stored in two different ways depending on
-// the size of the lock object, and we differentiate lock types by this size requirement - direct
-// and indirect locks.
+// KMP_USE_DYNAMIC_LOCK enables dynamic dispatch of lock functions without
+// breaking the current compatibility. Essential functionality of this new code
+// is dynamic dispatch, but it also implements (or enables implementation of)
+// hinted user lock and critical section which will be part of OMP 4.5 soon.
+//
+// Lock type can be decided at creation time (i.e., lock initialization), and
+// subsequent lock function call on the created lock object requires type
+// extraction and call through jump table using the extracted type. This type
+// information is stored in two different ways depending on the size of the lock
+// object, and we differentiate lock types by this size requirement - direct and
+// indirect locks.
//
// Direct locks:
-// A direct lock object fits into the space created by the compiler for an omp_lock_t object, and
-// TAS/Futex lock falls into this category. We use low one byte of the lock object as the storage
-// for the lock type, and appropriate bit operation is required to access the data meaningful to
-// the lock algorithms. Also, to differentiate direct lock from indirect lock, 1 is written to LSB
-// of the lock object. The newly introduced "hle" lock is also a direct lock.
+// A direct lock object fits into the space created by the compiler for an
+// omp_lock_t object, and TAS/Futex locks fall into this category. We use the
+// low byte of the lock object as the storage for the lock type, and an
+// appropriate bit operation is required to access the data meaningful to the
+// lock algorithms. Also, to differentiate a direct lock from an indirect lock,
+// 1 is written to the LSB of the lock object. The newly introduced "hle" lock
+// is also a direct lock.
//
// Indirect locks:
-// An indirect lock object requires more space than the compiler-generated space, and it should be
-// allocated from heap. Depending on the size of the compiler-generated space for the lock (i.e.,
-// size of omp_lock_t), this omp_lock_t object stores either the address of the heap-allocated
-// indirect lock (void * fits in the object) or an index to the indirect lock table entry that
-// holds the address. Ticket/Queuing/DRDPA/Adaptive lock falls into this category, and the newly
-// introduced "rtm" lock is also an indirect lock which was implemented on top of the Queuing lock.
-// When the omp_lock_t object holds an index (not lock address), 0 is written to LSB to
-// differentiate the lock from a direct lock, and the remaining part is the actual index to the
+// An indirect lock object requires more space than the compiler-generated
+// space, and it should be allocated from the heap. Depending on the size of
+// the compiler-generated space for the lock (i.e., the size of omp_lock_t),
+// this omp_lock_t object stores either the address of the heap-allocated
+// indirect lock (void * fits in the object) or an index to the indirect lock
+// table entry that holds the address. Ticket/Queuing/DRDPA/Adaptive locks fall
+// into this category, and the newly introduced "rtm" lock is also an indirect
+// lock, implemented on top of the Queuing lock. When the omp_lock_t object
+// holds an index (not a lock address), 0 is written to the LSB to
+// differentiate the lock from a direct lock, and the remaining part is the
+// actual index into the indirect lock table.
-//
#include <stdint.h> // for uintptr_t
// Shortcuts
-#define KMP_USE_INLINED_TAS (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)) && 1
+#define KMP_USE_INLINED_TAS \
+ (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)) && 1
#define KMP_USE_INLINED_FUTEX KMP_USE_FUTEX && 0
// List of lock definitions; all nested locks are indirect locks.
// hle lock is xchg lock prefixed with XACQUIRE/XRELEASE.
// All nested locks are indirect lock types.
#if KMP_USE_TSX
-# if KMP_USE_FUTEX
-# define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a)
-# define KMP_FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a) \
- m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
- m(nested_queuing, a) m(nested_drdpa, a)
-# else
-# define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(hle, a)
-# define KMP_FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a) \
- m(nested_tas, a) m(nested_ticket, a) \
- m(nested_queuing, a) m(nested_drdpa, a)
-# endif // KMP_USE_FUTEX
-# define KMP_LAST_D_LOCK lockseq_hle
+#if KMP_USE_FUTEX
+#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a)
+#define KMP_FOREACH_I_LOCK(m, a) \
+ m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a) \
+ m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
+ m(nested_queuing, a) m(nested_drdpa, a)
+#else
+#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(hle, a)
+#define KMP_FOREACH_I_LOCK(m, a) \
+ m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a) \
+ m(nested_tas, a) m(nested_ticket, a) m(nested_queuing, a) \
+ m(nested_drdpa, a)
+#endif // KMP_USE_FUTEX
+#define KMP_LAST_D_LOCK lockseq_hle
+#else
+#if KMP_USE_FUTEX
+#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a)
+#define KMP_FOREACH_I_LOCK(m, a) \
+ m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_futex, a) \
+ m(nested_ticket, a) m(nested_queuing, a) m(nested_drdpa, a)
+#define KMP_LAST_D_LOCK lockseq_futex
#else
-# if KMP_USE_FUTEX
-# define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a)
-# define KMP_FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(drdpa, a) \
- m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
- m(nested_queuing, a) m(nested_drdpa, a)
-# define KMP_LAST_D_LOCK lockseq_futex
-# else
-# define KMP_FOREACH_D_LOCK(m, a) m(tas, a)
-# define KMP_FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(drdpa, a) \
- m(nested_tas, a) m(nested_ticket, a) \
- m(nested_queuing, a) m(nested_drdpa, a)
-# define KMP_LAST_D_LOCK lockseq_tas
-# endif // KMP_USE_FUTEX
+#define KMP_FOREACH_D_LOCK(m, a) m(tas, a)
+#define KMP_FOREACH_I_LOCK(m, a) \
+ m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_ticket, a) \
+ m(nested_queuing, a) m(nested_drdpa, a)
+#define KMP_LAST_D_LOCK lockseq_tas
+#endif // KMP_USE_FUTEX
#endif // KMP_USE_TSX
// Information used in dynamic dispatch
-#define KMP_LOCK_SHIFT 8 // number of low bits to be used as tag for direct locks
+#define KMP_LOCK_SHIFT \
+ 8 // number of low bits to be used as tag for direct locks
#define KMP_FIRST_D_LOCK lockseq_tas
#define KMP_FIRST_I_LOCK lockseq_ticket
-#define KMP_LAST_I_LOCK lockseq_nested_drdpa
-#define KMP_NUM_I_LOCKS (locktag_nested_drdpa+1) // number of indirect lock types
+#define KMP_LAST_I_LOCK lockseq_nested_drdpa
+#define KMP_NUM_I_LOCKS \
+ (locktag_nested_drdpa + 1) // number of indirect lock types
// Base type for dynamic locks.
typedef kmp_uint32 kmp_dyna_lock_t;
-// Lock sequence that enumerates all lock kinds.
-// Always make this enumeration consistent with kmp_lockseq_t in the include directory.
+// Lock sequence that enumerates all lock kinds. Always make this enumeration
+// consistent with kmp_lockseq_t in the include directory.
typedef enum {
- lockseq_indirect = 0,
-#define expand_seq(l,a) lockseq_##l,
- KMP_FOREACH_D_LOCK(expand_seq, 0)
- KMP_FOREACH_I_LOCK(expand_seq, 0)
+ lockseq_indirect = 0,
+#define expand_seq(l, a) lockseq_##l,
+ KMP_FOREACH_D_LOCK(expand_seq, 0) KMP_FOREACH_I_LOCK(expand_seq, 0)
#undef expand_seq
} kmp_dyna_lockseq_t;
// Enumerates indirect lock tags.
typedef enum {
-#define expand_tag(l,a) locktag_##l,
- KMP_FOREACH_I_LOCK(expand_tag, 0)
+#define expand_tag(l, a) locktag_##l,
+ KMP_FOREACH_I_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_indirect_locktag_t;
// Utility macros that extract information from lock sequences.
-#define KMP_IS_D_LOCK(seq) ((seq) >= KMP_FIRST_D_LOCK && (seq) <= KMP_LAST_D_LOCK)
-#define KMP_IS_I_LOCK(seq) ((seq) >= KMP_FIRST_I_LOCK && (seq) <= KMP_LAST_I_LOCK)
-#define KMP_GET_I_TAG(seq) (kmp_indirect_locktag_t)((seq) - KMP_FIRST_I_LOCK)
-#define KMP_GET_D_TAG(seq) ((seq)<<1 | 1)
+#define KMP_IS_D_LOCK(seq) \
+ ((seq) >= KMP_FIRST_D_LOCK && (seq) <= KMP_LAST_D_LOCK)
+#define KMP_IS_I_LOCK(seq) \
+ ((seq) >= KMP_FIRST_I_LOCK && (seq) <= KMP_LAST_I_LOCK)
+#define KMP_GET_I_TAG(seq) (kmp_indirect_locktag_t)((seq)-KMP_FIRST_I_LOCK)
+#define KMP_GET_D_TAG(seq) ((seq) << 1 | 1)
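Putting those together: lockseq_indirect is 0 and the direct sequences are enumerated right after it, and KMP_GET_D_TAG shifts the sequence left by one and ORs in 1, so every direct tag is odd. That low bit is exactly the direct-versus-indirect discriminator described above. A small self-contained check of the arithmetic (the numeric values follow from the enum order shown here with futex and TSX enabled; they are illustrative, not an ABI guarantee):

#include <assert.h>
enum { seq_indirect = 0, seq_tas, seq_futex, seq_hle };
#define GET_D_TAG(seq) ((seq) << 1 | 1) /* same shape as KMP_GET_D_TAG */

int main(void) {
  assert(GET_D_TAG(seq_tas) == 3);   /* 1 << 1 | 1 */
  assert(GET_D_TAG(seq_futex) == 5); /* 2 << 1 | 1 */
  assert(GET_D_TAG(seq_hle) & 1);    /* every direct tag has the LSB set */
  return 0;
}
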
// Enumerates direct lock tags starting from indirect tag.
typedef enum {
-#define expand_tag(l,a) locktag_##l = KMP_GET_D_TAG(lockseq_##l),
- KMP_FOREACH_D_LOCK(expand_tag, 0)
+#define expand_tag(l, a) locktag_##l = KMP_GET_D_TAG(lockseq_##l),
+ KMP_FOREACH_D_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_direct_locktag_t;
// Indirect lock type
typedef struct {
- kmp_user_lock_p lock;
- kmp_indirect_locktag_t type;
+ kmp_user_lock_p lock;
+ kmp_indirect_locktag_t type;
} kmp_indirect_lock_t;
-// Function tables for direct locks. Set/unset/test differentiate functions with/without consistency checking.
+// Function tables for direct locks. Set/unset/test differentiate functions
+// with/without consistency checking.
extern void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t);
extern void (*__kmp_direct_destroy[])(kmp_dyna_lock_t *);
extern void (*(*__kmp_direct_set))(kmp_dyna_lock_t *, kmp_int32);
-extern int (*(*__kmp_direct_unset))(kmp_dyna_lock_t *, kmp_int32);
-extern int (*(*__kmp_direct_test))(kmp_dyna_lock_t *, kmp_int32);
+extern int (*(*__kmp_direct_unset))(kmp_dyna_lock_t *, kmp_int32);
+extern int (*(*__kmp_direct_test))(kmp_dyna_lock_t *, kmp_int32);
-// Function tables for indirect locks. Set/unset/test differentiate functions with/withuot consistency checking.
+// Function tables for indirect locks. Set/unset/test differentiate functions
+// with/without consistency checking.
extern void (*__kmp_indirect_init[])(kmp_user_lock_p);
extern void (*__kmp_indirect_destroy[])(kmp_user_lock_p);
extern void (*(*__kmp_indirect_set))(kmp_user_lock_p, kmp_int32);
-extern int (*(*__kmp_indirect_unset))(kmp_user_lock_p, kmp_int32);
-extern int (*(*__kmp_indirect_test))(kmp_user_lock_p, kmp_int32);
+extern int (*(*__kmp_indirect_unset))(kmp_user_lock_p, kmp_int32);
+extern int (*(*__kmp_indirect_test))(kmp_user_lock_p, kmp_int32);
// Extracts direct lock tag from a user lock pointer
-#define KMP_EXTRACT_D_TAG(l) (*((kmp_dyna_lock_t *)(l)) & ((1<<KMP_LOCK_SHIFT)-1) & -(*((kmp_dyna_lock_t *)(l)) & 1))
+#define KMP_EXTRACT_D_TAG(l) \
+ (*((kmp_dyna_lock_t *)(l)) & ((1 << KMP_LOCK_SHIFT) - 1) & \
+ -(*((kmp_dyna_lock_t *)(l)) & 1))
// Extracts indirect lock index from a user lock pointer
#define KMP_EXTRACT_I_INDEX(l) (*(kmp_lock_index_t *)(l) >> 1)
-// Returns function pointer to the direct lock function with l (kmp_dyna_lock_t *) and op (operation type).
+// Returns function pointer to the direct lock function with l (kmp_dyna_lock_t
+// *) and op (operation type).
#define KMP_D_LOCK_FUNC(l, op) __kmp_direct_##op[KMP_EXTRACT_D_TAG(l)]
-// Returns function pointer to the indirect lock function with l (kmp_indirect_lock_t *) and op (operation type).
-#define KMP_I_LOCK_FUNC(l, op) __kmp_indirect_##op[((kmp_indirect_lock_t *)(l))->type]
+// Returns function pointer to the indirect lock function with l
+// (kmp_indirect_lock_t *) and op (operation type).
+#define KMP_I_LOCK_FUNC(l, op) \
+ __kmp_indirect_##op[((kmp_indirect_lock_t *)(l))->type]
// Initializes a direct lock with the given lock pointer and lock sequence.
-#define KMP_INIT_D_LOCK(l, seq) __kmp_direct_init[KMP_GET_D_TAG(seq)]((kmp_dyna_lock_t *)l, seq)
+#define KMP_INIT_D_LOCK(l, seq) \
+ __kmp_direct_init[KMP_GET_D_TAG(seq)]((kmp_dyna_lock_t *)l, seq)
// Initializes an indirect lock with the given lock pointer and lock sequence.
-#define KMP_INIT_I_LOCK(l, seq) __kmp_direct_init[0]((kmp_dyna_lock_t *)(l), seq)
+#define KMP_INIT_I_LOCK(l, seq) \
+ __kmp_direct_init[0]((kmp_dyna_lock_t *)(l), seq)
// Returns "free" lock value for the given lock type.
-#define KMP_LOCK_FREE(type) (locktag_##type)
+#define KMP_LOCK_FREE(type) (locktag_##type)
// Returns "busy" lock value for the given lock type.
-#define KMP_LOCK_BUSY(v, type) ((v)<<KMP_LOCK_SHIFT | locktag_##type)
+#define KMP_LOCK_BUSY(v, type) ((v) << KMP_LOCK_SHIFT | locktag_##type)
// Returns lock value after removing (shifting) lock tag.
-#define KMP_LOCK_STRIP(v) ((v)>>KMP_LOCK_SHIFT)
+#define KMP_LOCK_STRIP(v) ((v) >> KMP_LOCK_SHIFT)
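KMP_LOCK_BUSY and KMP_LOCK_STRIP are inverses up to the tag: the value (for example an owner id such as gtid + 1) lives in the bits above KMP_LOCK_SHIFT and the per-type tag lives in the low bits, so stripping recovers the stored value. A self-contained round-trip check with local copies of the macros (the tag value 3 is only an example):

#include <assert.h>
#include <stdint.h>

#define SHIFT 8                      /* stands in for KMP_LOCK_SHIFT */
#define BUSY(v, tag) ((uint32_t)(v) << SHIFT | (tag))
#define STRIP(v) ((uint32_t)(v) >> SHIFT)

int main(void) {
  uint32_t tag = 3;                  /* an odd, direct-lock style tag */
  uint32_t word = BUSY(42 + 1, tag); /* store gtid 42 as owner (gtid + 1) */
  assert(STRIP(word) == 43);         /* stripping the tag recovers the owner */
  assert((word & ((1u << SHIFT) - 1)) == tag);
  return 0;
}
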
-// Initializes global states and data structures for managing dynamic user locks.
+// Initializes global states and data structures for managing dynamic user
+// locks.
extern void __kmp_init_dynamic_user_locks();
// Allocates and returns an indirect lock with the given indirect lock tag.
-extern kmp_indirect_lock_t * __kmp_allocate_indirect_lock(void **, kmp_int32, kmp_indirect_locktag_t);
+extern kmp_indirect_lock_t *
+__kmp_allocate_indirect_lock(void **, kmp_int32, kmp_indirect_locktag_t);
// Cleans up global states and data structures for managing dynamic user locks.
extern void __kmp_cleanup_indirect_user_locks();
@@ -1238,72 +1204,82 @@ extern void __kmp_cleanup_indirect_user_
extern kmp_dyna_lockseq_t __kmp_user_lock_seq;
// Jump table for "set lock location", available only for indirect locks.
-extern void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p, const ident_t *);
-#define KMP_SET_I_LOCK_LOCATION(lck, loc) { \
- if (__kmp_indirect_set_location[(lck)->type] != NULL) \
- __kmp_indirect_set_location[(lck)->type]((lck)->lock, loc); \
-}
+extern void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
+ const ident_t *);
+#define KMP_SET_I_LOCK_LOCATION(lck, loc) \
+ { \
+ if (__kmp_indirect_set_location[(lck)->type] != NULL) \
+ __kmp_indirect_set_location[(lck)->type]((lck)->lock, loc); \
+ }
// Jump table for "set lock flags", available only for indirect locks.
-extern void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p, kmp_lock_flags_t);
-#define KMP_SET_I_LOCK_FLAGS(lck, flag) { \
- if (__kmp_indirect_set_flags[(lck)->type] != NULL) \
- __kmp_indirect_set_flags[(lck)->type]((lck)->lock, flag); \
-}
+extern void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
+ kmp_lock_flags_t);
+#define KMP_SET_I_LOCK_FLAGS(lck, flag) \
+ { \
+ if (__kmp_indirect_set_flags[(lck)->type] != NULL) \
+ __kmp_indirect_set_flags[(lck)->type]((lck)->lock, flag); \
+ }
// Jump table for "get lock location", available only for indirect locks.
-extern const ident_t * (*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p);
-#define KMP_GET_I_LOCK_LOCATION(lck) ( __kmp_indirect_get_location[(lck)->type] != NULL \
- ? __kmp_indirect_get_location[(lck)->type]((lck)->lock) \
- : NULL )
+extern const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
+ kmp_user_lock_p);
+#define KMP_GET_I_LOCK_LOCATION(lck) \
+ (__kmp_indirect_get_location[(lck)->type] != NULL \
+ ? __kmp_indirect_get_location[(lck)->type]((lck)->lock) \
+ : NULL)
// Jump table for "get lock flags", available only for indirect locks.
-extern kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p);
-#define KMP_GET_I_LOCK_FLAGS(lck) ( __kmp_indirect_get_flags[(lck)->type] != NULL \
- ? __kmp_indirect_get_flags[(lck)->type]((lck)->lock) \
- : NULL )
+extern kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
+ kmp_user_lock_p);
+#define KMP_GET_I_LOCK_FLAGS(lck) \
+ (__kmp_indirect_get_flags[(lck)->type] != NULL \
+ ? __kmp_indirect_get_flags[(lck)->type]((lck)->lock) \
+ : NULL)
-#define KMP_I_LOCK_CHUNK 1024 // number of kmp_indirect_lock_t objects to be allocated together
+#define KMP_I_LOCK_CHUNK \
+ 1024 // number of kmp_indirect_lock_t objects to be allocated together
// Lock table for indirect locks.
typedef struct kmp_indirect_lock_table {
- kmp_indirect_lock_t **table; // blocks of indirect locks allocated
- kmp_lock_index_t size; // size of the indirect lock table
- kmp_lock_index_t next; // index to the next lock to be allocated
+ kmp_indirect_lock_t **table; // blocks of indirect locks allocated
+ kmp_lock_index_t size; // size of the indirect lock table
+ kmp_lock_index_t next; // index to the next lock to be allocated
} kmp_indirect_lock_table_t;
extern kmp_indirect_lock_table_t __kmp_i_lock_table;
// Returns the indirect lock associated with the given index.
-#define KMP_GET_I_LOCK(index) (*(__kmp_i_lock_table.table + (index)/KMP_I_LOCK_CHUNK) + (index)%KMP_I_LOCK_CHUNK)
+#define KMP_GET_I_LOCK(index) \
+ (*(__kmp_i_lock_table.table + (index) / KMP_I_LOCK_CHUNK) + \
+ (index) % KMP_I_LOCK_CHUNK)
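Since indirect locks are handed out in chunks of KMP_I_LOCK_CHUNK entries, KMP_GET_I_LOCK simply splits the index into a block number and an offset within the block. A worked example with the 1024-entry chunks defined above:

#include <assert.h>
#define CHUNK 1024 /* KMP_I_LOCK_CHUNK */

int main(void) {
  unsigned index = 2500;
  assert(index / CHUNK == 2);   /* which block of the table */
  assert(index % CHUNK == 452); /* slot within that block */
  /* i.e. KMP_GET_I_LOCK(2500) is __kmp_i_lock_table.table[2] + 452 */
  return 0;
}
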
// Number of locks in a lock block, which is fixed to "1" now.
-// TODO: No lock block implementation now. If we do support, we need to manage lock block data
-// structure for each indirect lock type.
+// TODO: No lock block implementation now. If we do support it, we need to
+// manage a lock block data structure for each indirect lock type.
extern int __kmp_num_locks_in_block;
// Fast lock table lookup without consistency checking
-#define KMP_LOOKUP_I_LOCK(l) ( (OMP_LOCK_T_SIZE < sizeof(void *)) \
- ? KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(l)) \
- : *((kmp_indirect_lock_t **)(l)) )
+#define KMP_LOOKUP_I_LOCK(l) \
+ ((OMP_LOCK_T_SIZE < sizeof(void *)) ? KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(l)) \
+ : *((kmp_indirect_lock_t **)(l)))
// Used once in kmp_error.cpp
-extern kmp_int32
-__kmp_get_user_lock_owner(kmp_user_lock_p, kmp_uint32);
+extern kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p, kmp_uint32);
#else // KMP_USE_DYNAMIC_LOCK
-# define KMP_LOCK_BUSY(v, type) (v)
-# define KMP_LOCK_FREE(type) 0
-# define KMP_LOCK_STRIP(v) (v)
+#define KMP_LOCK_BUSY(v, type) (v)
+#define KMP_LOCK_FREE(type) 0
+#define KMP_LOCK_STRIP(v) (v)
#endif // KMP_USE_DYNAMIC_LOCK
// data structure for using backoff within spin locks.
typedef struct {
- kmp_uint32 step; // current step
- kmp_uint32 max_backoff; // upper bound of outer delay loop
- kmp_uint32 min_tick; // size of inner delay loop in ticks (machine-dependent)
+ kmp_uint32 step; // current step
+ kmp_uint32 max_backoff; // upper bound of outer delay loop
+ kmp_uint32 min_tick; // size of inner delay loop in ticks (machine-dependent)
} kmp_backoff_t;
// Runtime's default backoff parameters
@@ -1317,4 +1293,3 @@ extern void __kmp_spin_backoff(kmp_backo
#endif // __cplusplus
#endif /* KMP_LOCK_H */
-
Modified: openmp/trunk/runtime/src/kmp_omp.h
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp_omp.h?rev=302929&r1=302928&r2=302929&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp_omp.h (original)
+++ openmp/trunk/runtime/src/kmp_omp.h Fri May 12 13:01:32 2017
@@ -16,216 +16,224 @@
/* THIS FILE SHOULD NOT BE MODIFIED IN IDB INTERFACE LIBRARY CODE
- * It should instead be modified in the OpenMP runtime and copied
- * to the interface library code. This way we can minimize the
- * problems that this is sure to cause having two copies of the
- * same file.
- *
- * files live in libomp and libomp_db/src/include
- */
+ It should instead be modified in the OpenMP runtime and copied to the
+ interface library code. This way we can minimize the problems that having
+ two copies of the same file is sure to cause.
+
+ Files live in libomp and libomp_db/src/include */
/* CHANGE THIS WHEN STRUCTURES BELOW CHANGE
- * Before we release this to a customer, please don't change this value. After it is released and
- * stable, then any new updates to the structures or data structure traversal algorithms need to
- * change this value.
- */
+ Before we release this to a customer, please don't change this value. Once
+ it is released and stable, any new updates to the structures or data
+ structure traversal algorithms need to change this value. */
#define KMP_OMP_VERSION 9
typedef struct {
- kmp_int32 offset;
- kmp_int32 size;
+ kmp_int32 offset;
+ kmp_int32 size;
} offset_and_size_t;
typedef struct {
- kmp_uint64 addr;
- kmp_int32 size;
- kmp_int32 padding;
+ kmp_uint64 addr;
+ kmp_int32 size;
+ kmp_int32 padding;
} addr_and_size_t;
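These two helpers are how the runtime describes its layout to the debugger library: addr_and_size_t carries the absolute address and size of a global, while offset_and_size_t carries a field's offset and size within one of the structures below. On the tool side a consumer might use an offset_and_size_t roughly like this (the byte-image buffer and reader are hypothetical; only the offset/size usage is the point):

#include <stdint.h>
#include <string.h>

typedef struct {
  int32_t offset;
  int32_t size;
} offset_and_size_t;

/* 'image' is assumed to be a byte copy of the target structure obtained via
   the debugger's own memory-read interface. */
static int32_t read_i32_field(const unsigned char *image,
                              offset_and_size_t field) {
  int32_t value = 0;
  if (field.size == (int32_t)sizeof(value))
    memcpy(&value, image + field.offset, sizeof(value));
  return value;
}
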
typedef struct {
- kmp_uint64 flags; // Flags for future extensions.
- kmp_uint64 file; // Pointer to name of source file where the parallel region is.
- kmp_uint64 func; // Pointer to name of routine where the parallel region is.
- kmp_int32 begin; // Beginning of source line range.
- kmp_int32 end; // End of source line range.
- kmp_int32 num_threads; // Specified number of threads.
+ kmp_uint64 flags; // Flags for future extensions.
+ kmp_uint64
+ file; // Pointer to name of source file where the parallel region is.
+ kmp_uint64 func; // Pointer to name of routine where the parallel region is.
+ kmp_int32 begin; // Beginning of source line range.
+ kmp_int32 end; // End of source line range.
+ kmp_int32 num_threads; // Specified number of threads.
} kmp_omp_nthr_item_t;
typedef struct {
- kmp_int32 num; // Number of items in the arrray.
- kmp_uint64 array; // Address of array of kmp_omp_num_threads_item_t.
+ kmp_int32 num; // Number of items in the array.
+ kmp_uint64 array; // Address of array of kmp_omp_num_threads_item_t.
} kmp_omp_nthr_info_t;
-
/* This structure is known to the idb interface library */
typedef struct {
- /* Change this only if you make a fundamental data structure change here */
- kmp_int32 lib_version;
+ /* Change this only if you make a fundamental data structure change here */
+ kmp_int32 lib_version;
- /* sanity check. Only should be checked if versions are identical
- * This is also used for backward compatibility to get the runtime
- * structure size if it the runtime is older than the interface */
- kmp_int32 sizeof_this_structure;
-
- /* OpenMP RTL version info. */
- addr_and_size_t major;
- addr_and_size_t minor;
- addr_and_size_t build;
- addr_and_size_t openmp_version;
- addr_and_size_t banner;
-
- /* Various globals. */
- addr_and_size_t threads; // Pointer to __kmp_threads.
- addr_and_size_t roots; // Pointer to __kmp_root.
- addr_and_size_t capacity; // Pointer to __kmp_threads_capacity.
- addr_and_size_t monitor; // Pointer to __kmp_monitor.
-#if ! KMP_USE_DYNAMIC_LOCK
- addr_and_size_t lock_table; // Pointer to __kmp_lock_table.
+ /* sanity check. Should only be checked if versions are identical.
+ * This is also used for backward compatibility to get the runtime
+ * structure size if the runtime is older than the interface */
+ kmp_int32 sizeof_this_structure;
+
+ /* OpenMP RTL version info. */
+ addr_and_size_t major;
+ addr_and_size_t minor;
+ addr_and_size_t build;
+ addr_and_size_t openmp_version;
+ addr_and_size_t banner;
+
+ /* Various globals. */
+ addr_and_size_t threads; // Pointer to __kmp_threads.
+ addr_and_size_t roots; // Pointer to __kmp_root.
+ addr_and_size_t capacity; // Pointer to __kmp_threads_capacity.
+ addr_and_size_t monitor; // Pointer to __kmp_monitor.
+#if !KMP_USE_DYNAMIC_LOCK
+ addr_and_size_t lock_table; // Pointer to __kmp_lock_table.
#endif
- addr_and_size_t func_microtask;
- addr_and_size_t func_fork;
- addr_and_size_t func_fork_teams;
- addr_and_size_t team_counter;
- addr_and_size_t task_counter;
- addr_and_size_t nthr_info;
- kmp_int32 address_width;
- kmp_int32 indexed_locks;
- kmp_int32 last_barrier; // The end in enum barrier_type
- kmp_int32 deque_size; // TASK_DEQUE_SIZE
-
- /* thread structure information. */
- kmp_int32 th_sizeof_struct;
- offset_and_size_t th_info; // descriptor for thread
- offset_and_size_t th_team; // team for this thread
- offset_and_size_t th_root; // root for this thread
- offset_and_size_t th_serial_team; // serial team under this thread
- offset_and_size_t th_ident; // location for this thread (if available)
- offset_and_size_t th_spin_here; // is thread waiting for lock (if available)
- offset_and_size_t th_next_waiting; // next thread waiting for lock (if available)
- offset_and_size_t th_task_team; // task team struct
- offset_and_size_t th_current_task; // innermost task being executed
- offset_and_size_t th_task_state; // alternating 0/1 for task team identification
- offset_and_size_t th_bar;
- offset_and_size_t th_b_worker_arrived; // the worker increases it by 1 when it arrives to the barrier
+ addr_and_size_t func_microtask;
+ addr_and_size_t func_fork;
+ addr_and_size_t func_fork_teams;
+ addr_and_size_t team_counter;
+ addr_and_size_t task_counter;
+ addr_and_size_t nthr_info;
+ kmp_int32 address_width;
+ kmp_int32 indexed_locks;
+ kmp_int32 last_barrier; // The end in enum barrier_type
+ kmp_int32 deque_size; // TASK_DEQUE_SIZE
+
+ /* thread structure information. */
+ kmp_int32 th_sizeof_struct;
+ offset_and_size_t th_info; // descriptor for thread
+ offset_and_size_t th_team; // team for this thread
+ offset_and_size_t th_root; // root for this thread
+ offset_and_size_t th_serial_team; // serial team under this thread
+ offset_and_size_t th_ident; // location for this thread (if available)
+ offset_and_size_t th_spin_here; // is thread waiting for lock (if available)
+ offset_and_size_t
+ th_next_waiting; // next thread waiting for lock (if available)
+ offset_and_size_t th_task_team; // task team struct
+ offset_and_size_t th_current_task; // innermost task being executed
+ offset_and_size_t
+ th_task_state; // alternating 0/1 for task team identification
+ offset_and_size_t th_bar;
+ offset_and_size_t th_b_worker_arrived; // the worker increases it by 1 when it
+// arrives at the barrier
#if OMP_40_ENABLED
- /* teams information */
- offset_and_size_t th_teams_microtask;// entry address for teams construct
- offset_and_size_t th_teams_level; // initial level of teams construct
- offset_and_size_t th_teams_nteams; // number of teams in a league
- offset_and_size_t th_teams_nth; // number of threads in each team of the league
+ /* teams information */
+ offset_and_size_t th_teams_microtask; // entry address for teams construct
+ offset_and_size_t th_teams_level; // initial level of teams construct
+ offset_and_size_t th_teams_nteams; // number of teams in a league
+ offset_and_size_t
+ th_teams_nth; // number of threads in each team of the league
#endif
- /* kmp_desc structure (for info field above) */
- kmp_int32 ds_sizeof_struct;
- offset_and_size_t ds_tid; // team thread id
- offset_and_size_t ds_gtid; // global thread id
- offset_and_size_t ds_thread; // native thread id
-
- /* team structure information */
- kmp_int32 t_sizeof_struct;
- offset_and_size_t t_master_tid; // tid of master in parent team
- offset_and_size_t t_ident; // location of parallel region
- offset_and_size_t t_parent; // parent team
- offset_and_size_t t_nproc; // # team threads
- offset_and_size_t t_threads; // array of threads
- offset_and_size_t t_serialized; // # levels of serialized teams
- offset_and_size_t t_id; // unique team id
- offset_and_size_t t_pkfn;
- offset_and_size_t t_task_team; // task team structure
- offset_and_size_t t_implicit_task; // taskdata for the thread's implicit task
+ /* kmp_desc structure (for info field above) */
+ kmp_int32 ds_sizeof_struct;
+ offset_and_size_t ds_tid; // team thread id
+ offset_and_size_t ds_gtid; // global thread id
+ offset_and_size_t ds_thread; // native thread id
+
+ /* team structure information */
+ kmp_int32 t_sizeof_struct;
+ offset_and_size_t t_master_tid; // tid of master in parent team
+ offset_and_size_t t_ident; // location of parallel region
+ offset_and_size_t t_parent; // parent team
+ offset_and_size_t t_nproc; // # team threads
+ offset_and_size_t t_threads; // array of threads
+ offset_and_size_t t_serialized; // # levels of serialized teams
+ offset_and_size_t t_id; // unique team id
+ offset_and_size_t t_pkfn;
+ offset_and_size_t t_task_team; // task team structure
+ offset_and_size_t t_implicit_task; // taskdata for the thread's implicit task
#if OMP_40_ENABLED
- offset_and_size_t t_cancel_request;
+ offset_and_size_t t_cancel_request;
#endif
- offset_and_size_t t_bar;
- offset_and_size_t t_b_master_arrived; // increased by 1 when master arrives to a barrier
- offset_and_size_t t_b_team_arrived; // increased by one when all the threads arrived
-
- /* root structure information */
- kmp_int32 r_sizeof_struct;
- offset_and_size_t r_root_team; // team at root
- offset_and_size_t r_hot_team; // hot team for this root
- offset_and_size_t r_uber_thread; // root thread
- offset_and_size_t r_root_id; // unique root id (if available)
-
- /* ident structure information */
- kmp_int32 id_sizeof_struct;
- offset_and_size_t id_psource; /* address of string ";file;func;line1;line2;;". */
- offset_and_size_t id_flags;
-
- /* lock structure information */
- kmp_int32 lk_sizeof_struct;
- offset_and_size_t lk_initialized;
- offset_and_size_t lk_location;
- offset_and_size_t lk_tail_id;
- offset_and_size_t lk_head_id;
- offset_and_size_t lk_next_ticket;
- offset_and_size_t lk_now_serving;
- offset_and_size_t lk_owner_id;
- offset_and_size_t lk_depth_locked;
- offset_and_size_t lk_lock_flags;
-
-#if ! KMP_USE_DYNAMIC_LOCK
- /* lock_table_t */
- kmp_int32 lt_size_of_struct; /* Size and layout of kmp_lock_table_t. */
- offset_and_size_t lt_used;
- offset_and_size_t lt_allocated;
- offset_and_size_t lt_table;
+ offset_and_size_t t_bar;
+ offset_and_size_t
+ t_b_master_arrived; // increased by 1 when master arrives at a barrier
+ offset_and_size_t
+ t_b_team_arrived; // increased by one when all the threads arrived
+
+ /* root structure information */
+ kmp_int32 r_sizeof_struct;
+ offset_and_size_t r_root_team; // team at root
+ offset_and_size_t r_hot_team; // hot team for this root
+ offset_and_size_t r_uber_thread; // root thread
+ offset_and_size_t r_root_id; // unique root id (if available)
+
+ /* ident structure information */
+ kmp_int32 id_sizeof_struct;
+ offset_and_size_t
+ id_psource; /* address of string ";file;func;line1;line2;;". */
+ offset_and_size_t id_flags;
+
+ /* lock structure information */
+ kmp_int32 lk_sizeof_struct;
+ offset_and_size_t lk_initialized;
+ offset_and_size_t lk_location;
+ offset_and_size_t lk_tail_id;
+ offset_and_size_t lk_head_id;
+ offset_and_size_t lk_next_ticket;
+ offset_and_size_t lk_now_serving;
+ offset_and_size_t lk_owner_id;
+ offset_and_size_t lk_depth_locked;
+ offset_and_size_t lk_lock_flags;
+
+#if !KMP_USE_DYNAMIC_LOCK
+ /* lock_table_t */
+ kmp_int32 lt_size_of_struct; /* Size and layout of kmp_lock_table_t. */
+ offset_and_size_t lt_used;
+ offset_and_size_t lt_allocated;
+ offset_and_size_t lt_table;
#endif
- /* task_team_t */
- kmp_int32 tt_sizeof_struct;
- offset_and_size_t tt_threads_data;
- offset_and_size_t tt_found_tasks;
- offset_and_size_t tt_nproc;
- offset_and_size_t tt_unfinished_threads;
- offset_and_size_t tt_active;
-
- /* kmp_taskdata_t */
- kmp_int32 td_sizeof_struct;
- offset_and_size_t td_task_id; // task id
- offset_and_size_t td_flags; // task flags
- offset_and_size_t td_team; // team for this task
- offset_and_size_t td_parent; // parent task
- offset_and_size_t td_level; // task testing level
- offset_and_size_t td_ident; // task identifier
- offset_and_size_t td_allocated_child_tasks; // child tasks (+ current task) not yet deallocated
- offset_and_size_t td_incomplete_child_tasks; // child tasks not yet complete
-
- /* Taskwait */
- offset_and_size_t td_taskwait_ident;
- offset_and_size_t td_taskwait_counter;
- offset_and_size_t td_taskwait_thread; // gtid + 1 of thread encountered taskwait
+ /* task_team_t */
+ kmp_int32 tt_sizeof_struct;
+ offset_and_size_t tt_threads_data;
+ offset_and_size_t tt_found_tasks;
+ offset_and_size_t tt_nproc;
+ offset_and_size_t tt_unfinished_threads;
+ offset_and_size_t tt_active;
+
+ /* kmp_taskdata_t */
+ kmp_int32 td_sizeof_struct;
+ offset_and_size_t td_task_id; // task id
+ offset_and_size_t td_flags; // task flags
+ offset_and_size_t td_team; // team for this task
+ offset_and_size_t td_parent; // parent task
+ offset_and_size_t td_level; // task testing level
+ offset_and_size_t td_ident; // task identifier
+ offset_and_size_t td_allocated_child_tasks; // child tasks (+ current task)
+ // not yet deallocated
+ offset_and_size_t td_incomplete_child_tasks; // child tasks not yet complete
+
+ /* Taskwait */
+ offset_and_size_t td_taskwait_ident;
+ offset_and_size_t td_taskwait_counter;
+ offset_and_size_t
+ td_taskwait_thread; // gtid + 1 of thread encountered taskwait
#if OMP_40_ENABLED
- /* Taskgroup */
- offset_and_size_t td_taskgroup; // pointer to the current taskgroup
- offset_and_size_t td_task_count; // number of allocated and not yet complete tasks
- offset_and_size_t td_cancel; // request for cancellation of this taskgroup
-
- /* Task dependency */
- offset_and_size_t td_depnode; // pointer to graph node if the task has dependencies
- offset_and_size_t dn_node;
- offset_and_size_t dn_next;
- offset_and_size_t dn_successors;
- offset_and_size_t dn_task;
- offset_and_size_t dn_npredecessors;
- offset_and_size_t dn_nrefs;
+ /* Taskgroup */
+ offset_and_size_t td_taskgroup; // pointer to the current taskgroup
+ offset_and_size_t
+ td_task_count; // number of allocated and not yet complete tasks
+ offset_and_size_t td_cancel; // request for cancellation of this taskgroup
+
+ /* Task dependency */
+ offset_and_size_t
+ td_depnode; // pointer to graph node if the task has dependencies
+ offset_and_size_t dn_node;
+ offset_and_size_t dn_next;
+ offset_and_size_t dn_successors;
+ offset_and_size_t dn_task;
+ offset_and_size_t dn_npredecessors;
+ offset_and_size_t dn_nrefs;
#endif
- offset_and_size_t dn_routine;
+ offset_and_size_t dn_routine;
- /* kmp_thread_data_t */
- kmp_int32 hd_sizeof_struct;
- offset_and_size_t hd_deque;
- offset_and_size_t hd_deque_size;
- offset_and_size_t hd_deque_head;
- offset_and_size_t hd_deque_tail;
- offset_and_size_t hd_deque_ntasks;
- offset_and_size_t hd_deque_last_stolen;
+ /* kmp_thread_data_t */
+ kmp_int32 hd_sizeof_struct;
+ offset_and_size_t hd_deque;
+ offset_and_size_t hd_deque_size;
+ offset_and_size_t hd_deque_head;
+ offset_and_size_t hd_deque_tail;
+ offset_and_size_t hd_deque_ntasks;
+ offset_and_size_t hd_deque_last_stolen;
- // The last field of stable version.
- kmp_uint64 last_field;
+ // The last field of stable version.
+ kmp_uint64 last_field;
} kmp_omp_struct_info_t;
Modified: openmp/trunk/runtime/src/kmp_os.h
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp_os.h?rev=302929&r1=302928&r2=302929&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp_os.h (original)
+++ openmp/trunk/runtime/src/kmp_os.h Fri May 12 13:01:32 2017
@@ -19,26 +19,26 @@
#include "kmp_config.h"
#include <stdlib.h>
-#define KMP_FTN_PLAIN 1
-#define KMP_FTN_APPEND 2
-#define KMP_FTN_UPPER 3
+#define KMP_FTN_PLAIN 1
+#define KMP_FTN_APPEND 2
+#define KMP_FTN_UPPER 3
/*
#define KMP_FTN_PREPEND 4
#define KMP_FTN_UAPPEND 5
*/
-#define KMP_PTR_SKIP (sizeof(void*))
+#define KMP_PTR_SKIP (sizeof(void *))
/* -------------------------- Compiler variations ------------------------ */
-#define KMP_OFF 0
-#define KMP_ON 1
+#define KMP_OFF 0
+#define KMP_ON 1
-#define KMP_MEM_CONS_VOLATILE 0
-#define KMP_MEM_CONS_FENCE 1
+#define KMP_MEM_CONS_VOLATILE 0
+#define KMP_MEM_CONS_FENCE 1
#ifndef KMP_MEM_CONS_MODEL
-# define KMP_MEM_CONS_MODEL KMP_MEM_CONS_VOLATILE
+#define KMP_MEM_CONS_MODEL KMP_MEM_CONS_VOLATILE
#endif
/* ------------------------- Compiler recognition ---------------------- */
@@ -47,202 +47,197 @@
#define KMP_COMPILER_CLANG 0
#define KMP_COMPILER_MSVC 0
-#if defined( __INTEL_COMPILER )
-# undef KMP_COMPILER_ICC
-# define KMP_COMPILER_ICC 1
-#elif defined( __clang__ )
-# undef KMP_COMPILER_CLANG
-# define KMP_COMPILER_CLANG 1
-#elif defined( __GNUC__ )
-# undef KMP_COMPILER_GCC
-# define KMP_COMPILER_GCC 1
-#elif defined( _MSC_VER )
-# undef KMP_COMPILER_MSVC
-# define KMP_COMPILER_MSVC 1
+#if defined(__INTEL_COMPILER)
+#undef KMP_COMPILER_ICC
+#define KMP_COMPILER_ICC 1
+#elif defined(__clang__)
+#undef KMP_COMPILER_CLANG
+#define KMP_COMPILER_CLANG 1
+#elif defined(__GNUC__)
+#undef KMP_COMPILER_GCC
+#define KMP_COMPILER_GCC 1
+#elif defined(_MSC_VER)
+#undef KMP_COMPILER_MSVC
+#define KMP_COMPILER_MSVC 1
#else
-# error Unknown compiler
+#error Unknown compiler
#endif
#if (KMP_OS_LINUX || KMP_OS_WINDOWS) && !KMP_OS_CNK && !KMP_ARCH_PPC64
-# define KMP_AFFINITY_SUPPORTED 1
-# if KMP_OS_WINDOWS && KMP_ARCH_X86_64
-# define KMP_GROUP_AFFINITY 1
-# else
-# define KMP_GROUP_AFFINITY 0
-# endif
+#define KMP_AFFINITY_SUPPORTED 1
+#if KMP_OS_WINDOWS && KMP_ARCH_X86_64
+#define KMP_GROUP_AFFINITY 1
#else
-# define KMP_AFFINITY_SUPPORTED 0
-# define KMP_GROUP_AFFINITY 0
+#define KMP_GROUP_AFFINITY 0
+#endif
+#else
+#define KMP_AFFINITY_SUPPORTED 0
+#define KMP_GROUP_AFFINITY 0
#endif
/* Check for quad-precision extension. */
#define KMP_HAVE_QUAD 0
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
-# if KMP_COMPILER_ICC
- /* _Quad is already defined for icc */
-# undef KMP_HAVE_QUAD
-# define KMP_HAVE_QUAD 1
-# elif KMP_COMPILER_CLANG
- /* Clang doesn't support a software-implemented
- 128-bit extended precision type yet */
- typedef long double _Quad;
-# elif KMP_COMPILER_GCC
- typedef __float128 _Quad;
-# undef KMP_HAVE_QUAD
-# define KMP_HAVE_QUAD 1
-# elif KMP_COMPILER_MSVC
- typedef long double _Quad;
-# endif
-#else
-# if __LDBL_MAX_EXP__ >= 16384 && KMP_COMPILER_GCC
- typedef long double _Quad;
-# undef KMP_HAVE_QUAD
-# define KMP_HAVE_QUAD 1
-# endif
+#if KMP_COMPILER_ICC
+/* _Quad is already defined for icc */
+#undef KMP_HAVE_QUAD
+#define KMP_HAVE_QUAD 1
+#elif KMP_COMPILER_CLANG
+/* Clang doesn't support a software-implemented
+ 128-bit extended precision type yet */
+typedef long double _Quad;
+#elif KMP_COMPILER_GCC
+typedef __float128 _Quad;
+#undef KMP_HAVE_QUAD
+#define KMP_HAVE_QUAD 1
+#elif KMP_COMPILER_MSVC
+typedef long double _Quad;
+#endif
+#else
+#if __LDBL_MAX_EXP__ >= 16384 && KMP_COMPILER_GCC
+typedef long double _Quad;
+#undef KMP_HAVE_QUAD
+#define KMP_HAVE_QUAD 1
+#endif
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
#if KMP_OS_WINDOWS
- typedef char kmp_int8;
- typedef unsigned char kmp_uint8;
- typedef short kmp_int16;
- typedef unsigned short kmp_uint16;
- typedef int kmp_int32;
- typedef unsigned int kmp_uint32;
-# define KMP_INT32_SPEC "d"
-# define KMP_UINT32_SPEC "u"
-# ifndef KMP_STRUCT64
- typedef __int64 kmp_int64;
- typedef unsigned __int64 kmp_uint64;
- #define KMP_INT64_SPEC "I64d"
- #define KMP_UINT64_SPEC "I64u"
-# else
- struct kmp_struct64 {
- kmp_int32 a,b;
- };
- typedef struct kmp_struct64 kmp_int64;
- typedef struct kmp_struct64 kmp_uint64;
- /* Not sure what to use for KMP_[U]INT64_SPEC here */
-# endif
-# if KMP_ARCH_X86_64
-# define KMP_INTPTR 1
- typedef __int64 kmp_intptr_t;
- typedef unsigned __int64 kmp_uintptr_t;
-# define KMP_INTPTR_SPEC "I64d"
-# define KMP_UINTPTR_SPEC "I64u"
-# endif
+typedef char kmp_int8;
+typedef unsigned char kmp_uint8;
+typedef short kmp_int16;
+typedef unsigned short kmp_uint16;
+typedef int kmp_int32;
+typedef unsigned int kmp_uint32;
+#define KMP_INT32_SPEC "d"
+#define KMP_UINT32_SPEC "u"
+#ifndef KMP_STRUCT64
+typedef __int64 kmp_int64;
+typedef unsigned __int64 kmp_uint64;
+#define KMP_INT64_SPEC "I64d"
+#define KMP_UINT64_SPEC "I64u"
+#else
+struct kmp_struct64 {
+ kmp_int32 a, b;
+};
+typedef struct kmp_struct64 kmp_int64;
+typedef struct kmp_struct64 kmp_uint64;
+/* Not sure what to use for KMP_[U]INT64_SPEC here */
+#endif
+#if KMP_ARCH_X86_64
+#define KMP_INTPTR 1
+typedef __int64 kmp_intptr_t;
+typedef unsigned __int64 kmp_uintptr_t;
+#define KMP_INTPTR_SPEC "I64d"
+#define KMP_UINTPTR_SPEC "I64u"
+#endif
#endif /* KMP_OS_WINDOWS */
#if KMP_OS_UNIX
- typedef char kmp_int8;
- typedef unsigned char kmp_uint8;
- typedef short kmp_int16;
- typedef unsigned short kmp_uint16;
- typedef int kmp_int32;
- typedef unsigned int kmp_uint32;
- typedef long long kmp_int64;
- typedef unsigned long long kmp_uint64;
-# define KMP_INT32_SPEC "d"
-# define KMP_UINT32_SPEC "u"
-# define KMP_INT64_SPEC "lld"
-# define KMP_UINT64_SPEC "llu"
+typedef char kmp_int8;
+typedef unsigned char kmp_uint8;
+typedef short kmp_int16;
+typedef unsigned short kmp_uint16;
+typedef int kmp_int32;
+typedef unsigned int kmp_uint32;
+typedef long long kmp_int64;
+typedef unsigned long long kmp_uint64;
+#define KMP_INT32_SPEC "d"
+#define KMP_UINT32_SPEC "u"
+#define KMP_INT64_SPEC "lld"
+#define KMP_UINT64_SPEC "llu"
#endif /* KMP_OS_UNIX */
#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS
-# define KMP_SIZE_T_SPEC KMP_UINT32_SPEC
+#define KMP_SIZE_T_SPEC KMP_UINT32_SPEC
#elif KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS64
-# define KMP_SIZE_T_SPEC KMP_UINT64_SPEC
+#define KMP_SIZE_T_SPEC KMP_UINT64_SPEC
#else
-# error "Can't determine size_t printf format specifier."
+#error "Can't determine size_t printf format specifier."
#endif
#if KMP_ARCH_X86
-# define KMP_SIZE_T_MAX (0xFFFFFFFF)
+#define KMP_SIZE_T_MAX (0xFFFFFFFF)
#else
-# define KMP_SIZE_T_MAX (0xFFFFFFFFFFFFFFFF)
+#define KMP_SIZE_T_MAX (0xFFFFFFFFFFFFFFFF)
#endif
-typedef size_t kmp_size_t;
-typedef float kmp_real32;
-typedef double kmp_real64;
+typedef size_t kmp_size_t;
+typedef float kmp_real32;
+typedef double kmp_real64;
#ifndef KMP_INTPTR
-# define KMP_INTPTR 1
- typedef long kmp_intptr_t;
- typedef unsigned long kmp_uintptr_t;
-# define KMP_INTPTR_SPEC "ld"
-# define KMP_UINTPTR_SPEC "lu"
+#define KMP_INTPTR 1
+typedef long kmp_intptr_t;
+typedef unsigned long kmp_uintptr_t;
+#define KMP_INTPTR_SPEC "ld"
+#define KMP_UINTPTR_SPEC "lu"
#endif
#ifdef BUILD_I8
- typedef kmp_int64 kmp_int;
- typedef kmp_uint64 kmp_uint;
+typedef kmp_int64 kmp_int;
+typedef kmp_uint64 kmp_uint;
#else
- typedef kmp_int32 kmp_int;
- typedef kmp_uint32 kmp_uint;
+typedef kmp_int32 kmp_int;
+typedef kmp_uint32 kmp_uint;
#endif /* BUILD_I8 */
-#define KMP_INT_MAX ((kmp_int32)0x7FFFFFFF)
-#define KMP_INT_MIN ((kmp_int32)0x80000000)
+#define KMP_INT_MAX ((kmp_int32)0x7FFFFFFF)
+#define KMP_INT_MIN ((kmp_int32)0x80000000)
#ifdef __cplusplus
- //-------------------------------------------------------------------------
- // template for debug prints specification ( d, u, lld, llu ), and to obtain
- // signed/unsigned flavors of a type
- template< typename T >
- struct traits_t { };
- // int
- template<>
- struct traits_t< signed int > {
- typedef signed int signed_t;
- typedef unsigned int unsigned_t;
- typedef double floating_t;
- static char const * spec;
- static const signed_t max_value = 0x7fffffff;
- static const signed_t min_value = 0x80000000;
- static const int type_size = sizeof(signed_t);
- };
- // unsigned int
- template<>
- struct traits_t< unsigned int > {
- typedef signed int signed_t;
- typedef unsigned int unsigned_t;
- typedef double floating_t;
- static char const * spec;
- static const unsigned_t max_value = 0xffffffff;
- static const unsigned_t min_value = 0x00000000;
- static const int type_size = sizeof(unsigned_t);
- };
- // long long
- template<>
- struct traits_t< signed long long > {
- typedef signed long long signed_t;
- typedef unsigned long long unsigned_t;
- typedef long double floating_t;
- static char const * spec;
- static const signed_t max_value = 0x7fffffffffffffffLL;
- static const signed_t min_value = 0x8000000000000000LL;
- static const int type_size = sizeof(signed_t);
- };
- // unsigned long long
- template<>
- struct traits_t< unsigned long long > {
- typedef signed long long signed_t;
- typedef unsigned long long unsigned_t;
- typedef long double floating_t;
- static char const * spec;
- static const unsigned_t max_value = 0xffffffffffffffffLL;
- static const unsigned_t min_value = 0x0000000000000000LL;
- static const int type_size = sizeof(unsigned_t);
- };
- //-------------------------------------------------------------------------
+//-------------------------------------------------------------------------
+// template for debug prints specification ( d, u, lld, llu ), and to obtain
+// signed/unsigned flavors of a type
+template <typename T> struct traits_t {};
+// int
+template <> struct traits_t<signed int> {
+ typedef signed int signed_t;
+ typedef unsigned int unsigned_t;
+ typedef double floating_t;
+ static char const *spec;
+ static const signed_t max_value = 0x7fffffff;
+ static const signed_t min_value = 0x80000000;
+ static const int type_size = sizeof(signed_t);
+};
+// unsigned int
+template <> struct traits_t<unsigned int> {
+ typedef signed int signed_t;
+ typedef unsigned int unsigned_t;
+ typedef double floating_t;
+ static char const *spec;
+ static const unsigned_t max_value = 0xffffffff;
+ static const unsigned_t min_value = 0x00000000;
+ static const int type_size = sizeof(unsigned_t);
+};
+// long long
+template <> struct traits_t<signed long long> {
+ typedef signed long long signed_t;
+ typedef unsigned long long unsigned_t;
+ typedef long double floating_t;
+ static char const *spec;
+ static const signed_t max_value = 0x7fffffffffffffffLL;
+ static const signed_t min_value = 0x8000000000000000LL;
+ static const int type_size = sizeof(signed_t);
+};
+// unsigned long long
+template <> struct traits_t<unsigned long long> {
+ typedef signed long long signed_t;
+ typedef unsigned long long unsigned_t;
+ typedef long double floating_t;
+ static char const *spec;
+ static const unsigned_t max_value = 0xffffffffffffffffLL;
+ static const unsigned_t min_value = 0x0000000000000000LL;
+ static const int type_size = sizeof(unsigned_t);
+};
+//-------------------------------------------------------------------------
#endif // __cplusplus
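
As an aside, here is a hedged sketch of how the traits_t specializations above are typically consumed for debug printing. The helper name dump_int32_limits is hypothetical, and the sketch assumes traits_t<kmp_int32>::spec is defined elsewhere in the runtime as "d", as the comment in the header indicates.

  #include <cstdio>
  // Hypothetical helper: build a format string from the traits-provided
  // specifier, then print the limits recorded in the specialization.
  static void dump_int32_limits() {
    char fmt[64];
    std::snprintf(fmt, sizeof(fmt), "kmp_int32: %%%s .. %%%s (size %%d)\n",
                  traits_t<kmp_int32>::spec, traits_t<kmp_int32>::spec);
    std::printf(fmt, traits_t<kmp_int32>::min_value,
                traits_t<kmp_int32>::max_value, traits_t<kmp_int32>::type_size);
  }
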
-#define KMP_EXPORT extern /* export declaration in guide libraries */
+#define KMP_EXPORT extern /* export declaration in guide libraries */
#if __GNUC__ >= 4
- #define __forceinline __inline
+#define __forceinline __inline
#endif
-#define PAGE_SIZE (0x4000)
+#define PAGE_SIZE (0x4000)
#if KMP_OS_LINUX
#define KMP_GET_PAGE_SIZE() getpagesize()
@@ -252,11 +247,12 @@ typedef double kmp_real64;
#define KMP_GET_PAGE_SIZE() PAGE_SIZE
#endif
-#define PAGE_ALIGNED(_addr) ( ! ((size_t) _addr & \
- (size_t)(KMP_GET_PAGE_SIZE() - 1)))
-#define ALIGN_TO_PAGE(x) (void *)(((size_t)(x)) & ~((size_t)(KMP_GET_PAGE_SIZE() - 1)))
+#define PAGE_ALIGNED(_addr) \
+ (!((size_t)_addr & (size_t)(KMP_GET_PAGE_SIZE() - 1)))
+#define ALIGN_TO_PAGE(x) \
+ (void *)(((size_t)(x)) & ~((size_t)(KMP_GET_PAGE_SIZE() - 1)))
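
Both macros rely on the page size being a power of two, so masking with (page size - 1) isolates the offset inside a page. A small illustrative check with made-up values, assuming the 16 KiB PAGE_SIZE defined above:

  #include <cassert>
  #include <cstddef>
  static void page_mask_example() {
    const size_t page = 0x4000;                   // 16 KiB, as PAGE_SIZE above
    const size_t addr = 0x12345678;               // arbitrary example address
    assert((addr & (page - 1)) == 0x1678);        // nonzero offset -> not page aligned
    assert((addr & ~(page - 1)) == 0x12344000);   // rounded down to the page base
  }
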
-/* ---------------------- Support for cache alignment, padding, etc. -----------------*/
+/* ---------- Support for cache alignment, padding, etc. ----------------*/
#ifdef __cplusplus
extern "C" {
@@ -266,42 +262,39 @@ extern "C" {
/* Define the default size of the cache line */
#ifndef CACHE_LINE
- #define CACHE_LINE 128 /* cache line size in bytes */
+#define CACHE_LINE 128 /* cache line size in bytes */
#else
- #if ( CACHE_LINE < 64 ) && ! defined( KMP_OS_DARWIN )
- // 2006-02-13: This produces too many warnings on OS X*. Disable it for a while...
- #warning CACHE_LINE is too small.
- #endif
+#if (CACHE_LINE < 64) && !defined(KMP_OS_DARWIN)
+// 2006-02-13: This produces too many warnings on OS X*. Disable for now
+#warning CACHE_LINE is too small.
+#endif
#endif /* CACHE_LINE */
-#define KMP_CACHE_PREFETCH(ADDR) /* nothing */
+#define KMP_CACHE_PREFETCH(ADDR) /* nothing */
/* Temporary note: if performance testing of this passes, we can remove
all references to KMP_DO_ALIGN and replace with KMP_ALIGN. */
#if KMP_OS_UNIX && defined(__GNUC__)
-# define KMP_DO_ALIGN(bytes) __attribute__((aligned(bytes)))
-# define KMP_ALIGN_CACHE __attribute__((aligned(CACHE_LINE)))
-# define KMP_ALIGN_CACHE_INTERNODE __attribute__((aligned(INTERNODE_CACHE_LINE)))
-# define KMP_ALIGN(bytes) __attribute__((aligned(bytes)))
-#else
-# define KMP_DO_ALIGN(bytes) __declspec( align(bytes) )
-# define KMP_ALIGN_CACHE __declspec( align(CACHE_LINE) )
-# define KMP_ALIGN_CACHE_INTERNODE __declspec( align(INTERNODE_CACHE_LINE) )
-# define KMP_ALIGN(bytes) __declspec( align(bytes) )
+#define KMP_DO_ALIGN(bytes) __attribute__((aligned(bytes)))
+#define KMP_ALIGN_CACHE __attribute__((aligned(CACHE_LINE)))
+#define KMP_ALIGN_CACHE_INTERNODE __attribute__((aligned(INTERNODE_CACHE_LINE)))
+#define KMP_ALIGN(bytes) __attribute__((aligned(bytes)))
+#else
+#define KMP_DO_ALIGN(bytes) __declspec(align(bytes))
+#define KMP_ALIGN_CACHE __declspec(align(CACHE_LINE))
+#define KMP_ALIGN_CACHE_INTERNODE __declspec(align(INTERNODE_CACHE_LINE))
+#define KMP_ALIGN(bytes) __declspec(align(bytes))
#endif
/* General purpose fence types for memory operations */
enum kmp_mem_fence_type {
- kmp_no_fence, /* No memory fence */
- kmp_acquire_fence, /* Acquire (read) memory fence */
- kmp_release_fence, /* Release (write) memory fence */
- kmp_full_fence /* Full (read+write) memory fence */
+ kmp_no_fence, /* No memory fence */
+ kmp_acquire_fence, /* Acquire (read) memory fence */
+ kmp_release_fence, /* Release (write) memory fence */
+ kmp_full_fence /* Full (read+write) memory fence */
};
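
For orientation only, the fence flavors above correspond roughly to the C++11 orderings; the mapping below is an assumption for illustration, not code from the runtime.

  #include <atomic>
  // Hedged sketch: issue a fence matching each kmp_mem_fence_type flavor.
  static void issue_fence(enum kmp_mem_fence_type t) {
    switch (t) {
    case kmp_acquire_fence: std::atomic_thread_fence(std::memory_order_acquire); break;
    case kmp_release_fence: std::atomic_thread_fence(std::memory_order_release); break;
    case kmp_full_fence:    std::atomic_thread_fence(std::memory_order_seq_cst); break;
    case kmp_no_fence:      break;  // no ordering requested
    }
  }
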
-
-//
// Synchronization primitives
-//
#if KMP_ASM_INTRINS && KMP_OS_WINDOWS
@@ -312,292 +305,379 @@ enum kmp_mem_fence_type {
#pragma intrinsic(InterlockedExchange)
#pragma intrinsic(InterlockedExchange64)
-//
// Using InterlockedIncrement / InterlockedDecrement causes a library loading
// ordering problem, so we use InterlockedExchangeAdd instead.
-//
-# define KMP_TEST_THEN_INC32(p) InterlockedExchangeAdd( (volatile long *)(p), 1 )
-# define KMP_TEST_THEN_INC_ACQ32(p) InterlockedExchangeAdd( (volatile long *)(p), 1 )
-# define KMP_TEST_THEN_ADD4_32(p) InterlockedExchangeAdd( (volatile long *)(p), 4 )
-# define KMP_TEST_THEN_ADD4_ACQ32(p) InterlockedExchangeAdd( (volatile long *)(p), 4 )
-# define KMP_TEST_THEN_DEC32(p) InterlockedExchangeAdd( (volatile long *)(p), -1 )
-# define KMP_TEST_THEN_DEC_ACQ32(p) InterlockedExchangeAdd( (volatile long *)(p), -1 )
-# define KMP_TEST_THEN_ADD32(p, v) InterlockedExchangeAdd( (volatile long *)(p), (v) )
-
-extern kmp_int8 __kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 v );
-extern kmp_int8 __kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 v );
-extern kmp_int8 __kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 v );
-# define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) InterlockedCompareExchange( (volatile long *)(p),(long)(sv),(long)(cv) )
-
-# define KMP_XCHG_FIXED32(p, v) InterlockedExchange( (volatile long *)(p), (long)(v) )
-# define KMP_XCHG_FIXED64(p, v) InterlockedExchange64( (volatile kmp_int64 *)(p), (kmp_int64)(v) )
-
-inline kmp_real32 KMP_XCHG_REAL32( volatile kmp_real32 *p, kmp_real32 v)
-{
- kmp_int32 tmp = InterlockedExchange( (volatile long *)p, *(long *)&v);
- return *(kmp_real32*)&tmp;
+#define KMP_TEST_THEN_INC32(p) InterlockedExchangeAdd((volatile long *)(p), 1)
+#define KMP_TEST_THEN_INC_ACQ32(p) \
+ InterlockedExchangeAdd((volatile long *)(p), 1)
+#define KMP_TEST_THEN_ADD4_32(p) InterlockedExchangeAdd((volatile long *)(p), 4)
+#define KMP_TEST_THEN_ADD4_ACQ32(p) \
+ InterlockedExchangeAdd((volatile long *)(p), 4)
+#define KMP_TEST_THEN_DEC32(p) InterlockedExchangeAdd((volatile long *)(p), -1)
+#define KMP_TEST_THEN_DEC_ACQ32(p) \
+ InterlockedExchangeAdd((volatile long *)(p), -1)
+#define KMP_TEST_THEN_ADD32(p, v) \
+ InterlockedExchangeAdd((volatile long *)(p), (v))
+
+extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);
+extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
+extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
+#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) \
+ InterlockedCompareExchange((volatile long *)(p), (long)(sv), (long)(cv))
+
+#define KMP_XCHG_FIXED32(p, v) \
+ InterlockedExchange((volatile long *)(p), (long)(v))
+#define KMP_XCHG_FIXED64(p, v) \
+ InterlockedExchange64((volatile kmp_int64 *)(p), (kmp_int64)(v))
+
+inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
+ kmp_int32 tmp = InterlockedExchange((volatile long *)p, *(long *)&v);
+ return *(kmp_real32 *)&tmp;
}
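
The inline above exchanges a float atomically by reinterpreting its bits as a 32-bit integer for InterlockedExchange. A hedged, platform-neutral sketch of the same bit-cast idea with C++11 atomics (exchange_real32 is a made-up name; memcpy is used to sidestep the aliasing cast):

  #include <atomic>
  #include <cstdint>
  #include <cstring>
  static float exchange_real32(std::atomic<std::uint32_t> *slot, float v) {
    std::uint32_t bits;
    std::memcpy(&bits, &v, sizeof(bits));            // float -> raw 32 bits
    std::uint32_t old_bits = slot->exchange(bits);   // atomic swap of the bits
    float old;
    std::memcpy(&old, &old_bits, sizeof(old));       // raw bits -> float
    return old;
  }
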
-//
// Routines that we still need to implement in assembly.
-//
-extern kmp_int32 __kmp_test_then_add32( volatile kmp_int32 *p, kmp_int32 v );
-extern kmp_int32 __kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 v );
-extern kmp_int32 __kmp_test_then_and32( volatile kmp_int32 *p, kmp_int32 v );
-extern kmp_int64 __kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 v );
-extern kmp_int64 __kmp_test_then_or64( volatile kmp_int64 *p, kmp_int64 v );
-extern kmp_int64 __kmp_test_then_and64( volatile kmp_int64 *p, kmp_int64 v );
-
-extern kmp_int8 __kmp_compare_and_store8( volatile kmp_int8 *p, kmp_int8 cv, kmp_int8 sv );
-extern kmp_int16 __kmp_compare_and_store16( volatile kmp_int16 *p, kmp_int16 cv, kmp_int16 sv );
-extern kmp_int32 __kmp_compare_and_store32( volatile kmp_int32 *p, kmp_int32 cv, kmp_int32 sv );
-extern kmp_int32 __kmp_compare_and_store64( volatile kmp_int64 *p, kmp_int64 cv, kmp_int64 sv );
-extern kmp_int8 __kmp_compare_and_store_ret8( volatile kmp_int8 *p, kmp_int8 cv, kmp_int8 sv );
-extern kmp_int16 __kmp_compare_and_store_ret16( volatile kmp_int16 *p, kmp_int16 cv, kmp_int16 sv );
-extern kmp_int32 __kmp_compare_and_store_ret32( volatile kmp_int32 *p, kmp_int32 cv, kmp_int32 sv );
-extern kmp_int64 __kmp_compare_and_store_ret64( volatile kmp_int64 *p, kmp_int64 cv, kmp_int64 sv );
-
-extern kmp_int8 __kmp_xchg_fixed8( volatile kmp_int8 *p, kmp_int8 v );
-extern kmp_int16 __kmp_xchg_fixed16( volatile kmp_int16 *p, kmp_int16 v );
-extern kmp_int32 __kmp_xchg_fixed32( volatile kmp_int32 *p, kmp_int32 v );
-extern kmp_int64 __kmp_xchg_fixed64( volatile kmp_int64 *p, kmp_int64 v );
-extern kmp_real32 __kmp_xchg_real32( volatile kmp_real32 *p, kmp_real32 v );
-extern kmp_real64 __kmp_xchg_real64( volatile kmp_real64 *p, kmp_real64 v );
-# define KMP_TEST_THEN_ADD8(p, v) __kmp_test_then_add8( (p), (v) )
-
-//# define KMP_TEST_THEN_INC32(p) __kmp_test_then_add32( (p), 1 )
-# define KMP_TEST_THEN_OR8(p, v) __kmp_test_then_or8( (p), (v) )
-# define KMP_TEST_THEN_AND8(p, v) __kmp_test_then_and8( (p), (v) )
-//# define KMP_TEST_THEN_INC_ACQ32(p) __kmp_test_then_add32( (p), 1 )
-# define KMP_TEST_THEN_INC64(p) __kmp_test_then_add64( (p), 1LL )
-# define KMP_TEST_THEN_INC_ACQ64(p) __kmp_test_then_add64( (p), 1LL )
-//# define KMP_TEST_THEN_ADD4_32(p) __kmp_test_then_add32( (p), 4 )
-//# define KMP_TEST_THEN_ADD4_ACQ32(p) __kmp_test_then_add32( (p), 4 )
-# define KMP_TEST_THEN_ADD4_64(p) __kmp_test_then_add64( (p), 4LL )
-# define KMP_TEST_THEN_ADD4_ACQ64(p) __kmp_test_then_add64( (p), 4LL )
-//# define KMP_TEST_THEN_DEC32(p) __kmp_test_then_add32( (p), -1 )
-//# define KMP_TEST_THEN_DEC_ACQ32(p) __kmp_test_then_add32( (p), -1 )
-# define KMP_TEST_THEN_DEC64(p) __kmp_test_then_add64( (p), -1LL )
-# define KMP_TEST_THEN_DEC_ACQ64(p) __kmp_test_then_add64( (p), -1LL )
-//# define KMP_TEST_THEN_ADD32(p, v) __kmp_test_then_add32( (p), (v) )
-# define KMP_TEST_THEN_ADD64(p, v) __kmp_test_then_add64( (p), (v) )
-
-# define KMP_TEST_THEN_OR32(p, v) __kmp_test_then_or32( (p), (v) )
-# define KMP_TEST_THEN_AND32(p, v) __kmp_test_then_and32( (p), (v) )
-# define KMP_TEST_THEN_OR64(p, v) __kmp_test_then_or64( (p), (v) )
-# define KMP_TEST_THEN_AND64(p, v) __kmp_test_then_and64( (p), (v) )
-
-# define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) __kmp_compare_and_store8( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_REL8(p, cv, sv) __kmp_compare_and_store8( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv) __kmp_compare_and_store16( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_REL16(p, cv, sv) __kmp_compare_and_store16( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv) __kmp_compare_and_store32( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) __kmp_compare_and_store32( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) __kmp_compare_and_store64( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) __kmp_compare_and_store64( (p), (cv), (sv) )
-
-# if KMP_ARCH_X86
-# define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) __kmp_compare_and_store32( (volatile kmp_int32*)(p), (kmp_int32)(cv), (kmp_int32)(sv) )
-# else /* 64 bit pointers */
-# define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) __kmp_compare_and_store64( (volatile kmp_int64*)(p), (kmp_int64)(cv), (kmp_int64)(sv) )
-# endif /* KMP_ARCH_X86 */
-
-# define KMP_COMPARE_AND_STORE_RET8(p, cv, sv) __kmp_compare_and_store_ret8( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_RET16(p, cv, sv) __kmp_compare_and_store_ret16( (p), (cv), (sv) )
-//# define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) __kmp_compare_and_store_ret32( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) __kmp_compare_and_store_ret64( (p), (cv), (sv) )
-
-# define KMP_XCHG_FIXED8(p, v) __kmp_xchg_fixed8( (volatile kmp_int8*)(p), (kmp_int8)(v) );
-# define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16( (p), (v) );
-//# define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32( (p), (v) );
-//# define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64( (p), (v) );
-//# define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32( (p), (v) );
-# define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64( (p), (v) );
+extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
+extern kmp_int32 __kmp_test_then_or32(volatile kmp_int32 *p, kmp_int32 v);
+extern kmp_int32 __kmp_test_then_and32(volatile kmp_int32 *p, kmp_int32 v);
+extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
+extern kmp_int64 __kmp_test_then_or64(volatile kmp_int64 *p, kmp_int64 v);
+extern kmp_int64 __kmp_test_then_and64(volatile kmp_int64 *p, kmp_int64 v);
+
+extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
+ kmp_int8 sv);
+extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
+ kmp_int16 sv);
+extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
+ kmp_int32 sv);
+extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
+ kmp_int64 sv);
+extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
+ kmp_int8 sv);
+extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
+ kmp_int16 cv, kmp_int16 sv);
+extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
+ kmp_int32 cv, kmp_int32 sv);
+extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
+ kmp_int64 cv, kmp_int64 sv);
+
+extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
+extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
+extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
+extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
+extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
+extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
+#define KMP_TEST_THEN_ADD8(p, v) __kmp_test_then_add8((p), (v))
+
+//# define KMP_TEST_THEN_INC32(p) __kmp_test_then_add32( (p), 1
+//)
+#define KMP_TEST_THEN_OR8(p, v) __kmp_test_then_or8((p), (v))
+#define KMP_TEST_THEN_AND8(p, v) __kmp_test_then_and8((p), (v))
+//# define KMP_TEST_THEN_INC_ACQ32(p) __kmp_test_then_add32( (p), 1
+//)
+#define KMP_TEST_THEN_INC64(p) __kmp_test_then_add64((p), 1LL)
+#define KMP_TEST_THEN_INC_ACQ64(p) __kmp_test_then_add64((p), 1LL)
+//# define KMP_TEST_THEN_ADD4_32(p) __kmp_test_then_add32( (p), 4
+//)
+//# define KMP_TEST_THEN_ADD4_ACQ32(p) __kmp_test_then_add32( (p), 4
+//)
+#define KMP_TEST_THEN_ADD4_64(p) __kmp_test_then_add64((p), 4LL)
+#define KMP_TEST_THEN_ADD4_ACQ64(p) __kmp_test_then_add64((p), 4LL)
+//# define KMP_TEST_THEN_DEC32(p) __kmp_test_then_add32( (p), -1
+//)
+//# define KMP_TEST_THEN_DEC_ACQ32(p) __kmp_test_then_add32( (p), -1
+//)
+#define KMP_TEST_THEN_DEC64(p) __kmp_test_then_add64((p), -1LL)
+#define KMP_TEST_THEN_DEC_ACQ64(p) __kmp_test_then_add64((p), -1LL)
+//# define KMP_TEST_THEN_ADD32(p, v) __kmp_test_then_add32( (p),
+//(v) )
+#define KMP_TEST_THEN_ADD64(p, v) __kmp_test_then_add64((p), (v))
+
+#define KMP_TEST_THEN_OR32(p, v) __kmp_test_then_or32((p), (v))
+#define KMP_TEST_THEN_AND32(p, v) __kmp_test_then_and32((p), (v))
+#define KMP_TEST_THEN_OR64(p, v) __kmp_test_then_or64((p), (v))
+#define KMP_TEST_THEN_AND64(p, v) __kmp_test_then_and64((p), (v))
+
+#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) \
+ __kmp_compare_and_store8((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv) \
+ __kmp_compare_and_store8((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv) \
+ __kmp_compare_and_store16((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv) \
+ __kmp_compare_and_store16((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv) \
+ __kmp_compare_and_store32((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) \
+ __kmp_compare_and_store32((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
+ __kmp_compare_and_store64((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
+ __kmp_compare_and_store64((p), (cv), (sv))
+#if KMP_ARCH_X86
+#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
+ __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
+ (kmp_int32)(sv))
+#else /* 64 bit pointers */
+#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
+ __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
+ (kmp_int64)(sv))
+#endif /* KMP_ARCH_X86 */
+
+#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv) \
+ __kmp_compare_and_store_ret8((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv) \
+ __kmp_compare_and_store_ret16((p), (cv), (sv))
+//# define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) __kmp_compare_and_store_ret32(
+//(p), (cv), (sv) )
+#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
+ __kmp_compare_and_store_ret64((p), (cv), (sv))
+
+#define KMP_XCHG_FIXED8(p, v) \
+ __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
+#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
+//# define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32( (p), (v)
+//);
+//# define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64( (p), (v)
+//);
+//# define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32( (p), (v) );
+#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));
#elif (KMP_ASM_INTRINS && KMP_OS_UNIX) || !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
-# define KMP_TEST_THEN_ADD8(p, v) __sync_fetch_and_add( (kmp_int8 *)(p), (v) )
+#define KMP_TEST_THEN_ADD8(p, v) __sync_fetch_and_add((kmp_int8 *)(p), (v))
/* cast p to correct type so that proper intrinsic will be used */
-# define KMP_TEST_THEN_INC32(p) __sync_fetch_and_add( (kmp_int32 *)(p), 1 )
-# define KMP_TEST_THEN_OR8(p, v) __sync_fetch_and_or( (kmp_int8 *)(p), (v) )
-# define KMP_TEST_THEN_AND8(p, v) __sync_fetch_and_and( (kmp_int8 *)(p), (v) )
-# define KMP_TEST_THEN_INC_ACQ32(p) __sync_fetch_and_add( (kmp_int32 *)(p), 1 )
-# define KMP_TEST_THEN_INC64(p) __sync_fetch_and_add( (kmp_int64 *)(p), 1LL )
-# define KMP_TEST_THEN_INC_ACQ64(p) __sync_fetch_and_add( (kmp_int64 *)(p), 1LL )
-# define KMP_TEST_THEN_ADD4_32(p) __sync_fetch_and_add( (kmp_int32 *)(p), 4 )
-# define KMP_TEST_THEN_ADD4_ACQ32(p) __sync_fetch_and_add( (kmp_int32 *)(p), 4 )
-# define KMP_TEST_THEN_ADD4_64(p) __sync_fetch_and_add( (kmp_int64 *)(p), 4LL )
-# define KMP_TEST_THEN_ADD4_ACQ64(p) __sync_fetch_and_add( (kmp_int64 *)(p), 4LL )
-# define KMP_TEST_THEN_DEC32(p) __sync_fetch_and_sub( (kmp_int32 *)(p), 1 )
-# define KMP_TEST_THEN_DEC_ACQ32(p) __sync_fetch_and_sub( (kmp_int32 *)(p), 1 )
-# define KMP_TEST_THEN_DEC64(p) __sync_fetch_and_sub( (kmp_int64 *)(p), 1LL )
-# define KMP_TEST_THEN_DEC_ACQ64(p) __sync_fetch_and_sub( (kmp_int64 *)(p), 1LL )
-# define KMP_TEST_THEN_ADD32(p, v) __sync_fetch_and_add( (kmp_int32 *)(p), (v) )
-# define KMP_TEST_THEN_ADD64(p, v) __sync_fetch_and_add( (kmp_int64 *)(p), (v) )
-
-# define KMP_TEST_THEN_OR32(p, v) __sync_fetch_and_or( (kmp_int32 *)(p), (v) )
-# define KMP_TEST_THEN_AND32(p, v) __sync_fetch_and_and( (kmp_int32 *)(p), (v) )
-# define KMP_TEST_THEN_OR64(p, v) __sync_fetch_and_or( (kmp_int64 *)(p), (v) )
-# define KMP_TEST_THEN_AND64(p, v) __sync_fetch_and_and( (kmp_int64 *)(p), (v) )
-
-# define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) __sync_bool_compare_and_swap( (volatile kmp_uint8 *)(p),(kmp_uint8)(cv),(kmp_uint8)(sv) )
-# define KMP_COMPARE_AND_STORE_REL8(p, cv, sv) __sync_bool_compare_and_swap( (volatile kmp_uint8 *)(p),(kmp_uint8)(cv),(kmp_uint8)(sv) )
-# define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv) __sync_bool_compare_and_swap( (volatile kmp_uint16 *)(p),(kmp_uint16)(cv),(kmp_uint16)(sv) )
-# define KMP_COMPARE_AND_STORE_REL16(p, cv, sv) __sync_bool_compare_and_swap( (volatile kmp_uint16 *)(p),(kmp_uint16)(cv),(kmp_uint16)(sv) )
-# define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv) __sync_bool_compare_and_swap( (volatile kmp_uint32 *)(p),(kmp_uint32)(cv),(kmp_uint32)(sv) )
-# define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) __sync_bool_compare_and_swap( (volatile kmp_uint32 *)(p),(kmp_uint32)(cv),(kmp_uint32)(sv) )
-# define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) __sync_bool_compare_and_swap( (volatile kmp_uint64 *)(p),(kmp_uint64)(cv),(kmp_uint64)(sv) )
-# define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) __sync_bool_compare_and_swap( (volatile kmp_uint64 *)(p),(kmp_uint64)(cv),(kmp_uint64)(sv) )
-# define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) __sync_bool_compare_and_swap( (volatile void **)(p),(void *)(cv),(void *)(sv) )
-
-# define KMP_COMPARE_AND_STORE_RET8(p, cv, sv) __sync_val_compare_and_swap( (volatile kmp_uint8 *)(p),(kmp_uint8)(cv),(kmp_uint8)(sv) )
-# define KMP_COMPARE_AND_STORE_RET16(p, cv, sv) __sync_val_compare_and_swap( (volatile kmp_uint16 *)(p),(kmp_uint16)(cv),(kmp_uint16)(sv) )
-# define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) __sync_val_compare_and_swap( (volatile kmp_uint32 *)(p),(kmp_uint32)(cv),(kmp_uint32)(sv) )
-# define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) __sync_val_compare_and_swap( (volatile kmp_uint64 *)(p),(kmp_uint64)(cv),(kmp_uint64)(sv) )
-
-#define KMP_XCHG_FIXED8(p, v) __sync_lock_test_and_set( (volatile kmp_uint8 *)(p), (kmp_uint8)(v) )
-#define KMP_XCHG_FIXED16(p, v) __sync_lock_test_and_set( (volatile kmp_uint16 *)(p), (kmp_uint16)(v) )
-#define KMP_XCHG_FIXED32(p, v) __sync_lock_test_and_set( (volatile kmp_uint32 *)(p), (kmp_uint32)(v) )
-#define KMP_XCHG_FIXED64(p, v) __sync_lock_test_and_set( (volatile kmp_uint64 *)(p), (kmp_uint64)(v) )
-
-extern kmp_int8 __kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 v );
-extern kmp_int8 __kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 v );
-extern kmp_int8 __kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 v );
-inline kmp_real32 KMP_XCHG_REAL32( volatile kmp_real32 *p, kmp_real32 v)
-{
- kmp_int32 tmp = __sync_lock_test_and_set( (kmp_int32*)p, *(kmp_int32*)&v);
- return *(kmp_real32*)&tmp;
+#define KMP_TEST_THEN_INC32(p) __sync_fetch_and_add((kmp_int32 *)(p), 1)
+#define KMP_TEST_THEN_OR8(p, v) __sync_fetch_and_or((kmp_int8 *)(p), (v))
+#define KMP_TEST_THEN_AND8(p, v) __sync_fetch_and_and((kmp_int8 *)(p), (v))
+#define KMP_TEST_THEN_INC_ACQ32(p) __sync_fetch_and_add((kmp_int32 *)(p), 1)
+#define KMP_TEST_THEN_INC64(p) __sync_fetch_and_add((kmp_int64 *)(p), 1LL)
+#define KMP_TEST_THEN_INC_ACQ64(p) __sync_fetch_and_add((kmp_int64 *)(p), 1LL)
+#define KMP_TEST_THEN_ADD4_32(p) __sync_fetch_and_add((kmp_int32 *)(p), 4)
+#define KMP_TEST_THEN_ADD4_ACQ32(p) __sync_fetch_and_add((kmp_int32 *)(p), 4)
+#define KMP_TEST_THEN_ADD4_64(p) __sync_fetch_and_add((kmp_int64 *)(p), 4LL)
+#define KMP_TEST_THEN_ADD4_ACQ64(p) __sync_fetch_and_add((kmp_int64 *)(p), 4LL)
+#define KMP_TEST_THEN_DEC32(p) __sync_fetch_and_sub((kmp_int32 *)(p), 1)
+#define KMP_TEST_THEN_DEC_ACQ32(p) __sync_fetch_and_sub((kmp_int32 *)(p), 1)
+#define KMP_TEST_THEN_DEC64(p) __sync_fetch_and_sub((kmp_int64 *)(p), 1LL)
+#define KMP_TEST_THEN_DEC_ACQ64(p) __sync_fetch_and_sub((kmp_int64 *)(p), 1LL)
+#define KMP_TEST_THEN_ADD32(p, v) __sync_fetch_and_add((kmp_int32 *)(p), (v))
+#define KMP_TEST_THEN_ADD64(p, v) __sync_fetch_and_add((kmp_int64 *)(p), (v))
+
+#define KMP_TEST_THEN_OR32(p, v) __sync_fetch_and_or((kmp_int32 *)(p), (v))
+#define KMP_TEST_THEN_AND32(p, v) __sync_fetch_and_and((kmp_int32 *)(p), (v))
+#define KMP_TEST_THEN_OR64(p, v) __sync_fetch_and_or((kmp_int64 *)(p), (v))
+#define KMP_TEST_THEN_AND64(p, v) __sync_fetch_and_and((kmp_int64 *)(p), (v))
+
+#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) \
+ __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv), \
+ (kmp_uint8)(sv))
+#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv) \
+ __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv), \
+ (kmp_uint8)(sv))
+#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv) \
+ __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv), \
+ (kmp_uint16)(sv))
+#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv) \
+ __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv), \
+ (kmp_uint16)(sv))
+#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv) \
+ __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv), \
+ (kmp_uint32)(sv))
+#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) \
+ __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv), \
+ (kmp_uint32)(sv))
+#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
+ __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
+ (kmp_uint64)(sv))
+#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
+ __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
+ (kmp_uint64)(sv))
+#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
+ __sync_bool_compare_and_swap((volatile void **)(p), (void *)(cv), \
+ (void *)(sv))
+
+#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv) \
+ __sync_val_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv), \
+ (kmp_uint8)(sv))
+#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv) \
+ __sync_val_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv), \
+ (kmp_uint16)(sv))
+#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) \
+ __sync_val_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv), \
+ (kmp_uint32)(sv))
+#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
+ __sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
+ (kmp_uint64)(sv))
+
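
On this GCC-intrinsics path the ACQ/REL compare-and-store macros use __sync_bool_compare_and_swap, which reports whether the swap happened, while the *_RET variants use __sync_val_compare_and_swap, which returns the value previously stored. A minimal sketch of the difference (assumes a GCC-compatible compiler providing the __sync builtins):

  static int show_cas_flavors(void) {
    volatile unsigned int x = 5;
    // bool flavor: did the store happen?
    int stored = __sync_bool_compare_and_swap(&x, 5u, 7u);       // stored == 1, x == 7
    // val flavor: what was there before the attempt?
    unsigned int old = __sync_val_compare_and_swap(&x, 5u, 9u);  // old == 7, x stays 7
    return stored && old == 7u;
  }
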
+#define KMP_XCHG_FIXED8(p, v) \
+ __sync_lock_test_and_set((volatile kmp_uint8 *)(p), (kmp_uint8)(v))
+#define KMP_XCHG_FIXED16(p, v) \
+ __sync_lock_test_and_set((volatile kmp_uint16 *)(p), (kmp_uint16)(v))
+#define KMP_XCHG_FIXED32(p, v) \
+ __sync_lock_test_and_set((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
+#define KMP_XCHG_FIXED64(p, v) \
+ __sync_lock_test_and_set((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
+
+extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);
+extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
+extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
+inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
+ kmp_int32 tmp = __sync_lock_test_and_set((kmp_int32 *)p, *(kmp_int32 *)&v);
+ return *(kmp_real32 *)&tmp;
}
-inline kmp_real64 KMP_XCHG_REAL64( volatile kmp_real64 *p, kmp_real64 v)
-{
- kmp_int64 tmp = __sync_lock_test_and_set( (kmp_int64*)p, *(kmp_int64*)&v);
- return *(kmp_real64*)&tmp;
+inline kmp_real64 KMP_XCHG_REAL64(volatile kmp_real64 *p, kmp_real64 v) {
+ kmp_int64 tmp = __sync_lock_test_and_set((kmp_int64 *)p, *(kmp_int64 *)&v);
+ return *(kmp_real64 *)&tmp;
}
#else
-extern kmp_int32 __kmp_test_then_add32( volatile kmp_int32 *p, kmp_int32 v );
-extern kmp_int32 __kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 v );
-extern kmp_int32 __kmp_test_then_and32( volatile kmp_int32 *p, kmp_int32 v );
-extern kmp_int64 __kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 v );
-extern kmp_int64 __kmp_test_then_or64( volatile kmp_int64 *p, kmp_int64 v );
-extern kmp_int64 __kmp_test_then_and64( volatile kmp_int64 *p, kmp_int64 v );
-
-extern kmp_int8 __kmp_compare_and_store8( volatile kmp_int8 *p, kmp_int8 cv, kmp_int8 sv );
-extern kmp_int16 __kmp_compare_and_store16( volatile kmp_int16 *p, kmp_int16 cv, kmp_int16 sv );
-extern kmp_int32 __kmp_compare_and_store32( volatile kmp_int32 *p, kmp_int32 cv, kmp_int32 sv );
-extern kmp_int32 __kmp_compare_and_store64( volatile kmp_int64 *p, kmp_int64 cv, kmp_int64 sv );
-extern kmp_int8 __kmp_compare_and_store_ret8( volatile kmp_int8 *p, kmp_int8 cv, kmp_int8 sv );
-extern kmp_int16 __kmp_compare_and_store_ret16( volatile kmp_int16 *p, kmp_int16 cv, kmp_int16 sv );
-extern kmp_int32 __kmp_compare_and_store_ret32( volatile kmp_int32 *p, kmp_int32 cv, kmp_int32 sv );
-extern kmp_int64 __kmp_compare_and_store_ret64( volatile kmp_int64 *p, kmp_int64 cv, kmp_int64 sv );
-
-extern kmp_int8 __kmp_xchg_fixed8( volatile kmp_int8 *p, kmp_int8 v );
-extern kmp_int16 __kmp_xchg_fixed16( volatile kmp_int16 *p, kmp_int16 v );
-extern kmp_int32 __kmp_xchg_fixed32( volatile kmp_int32 *p, kmp_int32 v );
-extern kmp_int64 __kmp_xchg_fixed64( volatile kmp_int64 *p, kmp_int64 v );
-extern kmp_real32 __kmp_xchg_real32( volatile kmp_real32 *p, kmp_real32 v );
-# define KMP_TEST_THEN_ADD8(p, v) __kmp_test_then_add8( (p), (v) )
-extern kmp_real64 __kmp_xchg_real64( volatile kmp_real64 *p, kmp_real64 v );
-
-# define KMP_TEST_THEN_INC32(p) __kmp_test_then_add32( (p), 1 )
-# define KMP_TEST_THEN_OR8(p, v) __kmp_test_then_or8( (p), (v) )
-# define KMP_TEST_THEN_AND8(p, v) __kmp_test_then_and8( (p), (v) )
-# define KMP_TEST_THEN_INC_ACQ32(p) __kmp_test_then_add32( (p), 1 )
-# define KMP_TEST_THEN_INC64(p) __kmp_test_then_add64( (p), 1LL )
-# define KMP_TEST_THEN_INC_ACQ64(p) __kmp_test_then_add64( (p), 1LL )
-# define KMP_TEST_THEN_ADD4_32(p) __kmp_test_then_add32( (p), 4 )
-# define KMP_TEST_THEN_ADD4_ACQ32(p) __kmp_test_then_add32( (p), 4 )
-# define KMP_TEST_THEN_ADD4_64(p) __kmp_test_then_add64( (p), 4LL )
-# define KMP_TEST_THEN_ADD4_ACQ64(p) __kmp_test_then_add64( (p), 4LL )
-# define KMP_TEST_THEN_DEC32(p) __kmp_test_then_add32( (p), -1 )
-# define KMP_TEST_THEN_DEC_ACQ32(p) __kmp_test_then_add32( (p), -1 )
-# define KMP_TEST_THEN_DEC64(p) __kmp_test_then_add64( (p), -1LL )
-# define KMP_TEST_THEN_DEC_ACQ64(p) __kmp_test_then_add64( (p), -1LL )
-# define KMP_TEST_THEN_ADD32(p, v) __kmp_test_then_add32( (p), (v) )
-# define KMP_TEST_THEN_ADD64(p, v) __kmp_test_then_add64( (p), (v) )
-
-# define KMP_TEST_THEN_OR32(p, v) __kmp_test_then_or32( (p), (v) )
-# define KMP_TEST_THEN_AND32(p, v) __kmp_test_then_and32( (p), (v) )
-# define KMP_TEST_THEN_OR64(p, v) __kmp_test_then_or64( (p), (v) )
-# define KMP_TEST_THEN_AND64(p, v) __kmp_test_then_and64( (p), (v) )
-
-# define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) __kmp_compare_and_store8( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_REL8(p, cv, sv) __kmp_compare_and_store8( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv) __kmp_compare_and_store16( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_REL16(p, cv, sv) __kmp_compare_and_store16( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv) __kmp_compare_and_store32( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) __kmp_compare_and_store32( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) __kmp_compare_and_store64( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) __kmp_compare_and_store64( (p), (cv), (sv) )
-
-# if KMP_ARCH_X86
-# define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) __kmp_compare_and_store32( (volatile kmp_int32*)(p), (kmp_int32)(cv), (kmp_int32)(sv) )
-# else /* 64 bit pointers */
-# define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) __kmp_compare_and_store64( (volatile kmp_int64*)(p), (kmp_int64)(cv), (kmp_int64)(sv) )
-# endif /* KMP_ARCH_X86 */
-
-# define KMP_COMPARE_AND_STORE_RET8(p, cv, sv) __kmp_compare_and_store_ret8( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_RET16(p, cv, sv) __kmp_compare_and_store_ret16( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) __kmp_compare_and_store_ret32( (p), (cv), (sv) )
-# define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) __kmp_compare_and_store_ret64( (p), (cv), (sv) )
-
-# define KMP_XCHG_FIXED8(p, v) __kmp_xchg_fixed8( (volatile kmp_int8*)(p), (kmp_int8)(v) );
-# define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16( (p), (v) );
-# define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32( (p), (v) );
-# define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64( (p), (v) );
-# define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32( (p), (v) );
-# define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64( (p), (v) );
+extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
+extern kmp_int32 __kmp_test_then_or32(volatile kmp_int32 *p, kmp_int32 v);
+extern kmp_int32 __kmp_test_then_and32(volatile kmp_int32 *p, kmp_int32 v);
+extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
+extern kmp_int64 __kmp_test_then_or64(volatile kmp_int64 *p, kmp_int64 v);
+extern kmp_int64 __kmp_test_then_and64(volatile kmp_int64 *p, kmp_int64 v);
+
+extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
+ kmp_int8 sv);
+extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
+ kmp_int16 sv);
+extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
+ kmp_int32 sv);
+extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
+ kmp_int64 sv);
+extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
+ kmp_int8 sv);
+extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
+ kmp_int16 cv, kmp_int16 sv);
+extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
+ kmp_int32 cv, kmp_int32 sv);
+extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
+ kmp_int64 cv, kmp_int64 sv);
+
+extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
+extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
+extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
+extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
+extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
+#define KMP_TEST_THEN_ADD8(p, v) __kmp_test_then_add8((p), (v))
+extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
+
+#define KMP_TEST_THEN_INC32(p) __kmp_test_then_add32((p), 1)
+#define KMP_TEST_THEN_OR8(p, v) __kmp_test_then_or8((p), (v))
+#define KMP_TEST_THEN_AND8(p, v) __kmp_test_then_and8((p), (v))
+#define KMP_TEST_THEN_INC_ACQ32(p) __kmp_test_then_add32((p), 1)
+#define KMP_TEST_THEN_INC64(p) __kmp_test_then_add64((p), 1LL)
+#define KMP_TEST_THEN_INC_ACQ64(p) __kmp_test_then_add64((p), 1LL)
+#define KMP_TEST_THEN_ADD4_32(p) __kmp_test_then_add32((p), 4)
+#define KMP_TEST_THEN_ADD4_ACQ32(p) __kmp_test_then_add32((p), 4)
+#define KMP_TEST_THEN_ADD4_64(p) __kmp_test_then_add64((p), 4LL)
+#define KMP_TEST_THEN_ADD4_ACQ64(p) __kmp_test_then_add64((p), 4LL)
+#define KMP_TEST_THEN_DEC32(p) __kmp_test_then_add32((p), -1)
+#define KMP_TEST_THEN_DEC_ACQ32(p) __kmp_test_then_add32((p), -1)
+#define KMP_TEST_THEN_DEC64(p) __kmp_test_then_add64((p), -1LL)
+#define KMP_TEST_THEN_DEC_ACQ64(p) __kmp_test_then_add64((p), -1LL)
+#define KMP_TEST_THEN_ADD32(p, v) __kmp_test_then_add32((p), (v))
+#define KMP_TEST_THEN_ADD64(p, v) __kmp_test_then_add64((p), (v))
+
+#define KMP_TEST_THEN_OR32(p, v) __kmp_test_then_or32((p), (v))
+#define KMP_TEST_THEN_AND32(p, v) __kmp_test_then_and32((p), (v))
+#define KMP_TEST_THEN_OR64(p, v) __kmp_test_then_or64((p), (v))
+#define KMP_TEST_THEN_AND64(p, v) __kmp_test_then_and64((p), (v))
+
+#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) \
+ __kmp_compare_and_store8((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv) \
+ __kmp_compare_and_store8((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv) \
+ __kmp_compare_and_store16((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv) \
+ __kmp_compare_and_store16((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv) \
+ __kmp_compare_and_store32((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) \
+ __kmp_compare_and_store32((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
+ __kmp_compare_and_store64((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
+ __kmp_compare_and_store64((p), (cv), (sv))
-#endif /* KMP_ASM_INTRINS */
+#if KMP_ARCH_X86
+#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
+ __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
+ (kmp_int32)(sv))
+#else /* 64 bit pointers */
+#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
+ __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
+ (kmp_int64)(sv))
+#endif /* KMP_ARCH_X86 */
+#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv) \
+ __kmp_compare_and_store_ret8((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv) \
+ __kmp_compare_and_store_ret16((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) \
+ __kmp_compare_and_store_ret32((p), (cv), (sv))
+#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
+ __kmp_compare_and_store_ret64((p), (cv), (sv))
+
+#define KMP_XCHG_FIXED8(p, v) \
+ __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
+#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
+#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
+#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
+#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
+#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));
+
+#endif /* KMP_ASM_INTRINS */
/* ------------- relaxed consistency memory model stuff ------------------ */
#if KMP_OS_WINDOWS
-# ifdef __ABSOFT_WIN
-# define KMP_MB() asm ("nop")
-# define KMP_IMB() asm ("nop")
-# else
-# define KMP_MB() /* _asm{ nop } */
-# define KMP_IMB() /* _asm{ nop } */
-# endif
+#ifdef __ABSOFT_WIN
+#define KMP_MB() asm("nop")
+#define KMP_IMB() asm("nop")
+#else
+#define KMP_MB() /* _asm{ nop } */
+#define KMP_IMB() /* _asm{ nop } */
+#endif
#endif /* KMP_OS_WINDOWS */
-#if KMP_ARCH_PPC64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS || KMP_ARCH_MIPS64
-# define KMP_MB() __sync_synchronize()
+#if KMP_ARCH_PPC64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS || \
+ KMP_ARCH_MIPS64
+#define KMP_MB() __sync_synchronize()
#endif
#ifndef KMP_MB
-# define KMP_MB() /* nothing to do */
+#define KMP_MB() /* nothing to do */
#endif
#ifndef KMP_IMB
-# define KMP_IMB() /* nothing to do */
+#define KMP_IMB() /* nothing to do */
#endif
#ifndef KMP_ST_REL32
-# define KMP_ST_REL32(A,D) ( *(A) = (D) )
+#define KMP_ST_REL32(A, D) (*(A) = (D))
#endif
#ifndef KMP_ST_REL64
-# define KMP_ST_REL64(A,D) ( *(A) = (D) )
+#define KMP_ST_REL64(A, D) (*(A) = (D))
#endif
#ifndef KMP_LD_ACQ32
-# define KMP_LD_ACQ32(A) ( *(A) )
+#define KMP_LD_ACQ32(A) (*(A))
#endif
#ifndef KMP_LD_ACQ64
-# define KMP_LD_ACQ64(A) ( *(A) )
+#define KMP_LD_ACQ64(A) (*(A))
#endif
-#define TCR_1(a) (a)
-#define TCW_1(a,b) (a) = (b)
+#define TCR_1(a) (a)
+#define TCW_1(a, b) (a) = (b)
/* ------------------------------------------------------------------------ */
-//
// FIXME - maybe this should this be
//
// #define TCR_4(a) (*(volatile kmp_int32 *)(&a))
@@ -608,76 +688,77 @@ extern kmp_real64 __kmp_xchg_real64( vol
//
// I'm fairly certain this is the correct thing to do, but I'm afraid
// of performance regressions.
-//
-#define TCR_4(a) (a)
-#define TCW_4(a,b) (a) = (b)
-#define TCI_4(a) (++(a))
-#define TCD_4(a) (--(a))
-#define TCR_8(a) (a)
-#define TCW_8(a,b) (a) = (b)
-#define TCI_8(a) (++(a))
-#define TCD_8(a) (--(a))
-#define TCR_SYNC_4(a) (a)
-#define TCW_SYNC_4(a,b) (a) = (b)
-#define TCX_SYNC_4(a,b,c) KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)(volatile void *)&(a), (kmp_int32)(b), (kmp_int32)(c))
-#define TCR_SYNC_8(a) (a)
-#define TCW_SYNC_8(a,b) (a) = (b)
-#define TCX_SYNC_8(a,b,c) KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)(volatile void *)&(a), (kmp_int64)(b), (kmp_int64)(c))
+#define TCR_4(a) (a)
+#define TCW_4(a, b) (a) = (b)
+#define TCI_4(a) (++(a))
+#define TCD_4(a) (--(a))
+#define TCR_8(a) (a)
+#define TCW_8(a, b) (a) = (b)
+#define TCI_8(a) (++(a))
+#define TCD_8(a) (--(a))
+#define TCR_SYNC_4(a) (a)
+#define TCW_SYNC_4(a, b) (a) = (b)
+#define TCX_SYNC_4(a, b, c) \
+ KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)(volatile void *)&(a), \
+ (kmp_int32)(b), (kmp_int32)(c))
+#define TCR_SYNC_8(a) (a)
+#define TCW_SYNC_8(a, b) (a) = (b)
+#define TCX_SYNC_8(a, b, c) \
+ KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)(volatile void *)&(a), \
+ (kmp_int64)(b), (kmp_int64)(c))
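
The FIXME above asks whether TCR_4/TCW_4 should force every access through a volatile-qualified pointer instead of the plain (a) / (a) = (b) definitions. A small sketch of what that alternative buys, using the kmp_int32 typedef from earlier in this header (spin_until_set is hypothetical):

  // With the volatile cast the compiler must re-load 'flag' on every
  // iteration; the plain form would let it cache the value in a register.
  static int spin_until_set(kmp_int32 *flag) {
    int spins = 0;
    while (*(volatile kmp_int32 *)flag == 0)  // fresh load each pass
      ++spins;                                // another thread eventually sets *flag
    return spins;
  }
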
#if KMP_ARCH_X86 || KMP_ARCH_MIPS
// What about ARM?
- #define TCR_PTR(a) ((void *)TCR_4(a))
- #define TCW_PTR(a,b) TCW_4((a),(b))
- #define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_4(a))
- #define TCW_SYNC_PTR(a,b) TCW_SYNC_4((a),(b))
- #define TCX_SYNC_PTR(a,b,c) ((void *)TCX_SYNC_4((a),(b),(c)))
+#define TCR_PTR(a) ((void *)TCR_4(a))
+#define TCW_PTR(a, b) TCW_4((a), (b))
+#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_4(a))
+#define TCW_SYNC_PTR(a, b) TCW_SYNC_4((a), (b))
+#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_4((a), (b), (c)))
#else /* 64 bit pointers */
- #define TCR_PTR(a) ((void *)TCR_8(a))
- #define TCW_PTR(a,b) TCW_8((a),(b))
- #define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_8(a))
- #define TCW_SYNC_PTR(a,b) TCW_SYNC_8((a),(b))
- #define TCX_SYNC_PTR(a,b,c) ((void *)TCX_SYNC_8((a),(b),(c)))
+#define TCR_PTR(a) ((void *)TCR_8(a))
+#define TCW_PTR(a, b) TCW_8((a), (b))
+#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_8(a))
+#define TCW_SYNC_PTR(a, b) TCW_SYNC_8((a), (b))
+#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_8((a), (b), (c)))
#endif /* KMP_ARCH_X86 */
-/*
- * If these FTN_{TRUE,FALSE} values change, may need to
- * change several places where they are used to check that
- * language is Fortran, not C.
- */
+/* If these FTN_{TRUE,FALSE} values change, may need to change several places
+ where they are used to check that language is Fortran, not C. */
#ifndef FTN_TRUE
-# define FTN_TRUE TRUE
+#define FTN_TRUE TRUE
#endif
#ifndef FTN_FALSE
-# define FTN_FALSE FALSE
+#define FTN_FALSE FALSE
#endif
-typedef void (*microtask_t)( int *gtid, int *npr, ... );
+typedef void (*microtask_t)(int *gtid, int *npr, ...);
#ifdef USE_VOLATILE_CAST
-# define VOLATILE_CAST(x) (volatile x)
+#define VOLATILE_CAST(x) (volatile x)
#else
-# define VOLATILE_CAST(x) (x)
+#define VOLATILE_CAST(x) (x)
#endif
-#define KMP_WAIT_YIELD __kmp_wait_yield_4
-#define KMP_WAIT_YIELD_PTR __kmp_wait_yield_4_ptr
-#define KMP_EQ __kmp_eq_4
-#define KMP_NEQ __kmp_neq_4
-#define KMP_LT __kmp_lt_4
-#define KMP_GE __kmp_ge_4
-#define KMP_LE __kmp_le_4
+#define KMP_WAIT_YIELD __kmp_wait_yield_4
+#define KMP_WAIT_YIELD_PTR __kmp_wait_yield_4_ptr
+#define KMP_EQ __kmp_eq_4
+#define KMP_NEQ __kmp_neq_4
+#define KMP_LT __kmp_lt_4
+#define KMP_GE __kmp_ge_4
+#define KMP_LE __kmp_le_4
-/* Workaround for Intel(R) 64 code gen bug when taking address of static array (Intel(R) 64 Tracker #138) */
+/* Workaround for Intel(R) 64 code gen bug when taking address of static array
+ * (Intel(R) 64 Tracker #138) */
#if (KMP_ARCH_X86_64 || KMP_ARCH_PPC64) && KMP_OS_LINUX
-# define STATIC_EFI2_WORKAROUND
+#define STATIC_EFI2_WORKAROUND
#else
-# define STATIC_EFI2_WORKAROUND static
+#define STATIC_EFI2_WORKAROUND static
#endif
// Support of BGET usage
@@ -688,38 +769,39 @@ typedef void (*microtask_t)( int *gti
// Switches for OSS builds
#ifndef USE_SYSFS_INFO
-# define USE_SYSFS_INFO 0
+#define USE_SYSFS_INFO 0
#endif
#ifndef USE_CMPXCHG_FIX
-# define USE_CMPXCHG_FIX 1
+#define USE_CMPXCHG_FIX 1
#endif
// Enable dynamic user lock
#if OMP_45_ENABLED
-# define KMP_USE_DYNAMIC_LOCK 1
+#define KMP_USE_DYNAMIC_LOCK 1
#endif
// Enable TSX if dynamic user lock is turned on
#if KMP_USE_DYNAMIC_LOCK
// Visual studio can't handle the asm sections in this code
-# define KMP_USE_TSX (KMP_ARCH_X86 || KMP_ARCH_X86_64) && !KMP_COMPILER_MSVC
-# ifdef KMP_USE_ADAPTIVE_LOCKS
-# undef KMP_USE_ADAPTIVE_LOCKS
-# endif
-# define KMP_USE_ADAPTIVE_LOCKS KMP_USE_TSX
+#define KMP_USE_TSX (KMP_ARCH_X86 || KMP_ARCH_X86_64) && !KMP_COMPILER_MSVC
+#ifdef KMP_USE_ADAPTIVE_LOCKS
+#undef KMP_USE_ADAPTIVE_LOCKS
+#endif
+#define KMP_USE_ADAPTIVE_LOCKS KMP_USE_TSX
#endif
// Enable tick time conversion of ticks to seconds
#if KMP_STATS_ENABLED
-# define KMP_HAVE_TICK_TIME (KMP_OS_LINUX && (KMP_MIC || KMP_ARCH_X86 || KMP_ARCH_X86_64))
+#define KMP_HAVE_TICK_TIME \
+ (KMP_OS_LINUX && (KMP_MIC || KMP_ARCH_X86 || KMP_ARCH_X86_64))
#endif
// Warning levels
enum kmp_warnings_level {
- kmp_warnings_off = 0, /* No warnings */
- kmp_warnings_low, /* Minimal warnings (default) */
- kmp_warnings_explicit = 6, /* Explicitly set to ON - more warnings */
- kmp_warnings_verbose /* reserved */
+ kmp_warnings_off = 0, /* No warnings */
+ kmp_warnings_low, /* Minimal warnings (default) */
+ kmp_warnings_explicit = 6, /* Explicitly set to ON - more warnings */
+ kmp_warnings_verbose /* reserved */
};
#ifdef __cplusplus
@@ -729,4 +811,3 @@ enum kmp_warnings_level {
#endif /* KMP_OS_H */
// Safe C API
#include "kmp_safe_c_api.h"
-
Modified: openmp/trunk/runtime/src/kmp_platform.h
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp_platform.h?rev=302929&r1=302928&r2=302929&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp_platform.h (original)
+++ openmp/trunk/runtime/src/kmp_platform.h Fri May 12 13:01:32 2017
@@ -2,6 +2,7 @@
* kmp_platform.h -- header for determining operating system and architecture
*/
+
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
@@ -11,171 +12,175 @@
//
//===----------------------------------------------------------------------===//
+
#ifndef KMP_PLATFORM_H
#define KMP_PLATFORM_H
/* ---------------------- Operating system recognition ------------------- */
-#define KMP_OS_LINUX 0
-#define KMP_OS_FREEBSD 0
-#define KMP_OS_NETBSD 0
-#define KMP_OS_DARWIN 0
-#define KMP_OS_WINDOWS 0
-#define KMP_OS_CNK 0
-#define KMP_OS_UNIX 0 /* disjunction of KMP_OS_LINUX, KMP_OS_DARWIN etc. */
-
+#define KMP_OS_LINUX 0
+#define KMP_OS_FREEBSD 0
+#define KMP_OS_NETBSD 0
+#define KMP_OS_DARWIN 0
+#define KMP_OS_WINDOWS 0
+#define KMP_OS_CNK 0
+#define KMP_OS_UNIX 0 /* disjunction of KMP_OS_LINUX, KMP_OS_DARWIN etc. */
#ifdef _WIN32
-# undef KMP_OS_WINDOWS
-# define KMP_OS_WINDOWS 1
+#undef KMP_OS_WINDOWS
+#define KMP_OS_WINDOWS 1
#endif
-#if ( defined __APPLE__ && defined __MACH__ )
-# undef KMP_OS_DARWIN
-# define KMP_OS_DARWIN 1
+#if (defined __APPLE__ && defined __MACH__)
+#undef KMP_OS_DARWIN
+#define KMP_OS_DARWIN 1
#endif
// in some ppc64 linux installations, only the second condition is met
-#if ( defined __linux )
-# undef KMP_OS_LINUX
-# define KMP_OS_LINUX 1
-#elif ( defined __linux__)
-# undef KMP_OS_LINUX
-# define KMP_OS_LINUX 1
+#if (defined __linux)
+#undef KMP_OS_LINUX
+#define KMP_OS_LINUX 1
+#elif (defined __linux__)
+#undef KMP_OS_LINUX
+#define KMP_OS_LINUX 1
#else
#endif
-#if ( defined __FreeBSD__ )
-# undef KMP_OS_FREEBSD
-# define KMP_OS_FREEBSD 1
+#if (defined __FreeBSD__)
+#undef KMP_OS_FREEBSD
+#define KMP_OS_FREEBSD 1
#endif
-#if ( defined __NetBSD__ )
-# undef KMP_OS_NETBSD
-# define KMP_OS_NETBSD 1
+#if (defined __NetBSD__)
+#undef KMP_OS_NETBSD
+#define KMP_OS_NETBSD 1
#endif
-#if ( defined __bgq__ )
-# undef KMP_OS_CNK
-# define KMP_OS_CNK 1
+#if (defined __bgq__)
+#undef KMP_OS_CNK
+#define KMP_OS_CNK 1
#endif
-#if (1 != KMP_OS_LINUX + KMP_OS_FREEBSD + KMP_OS_NETBSD + KMP_OS_DARWIN + KMP_OS_WINDOWS)
-# error Unknown OS
+#if (1 != \
+ KMP_OS_LINUX + KMP_OS_FREEBSD + KMP_OS_NETBSD + KMP_OS_DARWIN + \
+ KMP_OS_WINDOWS)
+#error Unknown OS
#endif
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DARWIN
-# undef KMP_OS_UNIX
-# define KMP_OS_UNIX 1
+#undef KMP_OS_UNIX
+#define KMP_OS_UNIX 1
#endif
/* ---------------------- Architecture recognition ------------------- */
-#define KMP_ARCH_X86 0
-#define KMP_ARCH_X86_64 0
-#define KMP_ARCH_AARCH64 0
-#define KMP_ARCH_PPC64_BE 0
-#define KMP_ARCH_PPC64_LE 0
+#define KMP_ARCH_X86 0
+#define KMP_ARCH_X86_64 0
+#define KMP_ARCH_AARCH64 0
+#define KMP_ARCH_PPC64_BE 0
+#define KMP_ARCH_PPC64_LE 0
#define KMP_ARCH_PPC64 (KMP_ARCH_PPC64_LE || KMP_ARCH_PPC64_BE)
-#define KMP_ARCH_MIPS 0
-#define KMP_ARCH_MIPS64 0
+#define KMP_ARCH_MIPS 0
+#define KMP_ARCH_MIPS64 0
#if KMP_OS_WINDOWS
-# if defined _M_AMD64
-# undef KMP_ARCH_X86_64
-# define KMP_ARCH_X86_64 1
-# else
-# undef KMP_ARCH_X86
-# define KMP_ARCH_X86 1
-# endif
+#if defined _M_AMD64
+#undef KMP_ARCH_X86_64
+#define KMP_ARCH_X86_64 1
+#else
+#undef KMP_ARCH_X86
+#define KMP_ARCH_X86 1
+#endif
#endif
#if KMP_OS_UNIX
-# if defined __x86_64
-# undef KMP_ARCH_X86_64
-# define KMP_ARCH_X86_64 1
-# elif defined __i386
-# undef KMP_ARCH_X86
-# define KMP_ARCH_X86 1
-# elif defined __powerpc64__
-# if defined __LITTLE_ENDIAN__
-# undef KMP_ARCH_PPC64_LE
-# define KMP_ARCH_PPC64_LE 1
-# else
-# undef KMP_ARCH_PPC64_BE
-# define KMP_ARCH_PPC64_BE 1
-# endif
-# elif defined __aarch64__
-# undef KMP_ARCH_AARCH64
-# define KMP_ARCH_AARCH64 1
-# elif defined __mips__
-# if defined __mips64
-# undef KMP_ARCH_MIPS64
-# define KMP_ARCH_MIPS64 1
-# else
-# undef KMP_ARCH_MIPS
-# define KMP_ARCH_MIPS 1
-# endif
-# endif
+#if defined __x86_64
+#undef KMP_ARCH_X86_64
+#define KMP_ARCH_X86_64 1
+#elif defined __i386
+#undef KMP_ARCH_X86
+#define KMP_ARCH_X86 1
+#elif defined __powerpc64__
+#if defined __LITTLE_ENDIAN__
+#undef KMP_ARCH_PPC64_LE
+#define KMP_ARCH_PPC64_LE 1
+#else
+#undef KMP_ARCH_PPC64_BE
+#define KMP_ARCH_PPC64_BE 1
+#endif
+#elif defined __aarch64__
+#undef KMP_ARCH_AARCH64
+#define KMP_ARCH_AARCH64 1
+#elif defined __mips__
+#if defined __mips64
+#undef KMP_ARCH_MIPS64
+#define KMP_ARCH_MIPS64 1
+#else
+#undef KMP_ARCH_MIPS
+#define KMP_ARCH_MIPS 1
+#endif
+#endif
#endif
-#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7R__) || \
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7A__)
-# define KMP_ARCH_ARMV7 1
+#define KMP_ARCH_ARMV7 1
#endif
-#if defined(KMP_ARCH_ARMV7) || defined(__ARM_ARCH_6__) || \
- defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || \
- defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6T2__) || \
+#if defined(KMP_ARCH_ARMV7) || defined(__ARM_ARCH_6__) || \
+ defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || \
+ defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6T2__) || \
defined(__ARM_ARCH_6ZK__)
-# define KMP_ARCH_ARMV6 1
+#define KMP_ARCH_ARMV6 1
#endif
-#if defined(KMP_ARCH_ARMV6) || defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) || \
+#if defined(KMP_ARCH_ARMV6) || defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__)
-# define KMP_ARCH_ARMV5 1
+#define KMP_ARCH_ARMV5 1
#endif
-#if defined(KMP_ARCH_ARMV5) || defined(__ARM_ARCH_4__) || \
+#if defined(KMP_ARCH_ARMV5) || defined(__ARM_ARCH_4__) || \
defined(__ARM_ARCH_4T__)
-# define KMP_ARCH_ARMV4 1
+#define KMP_ARCH_ARMV4 1
#endif
-#if defined(KMP_ARCH_ARMV4) || defined(__ARM_ARCH_3__) || \
+#if defined(KMP_ARCH_ARMV4) || defined(__ARM_ARCH_3__) || \
defined(__ARM_ARCH_3M__)
-# define KMP_ARCH_ARMV3 1
+#define KMP_ARCH_ARMV3 1
#endif
-#if defined(KMP_ARCH_ARMV3) || defined(__ARM_ARCH_2__)
-# define KMP_ARCH_ARMV2 1
+#if defined(KMP_ARCH_ARMV3) || defined(__ARM_ARCH_2__)
+#define KMP_ARCH_ARMV2 1
#endif
#if defined(KMP_ARCH_ARMV2)
-# define KMP_ARCH_ARM 1
+#define KMP_ARCH_ARM 1
#endif
#if defined(__MIC__) || defined(__MIC2__)
-# define KMP_MIC 1
-# if __MIC2__ || __KNC__
-# define KMP_MIC1 0
-# define KMP_MIC2 1
-# else
-# define KMP_MIC1 1
-# define KMP_MIC2 0
-# endif
-#else
-# define KMP_MIC 0
-# define KMP_MIC1 0
-# define KMP_MIC2 0
+#define KMP_MIC 1
+#if __MIC2__ || __KNC__
+#define KMP_MIC1 0
+#define KMP_MIC2 1
+#else
+#define KMP_MIC1 1
+#define KMP_MIC2 0
+#endif
+#else
+#define KMP_MIC 0
+#define KMP_MIC1 0
+#define KMP_MIC2 0
#endif
/* Specify 32 bit architectures here */
#define KMP_32_BIT_ARCH (KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS)
// TODO: Fixme - This is clever, but really fugly
-#if (1 != KMP_ARCH_X86 + KMP_ARCH_X86_64 + KMP_ARCH_ARM + KMP_ARCH_PPC64 + KMP_ARCH_AARCH64 + KMP_ARCH_MIPS + KMP_ARCH_MIPS64)
-# error Unknown or unsupported architecture
+#if (1 != \
+ KMP_ARCH_X86 + KMP_ARCH_X86_64 + KMP_ARCH_ARM + KMP_ARCH_PPC64 + \
+ KMP_ARCH_AARCH64 + KMP_ARCH_MIPS + KMP_ARCH_MIPS64)
+#error Unknown or unsupported architecture
#endif
#endif // KMP_PLATFORM_H
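
Both kmp_platform.h checks above ("1 != KMP_OS_..." and the architecture sum) enforce that the detection block set exactly one flag to 1. A tiny sketch of the same idiom outside this header, with made-up flag names:

  #define FLAG_A 0
  #define FLAG_B 1
  #define FLAG_C 0
  // Exactly one of the flags must be 1, otherwise compilation stops here.
  #if (1 != FLAG_A + FLAG_B + FLAG_C)
  #error exactly one FLAG_* must be set
  #endif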