[Openmp-commits] [openmp] r254637 - Replace DYNA_* names with KMP_* names

Jonathan Peyton via Openmp-commits openmp-commits at lists.llvm.org
Thu Dec 3 11:37:21 PST 2015


Author: jlpeyton
Date: Thu Dec  3 13:37:20 2015
New Revision: 254637

URL: http://llvm.org/viewvc/llvm-project?rev=254637&view=rev
Log:
Replace DYNA_* names with KMP_* names

Modified:
    openmp/trunk/runtime/src/kmp_csupport.c
    openmp/trunk/runtime/src/kmp_itt.inl
    openmp/trunk/runtime/src/kmp_lock.cpp
    openmp/trunk/runtime/src/kmp_lock.h
    openmp/trunk/runtime/src/kmp_settings.c

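All of the renamed DYNA_* macros operate on the dynamic lock word, whose layout the kmp_lock.h changes below define: the low KMP_LOCK_VALUE_SHIFT (8) bits hold the lock tag and the upper bits hold a value such as the owner's gtid. A minimal standalone sketch of that encoding, using a hypothetical value for locktag_tas (the real tag comes from KMP_GET_D_TAG(lockseq_tas)):

#include <stdio.h>

#define KMP_LOCK_VALUE_SHIFT 8
#define KMP_LOCK_TYPE_MASK   ((1 << KMP_LOCK_VALUE_SHIFT) - 1)

enum { locktag_tas = 1 };  /* hypothetical stand-in for the real tag */

#define KMP_LOCK_FREE(type)    (locktag_##type)
#define KMP_LOCK_BUSY(v, type) ((v) << KMP_LOCK_VALUE_SHIFT | locktag_##type)
#define KMP_LOCK_STRIP(v)      ((v) >> KMP_LOCK_VALUE_SHIFT)

int main(void) {
    int gtid = 5;                                  /* global thread id */
    unsigned word = KMP_LOCK_BUSY(gtid + 1, tas);  /* owner stored as gtid+1 */
    /* __kmp_get_tas_lock_owner() recovers the gtid the same way */
    printf("busy=0x%x owner=%d tag=%u free=0x%x\n", word,
           (int)KMP_LOCK_STRIP(word) - 1, word & KMP_LOCK_TYPE_MASK,
           (unsigned)KMP_LOCK_FREE(tas));
    return 0;
}
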
Modified: openmp/trunk/runtime/src/kmp_csupport.c
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp_csupport.c?rev=254637&r1=254636&r2=254637&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp_csupport.c (original)
+++ openmp/trunk/runtime/src/kmp_csupport.c Thu Dec  3 13:37:20 2015
@@ -926,12 +926,12 @@ __kmp_get_indirect_csptr(kmp_critical_na
     ret = (kmp_indirect_lock_t *)TCR_PTR(*lck);
     if (ret == NULL) {
         void *idx;
-        kmp_indirect_locktag_t tag = DYNA_GET_I_TAG(seq);
+        kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
         kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag);
         ret = ilk;
-        DYNA_I_LOCK_FUNC(ilk, init)(ilk->lock);
-        DYNA_SET_I_LOCK_LOCATION(ilk, loc);
-        DYNA_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section);
+        KMP_I_LOCK_FUNC(ilk, init)(ilk->lock);
+        KMP_SET_I_LOCK_LOCATION(ilk, loc);
+        KMP_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section);
         KA_TRACE(20, ("__kmp_get_indirect_csptr: initialized indirect lock #%d\n", tag));
 #if USE_ITT_BUILD
         __kmp_itt_critical_creating(ilk->lock, loc);
@@ -942,7 +942,7 @@ __kmp_get_indirect_csptr(kmp_critical_na
             __kmp_itt_critical_destroyed(ilk->lock);
 #endif
             // Postponing destroy, to avoid costly dispatch here.
-            //DYNA_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx);
+            //KMP_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx);
             ret = (kmp_indirect_lock_t *)TCR_PTR(*lck);
             KMP_DEBUG_ASSERT(ret != NULL);
         }
@@ -951,10 +951,10 @@ __kmp_get_indirect_csptr(kmp_critical_na
 }
 
 // Fast-path acquire tas lock
-#define DYNA_ACQUIRE_TAS_LOCK(lock, gtid) {                                                                      \
+#define KMP_ACQUIRE_TAS_LOCK(lock, gtid) {                                                                       \
     kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock;                                                                  \
-    if (l->lk.poll != DYNA_LOCK_FREE(tas) ||                                                                     \
-            ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), DYNA_LOCK_FREE(tas), DYNA_LOCK_BUSY(gtid+1, tas))) {    \
+    if (l->lk.poll != KMP_LOCK_FREE(tas) ||                                                                      \
+            ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) {      \
         kmp_uint32 spins;                                                                                        \
         KMP_FSYNC_PREPARE(l);                                                                                    \
         KMP_INIT_YIELD(spins);                                                                                   \
@@ -963,8 +963,8 @@ __kmp_get_indirect_csptr(kmp_critical_na
         } else {                                                                                                 \
             KMP_YIELD_SPIN(spins);                                                                               \
         }                                                                                                        \
-        while (l->lk.poll != DYNA_LOCK_FREE(tas) ||                                                              \
-               ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), DYNA_LOCK_FREE(tas), DYNA_LOCK_BUSY(gtid+1, tas))) { \
+        while (l->lk.poll != KMP_LOCK_FREE(tas) ||                                                               \
+               ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) {   \
             if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {                        \
                 KMP_YIELD(TRUE);                                                                                 \
             } else {                                                                                             \
@@ -976,19 +976,19 @@ __kmp_get_indirect_csptr(kmp_critical_na
 }
 
 // Fast-path test tas lock
-#define DYNA_TEST_TAS_LOCK(lock, gtid, rc) {                                                           \
+#define KMP_TEST_TAS_LOCK(lock, gtid, rc) {                                                            \
     kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock;                                                        \
-    rc = l->lk.poll == DYNA_LOCK_FREE(tas) &&                                                          \
-         KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), DYNA_LOCK_FREE(tas), DYNA_LOCK_BUSY(gtid+1, tas)); \
+    rc = l->lk.poll == KMP_LOCK_FREE(tas) &&                                                           \
+         KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas));   \
 }
 
 // Fast-path release tas lock
-#define DYNA_RELEASE_TAS_LOCK(lock, gtid) {                         \
-    TCW_4(((kmp_tas_lock_t *)lock)->lk.poll, DYNA_LOCK_FREE(tas));  \
+#define KMP_RELEASE_TAS_LOCK(lock, gtid) {                          \
+    TCW_4(((kmp_tas_lock_t *)lock)->lk.poll, KMP_LOCK_FREE(tas));   \
     KMP_MB();                                                       \
 }
 
-#if DYNA_HAS_FUTEX
+#if KMP_HAS_FUTEX
 
 # include <unistd.h>
 # include <sys/syscall.h>
@@ -1000,20 +1000,20 @@ __kmp_get_indirect_csptr(kmp_critical_na
 # endif
 
 // Fast-path acquire futex lock
-#define DYNA_ACQUIRE_FUTEX_LOCK(lock, gtid) {                                                                       \
+#define KMP_ACQUIRE_FUTEX_LOCK(lock, gtid) {                                                                        \
     kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock;                                                               \
     kmp_int32 gtid_code = (gtid+1) << 1;                                                                            \
     KMP_MB();                                                                                                       \
     KMP_FSYNC_PREPARE(ftx);                                                                                         \
     kmp_int32 poll_val;                                                                                             \
-    while ((poll_val = KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), DYNA_LOCK_FREE(futex),                          \
-                                                   DYNA_LOCK_BUSY(gtid_code, futex))) != DYNA_LOCK_FREE(futex)) {   \
-        kmp_int32 cond = DYNA_LOCK_STRIP(poll_val) & 1;                                                             \
+    while ((poll_val = KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), KMP_LOCK_FREE(futex),                           \
+                                                   KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) {     \
+        kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1;                                                              \
         if (!cond) {                                                                                                \
-            if (!KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), poll_val, poll_val | DYNA_LOCK_BUSY(1, futex))) {     \
+            if (!KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), poll_val, poll_val | KMP_LOCK_BUSY(1, futex))) {      \
                 continue;                                                                                           \
             }                                                                                                       \
-            poll_val |= DYNA_LOCK_BUSY(1, futex);                                                                   \
+            poll_val |= KMP_LOCK_BUSY(1, futex);                                                                    \
         }                                                                                                           \
         kmp_int32 rc;                                                                                               \
         if ((rc = syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAIT, poll_val, NULL, NULL, 0)) != 0) {                \
@@ -1025,9 +1025,9 @@ __kmp_get_indirect_csptr(kmp_critical_na
 }
 
 // Fast-path test futex lock
-#define DYNA_TEST_FUTEX_LOCK(lock, gtid, rc) {                                                                      \
+#define KMP_TEST_FUTEX_LOCK(lock, gtid, rc) {                                                                       \
     kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock;                                                               \
-    if (KMP_COMPARE_AND_STORE_ACQ32(&(ftx->lk.poll), DYNA_LOCK_FREE(futex), DYNA_LOCK_BUSY(gtid+1, futex) << 1)) {  \
+    if (KMP_COMPARE_AND_STORE_ACQ32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), KMP_LOCK_BUSY(gtid+1, futex) << 1)) {    \
         KMP_FSYNC_ACQUIRED(ftx);                                                                                    \
         rc = TRUE;                                                                                                  \
     } else {                                                                                                        \
@@ -1036,19 +1036,19 @@ __kmp_get_indirect_csptr(kmp_critical_na
 }
 
 // Fast-path release futex lock
-#define DYNA_RELEASE_FUTEX_LOCK(lock, gtid) {                                                       \
+#define KMP_RELEASE_FUTEX_LOCK(lock, gtid) {                                                        \
     kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock;                                               \
     KMP_MB();                                                                                       \
     KMP_FSYNC_RELEASING(ftx);                                                                       \
-    kmp_int32 poll_val = KMP_XCHG_FIXED32(&(ftx->lk.poll), DYNA_LOCK_FREE(futex));                  \
-    if (DYNA_LOCK_STRIP(poll_val) & 1) {                                                            \
-        syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAKE, DYNA_LOCK_BUSY(1, futex), NULL, NULL, 0);  \
+    kmp_int32 poll_val = KMP_XCHG_FIXED32(&(ftx->lk.poll), KMP_LOCK_FREE(futex));                   \
+    if (KMP_LOCK_STRIP(poll_val) & 1) {                                                             \
+        syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), NULL, NULL, 0);   \
     }                                                                                               \
     KMP_MB();                                                                                       \
     KMP_YIELD(TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));              \
 }
 
-#endif // DYNA_HAS_FUTEX
+#endif // KMP_HAS_FUTEX
 
 #else // KMP_USE_DYNAMIC_LOCK
 
@@ -1126,11 +1126,11 @@ __kmpc_critical( ident_t * loc, kmp_int3
 #if KMP_USE_DYNAMIC_LOCK
     // Assumption: all direct locks fit in OMP_CRITICAL_SIZE.
     // The global sequence __kmp_user_lock_seq is used unless compiler pushes a value.
-    if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
+    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
         lck = (kmp_user_lock_p)crit;
         // The thread that reaches here first needs to tag the lock word.
         if (*((kmp_dyna_lock_t *)lck) == 0) {
-            KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)lck, 0, DYNA_GET_D_TAG(__kmp_user_lock_seq));
+            KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)lck, 0, KMP_GET_D_TAG(__kmp_user_lock_seq));
         }
         if (__kmp_env_consistency_check) {
             __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
@@ -1138,17 +1138,17 @@ __kmpc_critical( ident_t * loc, kmp_int3
 # if USE_ITT_BUILD
         __kmp_itt_critical_acquiring(lck);
 # endif
-# if DYNA_USE_FAST_TAS
+# if KMP_USE_FAST_TAS
         if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
-            DYNA_ACQUIRE_TAS_LOCK(lck, global_tid);
+            KMP_ACQUIRE_TAS_LOCK(lck, global_tid);
         } else
-# elif DYNA_USE_FAST_FUTEX
+# elif KMP_USE_FAST_FUTEX
         if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
-            DYNA_ACQUIRE_FUTEX_LOCK(lck, global_tid);
+            KMP_ACQUIRE_FUTEX_LOCK(lck, global_tid);
         } else
 # endif
         {
-            DYNA_D_LOCK_FUNC(lck, set)((kmp_dyna_lock_t *)lck, global_tid);
+            KMP_D_LOCK_FUNC(lck, set)((kmp_dyna_lock_t *)lck, global_tid);
         }
     } else {
         kmp_indirect_lock_t *ilk = __kmp_get_indirect_csptr(crit, loc, global_tid, __kmp_user_lock_seq);
@@ -1159,7 +1159,7 @@ __kmpc_critical( ident_t * loc, kmp_int3
 # if USE_ITT_BUILD
         __kmp_itt_critical_acquiring(lck);
 # endif
-        DYNA_I_LOCK_FUNC(ilk, set)(lck, global_tid);
+        KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
     }
 
 #else // KMP_USE_DYNAMIC_LOCK
@@ -1223,7 +1223,7 @@ __kmpc_end_critical(ident_t *loc, kmp_in
     KC_TRACE( 10, ("__kmpc_end_critical: called T#%d\n", global_tid ));
 
 #if KMP_USE_DYNAMIC_LOCK
-    if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
+    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
         lck = (kmp_user_lock_p)crit;
         KMP_ASSERT(lck != NULL);
         if (__kmp_env_consistency_check) {
@@ -1232,17 +1232,17 @@ __kmpc_end_critical(ident_t *loc, kmp_in
 # if USE_ITT_BUILD
         __kmp_itt_critical_releasing( lck );
 # endif
-# if DYNA_USE_FAST_TAS
+# if KMP_USE_FAST_TAS
         if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
-            DYNA_RELEASE_TAS_LOCK(lck, global_tid);
+            KMP_RELEASE_TAS_LOCK(lck, global_tid);
         } else
-# elif DYNA_USE_FAST_FUTEX
+# elif KMP_USE_FAST_FUTEX
         if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
-            DYNA_RELEASE_FUTEX_LOCK(lck, global_tid);
+            KMP_RELEASE_FUTEX_LOCK(lck, global_tid);
         } else
 # endif
         {
-            DYNA_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
+            KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
         }
     } else {
         kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
@@ -1254,7 +1254,7 @@ __kmpc_end_critical(ident_t *loc, kmp_in
 # if USE_ITT_BUILD
         __kmp_itt_critical_releasing( lck );
 # endif
-        DYNA_I_LOCK_FUNC(ilk, unset)(lck, global_tid);
+        KMP_I_LOCK_FUNC(ilk, unset)(lck, global_tid);
     }
 
 #else // KMP_USE_DYNAMIC_LOCK
@@ -1757,15 +1757,15 @@ __kmpc_init_lock( ident_t * loc, kmp_int
     if (__kmp_env_consistency_check && user_lock == NULL) {
         KMP_FATAL(LockIsUninitialized, "omp_init_lock");
     }
-    if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
-        DYNA_INIT_D_LOCK(user_lock, __kmp_user_lock_seq);
+    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
+        KMP_INIT_D_LOCK(user_lock, __kmp_user_lock_seq);
 # if USE_ITT_BUILD
         __kmp_itt_lock_creating((kmp_user_lock_p)user_lock, NULL);
 # endif
     } else {
-        DYNA_INIT_I_LOCK(user_lock, __kmp_user_lock_seq);
-        kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(user_lock);
-        DYNA_SET_I_LOCK_LOCATION(ilk, loc);
+        KMP_INIT_I_LOCK(user_lock, __kmp_user_lock_seq);
+        kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock);
+        KMP_SET_I_LOCK_LOCATION(ilk, loc);
 # if USE_ITT_BUILD
         __kmp_itt_lock_creating(ilk->lock, loc);
 # endif
@@ -1828,7 +1828,7 @@ __kmpc_init_nest_lock( ident_t * loc, km
     kmp_dyna_lockseq_t nested_seq;
     switch (__kmp_user_lock_seq) {
         case lockseq_tas:       nested_seq = lockseq_nested_tas;        break;
-#if DYNA_HAS_FUTEX
+#if KMP_HAS_FUTEX
         case lockseq_futex:     nested_seq = lockseq_nested_futex;      break;
 #endif
         case lockseq_ticket:    nested_seq = lockseq_nested_ticket;     break;
@@ -1837,10 +1837,10 @@ __kmpc_init_nest_lock( ident_t * loc, km
         default:                nested_seq = lockseq_nested_queuing;    break;
                                 // Use nested queuing lock for lock kinds without "nested" implementation.
     }
-    DYNA_INIT_I_LOCK(user_lock, nested_seq);
+    KMP_INIT_I_LOCK(user_lock, nested_seq);
     // All nested locks are indirect locks.
-    kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(user_lock);
-    DYNA_SET_I_LOCK_LOCATION(ilk, loc);
+    kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock);
+    KMP_SET_I_LOCK_LOCATION(ilk, loc);
 # if USE_ITT_BUILD
     __kmp_itt_lock_creating(ilk->lock, loc);
 # endif
@@ -1897,14 +1897,14 @@ __kmpc_destroy_lock( ident_t * loc, kmp_
 
 # if USE_ITT_BUILD
     kmp_user_lock_p lck;
-    if (DYNA_EXTRACT_D_TAG(user_lock) == 0) {
-        lck = ((kmp_indirect_lock_t *)DYNA_LOOKUP_I_LOCK(user_lock))->lock;
+    if (KMP_EXTRACT_D_TAG(user_lock) == 0) {
+        lck = ((kmp_indirect_lock_t *)KMP_LOOKUP_I_LOCK(user_lock))->lock;
     } else {
         lck = (kmp_user_lock_p)user_lock;
     }
     __kmp_itt_lock_destroyed(lck);
 # endif
-    DYNA_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
+    KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
 #else
     kmp_user_lock_p lck;
 
@@ -1956,10 +1956,10 @@ __kmpc_destroy_nest_lock( ident_t * loc,
 #if KMP_USE_DYNAMIC_LOCK
 
 # if USE_ITT_BUILD
-    kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(user_lock);
+    kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock);
     __kmp_itt_lock_destroyed(ilk->lock);
 # endif
-    DYNA_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
+    KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
 
 #else // KMP_USE_DYNAMIC_LOCK
 
@@ -2014,17 +2014,17 @@ void
 __kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
     KMP_COUNT_BLOCK(OMP_set_lock);
 #if KMP_USE_DYNAMIC_LOCK
-    int tag = DYNA_EXTRACT_D_TAG(user_lock);
+    int tag = KMP_EXTRACT_D_TAG(user_lock);
 # if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); // itt function will get to the right lock object.
 # endif
-# if DYNA_USE_FAST_TAS
+# if KMP_USE_FAST_TAS
     if (tag == locktag_tas && !__kmp_env_consistency_check) {
-        DYNA_ACQUIRE_TAS_LOCK(user_lock, gtid);
+        KMP_ACQUIRE_TAS_LOCK(user_lock, gtid);
     } else
-# elif DYNA_USE_FAST_FUTEX
+# elif KMP_USE_FAST_FUTEX
     if (tag == locktag_futex && !__kmp_env_consistency_check) {
-        DYNA_ACQUIRE_FUTEX_LOCK(user_lock, gtid);
+        KMP_ACQUIRE_FUTEX_LOCK(user_lock, gtid);
     } else
 # endif
     {
@@ -2079,7 +2079,7 @@ __kmpc_set_nest_lock( ident_t * loc, kmp
 # if USE_ITT_BUILD
     __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
 # endif
-    DYNA_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid);
+    KMP_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid);
 # if USE_ITT_BUILD
     __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
 #endif
@@ -2132,17 +2132,17 @@ __kmpc_unset_lock( ident_t *loc, kmp_int
 {
 #if KMP_USE_DYNAMIC_LOCK
 
-    int tag = DYNA_EXTRACT_D_TAG(user_lock);
+    int tag = KMP_EXTRACT_D_TAG(user_lock);
 # if USE_ITT_BUILD
     __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
 # endif
-# if DYNA_USE_FAST_TAS
+# if KMP_USE_FAST_TAS
     if (tag == locktag_tas && !__kmp_env_consistency_check) {
-        DYNA_RELEASE_TAS_LOCK(user_lock, gtid);
+        KMP_RELEASE_TAS_LOCK(user_lock, gtid);
     } else
-# elif DYNA_USE_FAST_FUTEX
+# elif KMP_USE_FAST_FUTEX
     if (tag == locktag_futex && !__kmp_env_consistency_check) {
-        DYNA_RELEASE_FUTEX_LOCK(user_lock, gtid);
+        KMP_RELEASE_FUTEX_LOCK(user_lock, gtid);
     } else
 # endif
     {
@@ -2205,7 +2205,7 @@ __kmpc_unset_nest_lock( ident_t *loc, km
 # if USE_ITT_BUILD
     __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
 # endif
-    DYNA_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid);
+    KMP_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid);
 
 #else // KMP_USE_DYNAMIC_LOCK
 
@@ -2272,17 +2272,17 @@ __kmpc_test_lock( ident_t *loc, kmp_int3
 
 #if KMP_USE_DYNAMIC_LOCK
     int rc;
-    int tag = DYNA_EXTRACT_D_TAG(user_lock);
+    int tag = KMP_EXTRACT_D_TAG(user_lock);
 # if USE_ITT_BUILD
     __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
 # endif
-# if DYNA_USE_FAST_TAS
+# if KMP_USE_FAST_TAS
     if (tag == locktag_tas && !__kmp_env_consistency_check) {
-        DYNA_TEST_TAS_LOCK(user_lock, gtid, rc);
+        KMP_TEST_TAS_LOCK(user_lock, gtid, rc);
     } else
-# elif DYNA_USE_FAST_FUTEX
+# elif KMP_USE_FAST_FUTEX
     if (tag == locktag_futex && !__kmp_env_consistency_check) {
-        DYNA_TEST_FUTEX_LOCK(user_lock, gtid, rc);
+        KMP_TEST_FUTEX_LOCK(user_lock, gtid, rc);
     } else
 # endif
     {
@@ -2347,7 +2347,7 @@ __kmpc_test_nest_lock( ident_t *loc, kmp
 # if USE_ITT_BUILD
     __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
 # endif
-    rc = DYNA_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid);
+    rc = KMP_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid);
 # if USE_ITT_BUILD
     if (rc) {
         __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
@@ -2427,23 +2427,23 @@ __kmp_enter_critical_section_reduce_bloc
 
 #if KMP_USE_DYNAMIC_LOCK
 
-    if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
+    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
         lck = (kmp_user_lock_p)crit;
         if (*((kmp_dyna_lock_t *)lck) == 0) {
-            KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)lck, 0, DYNA_GET_D_TAG(__kmp_user_lock_seq));
+            KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)lck, 0, KMP_GET_D_TAG(__kmp_user_lock_seq));
         }
         KMP_DEBUG_ASSERT(lck != NULL);
         if (__kmp_env_consistency_check) {
             __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
         }
-        DYNA_D_LOCK_FUNC(lck, set)((kmp_dyna_lock_t *)lck, global_tid);
+        KMP_D_LOCK_FUNC(lck, set)((kmp_dyna_lock_t *)lck, global_tid);
     } else {
         kmp_indirect_lock_t *ilk = __kmp_get_indirect_csptr(crit, loc, global_tid, __kmp_user_lock_seq);
         KMP_DEBUG_ASSERT(ilk != NULL);
         if (__kmp_env_consistency_check) {
             __kmp_push_sync(global_tid, ct_critical, loc, ilk->lock, __kmp_user_lock_seq);
         }
-        DYNA_I_LOCK_FUNC(ilk, set)(ilk->lock, global_tid);
+        KMP_I_LOCK_FUNC(ilk, set)(ilk->lock, global_tid);
     }
 
 #else // KMP_USE_DYNAMIC_LOCK
@@ -2475,16 +2475,16 @@ __kmp_end_critical_section_reduce_block(
 
 #if KMP_USE_DYNAMIC_LOCK
 
-    if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
+    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
         lck = (kmp_user_lock_p)crit;
         if (__kmp_env_consistency_check)
             __kmp_pop_sync(global_tid, ct_critical, loc);
-        DYNA_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
+        KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
     } else {
         kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
         if (__kmp_env_consistency_check)
             __kmp_pop_sync(global_tid, ct_critical, loc);
-        DYNA_I_LOCK_FUNC(ilk, unset)(ilk->lock, global_tid);
+        KMP_I_LOCK_FUNC(ilk, unset)(ilk->lock, global_tid);
     }
 
 #else // KMP_USE_DYNAMIC_LOCK

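The KMP_ACQUIRE_TAS_LOCK fast path above is a test-and-test-and-set loop. As a simplified, self-contained sketch, with C11 atomics standing in for KMP_COMPARE_AND_STORE_ACQ32, free_val/busy_val for KMP_LOCK_FREE(tas)/KMP_LOCK_BUSY(gtid+1, tas), and sched_yield() for the KMP_INIT_YIELD/KMP_YIELD_SPIN throttling:

#include <sched.h>
#include <stdatomic.h>

static void tas_acquire_sketch(atomic_int *poll, int free_val, int busy_val)
{
    int expected = free_val;
    /* fast path: one acquire-CAS while the word looks free */
    if (atomic_compare_exchange_strong_explicit(
            poll, &expected, busy_val,
            memory_order_acquire, memory_order_relaxed))
        return;
    /* slow path: re-test the word before every CAS retry */
    for (;;) {
        if (atomic_load_explicit(poll, memory_order_relaxed) == free_val) {
            expected = free_val;
            if (atomic_compare_exchange_strong_explicit(
                    poll, &expected, busy_val,
                    memory_order_acquire, memory_order_relaxed))
                return;
        }
        sched_yield();  /* the real macros choose between pausing and yielding */
    }
}
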
Modified: openmp/trunk/runtime/src/kmp_itt.inl
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp_itt.inl?rev=254637&r1=254636&r2=254637&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp_itt.inl (original)
+++ openmp/trunk/runtime/src/kmp_itt.inl Thu Dec  3 13:37:20 2015
@@ -798,8 +798,8 @@ __kmp_itt_lock_acquiring( kmp_user_lock_
 #if KMP_USE_DYNAMIC_LOCK && USE_ITT_NOTIFY
     // postpone lock object access
     if ( __itt_sync_prepare_ptr ) {
-        if ( DYNA_EXTRACT_D_TAG(lock) == 0 ) {
-            kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(lock);
+        if ( KMP_EXTRACT_D_TAG(lock) == 0 ) {
+            kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
             __itt_sync_prepare( ilk->lock );
         } else {
             __itt_sync_prepare( lock );
@@ -815,8 +815,8 @@ __kmp_itt_lock_acquired( kmp_user_lock_p
 #if KMP_USE_DYNAMIC_LOCK && USE_ITT_NOTIFY
     // postpone lock object access
     if ( __itt_sync_acquired_ptr ) {
-        if ( DYNA_EXTRACT_D_TAG(lock) == 0 ) {
-            kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(lock);
+        if ( KMP_EXTRACT_D_TAG(lock) == 0 ) {
+            kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
             __itt_sync_acquired( ilk->lock );
         } else {
             __itt_sync_acquired( lock );
@@ -831,8 +831,8 @@ void
 __kmp_itt_lock_releasing( kmp_user_lock_p lock ) {
 #if KMP_USE_DYNAMIC_LOCK && USE_ITT_NOTIFY
     if ( __itt_sync_releasing_ptr ) {
-        if ( DYNA_EXTRACT_D_TAG(lock) == 0 ) {
-            kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(lock);
+        if ( KMP_EXTRACT_D_TAG(lock) == 0 ) {
+            kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
             __itt_sync_releasing( ilk->lock );
         } else {
             __itt_sync_releasing( lock );
@@ -847,8 +847,8 @@ void
 __kmp_itt_lock_cancelled( kmp_user_lock_p lock ) {
 #if KMP_USE_DYNAMIC_LOCK && USE_ITT_NOTIFY
     if ( __itt_sync_cancel_ptr ) {
-        if ( DYNA_EXTRACT_D_TAG(lock) == 0 ) {
-            kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(lock);
+        if ( KMP_EXTRACT_D_TAG(lock) == 0 ) {
+            kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
             __itt_sync_cancel( ilk->lock );
         } else {
             __itt_sync_cancel( lock );

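The tag test in these ITT hooks exploits the lock-word layout from kmp_lock.h: KMP_GET_D_TAG(seq) is (seq<<1 | 1), so every direct lock word has bit 0 set, and KMP_EXTRACT_D_TAG yields 0 exactly when the word instead holds an indirect lock handle. A sketch of the test, with the macro body copied from the kmp_lock.h hunk later in this commit:

typedef unsigned int kmp_dyna_lock_t;

#define KMP_LOCK_VALUE_SHIFT 8
#define KMP_LOCK_TYPE_MASK   ((1 << KMP_LOCK_VALUE_SHIFT) - 1)

/* tag & mask, forced to 0 when bit 0 of the lock word is clear */
#define KMP_EXTRACT_D_TAG(l) \
    (*((kmp_dyna_lock_t *)(l)) & KMP_LOCK_TYPE_MASK & -(*((kmp_dyna_lock_t *)(l)) & 1))

static int lock_is_direct(void *user_lock)
{
    return KMP_EXTRACT_D_TAG(user_lock) != 0;
}
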
Modified: openmp/trunk/runtime/src/kmp_lock.cpp
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp_lock.cpp?rev=254637&r1=254636&r2=254637&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp_lock.cpp (original)
+++ openmp/trunk/runtime/src/kmp_lock.cpp Thu Dec  3 13:37:20 2015
@@ -75,7 +75,7 @@ __kmp_validate_locks( void )
 static kmp_int32
 __kmp_get_tas_lock_owner( kmp_tas_lock_t *lck )
 {
-    return DYNA_LOCK_STRIP(TCR_4( lck->lk.poll )) - 1;
+    return KMP_LOCK_STRIP(TCR_4( lck->lk.poll )) - 1;
 }
 
 static inline bool
@@ -96,8 +96,8 @@ __kmp_acquire_tas_lock_timed_template( k
     /* else __kmp_printf( "." );*/
 #endif /* USE_LOCK_PROFILE */
 
-    if ( ( lck->lk.poll == DYNA_LOCK_FREE(tas) )
-      && KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), DYNA_LOCK_FREE(tas), DYNA_LOCK_BUSY(gtid+1, tas) ) ) {
+    if ( ( lck->lk.poll == KMP_LOCK_FREE(tas) )
+      && KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas) ) ) {
         KMP_FSYNC_ACQUIRED(lck);
         return KMP_LOCK_ACQUIRED_FIRST;
     }
@@ -113,8 +113,8 @@ __kmp_acquire_tas_lock_timed_template( k
         KMP_YIELD_SPIN( spins );
     }
 
-    while ( ( lck->lk.poll != DYNA_LOCK_FREE(tas) ) ||
-      ( ! KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), DYNA_LOCK_FREE(tas), DYNA_LOCK_BUSY(gtid+1, tas) ) ) ) {
+    while ( ( lck->lk.poll != KMP_LOCK_FREE(tas) ) ||
+      ( ! KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas) ) ) ) {
         //
         // FIXME - use exponential backoff here
         //
@@ -153,8 +153,8 @@ __kmp_acquire_tas_lock_with_checks( kmp_
 int
 __kmp_test_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
 {
-    if ( ( lck->lk.poll == DYNA_LOCK_FREE(tas) )
-      && KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), DYNA_LOCK_FREE(tas), DYNA_LOCK_BUSY(gtid+1, tas) ) ) {
+    if ( ( lck->lk.poll == KMP_LOCK_FREE(tas) )
+      && KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas) ) ) {
         KMP_FSYNC_ACQUIRED( lck );
         return TRUE;
     }
@@ -178,7 +178,7 @@ __kmp_release_tas_lock( kmp_tas_lock_t *
     KMP_MB();       /* Flush all pending memory write invalidates.  */
 
     KMP_FSYNC_RELEASING(lck);
-    KMP_ST_REL32( &(lck->lk.poll), DYNA_LOCK_FREE(tas) );
+    KMP_ST_REL32( &(lck->lk.poll), KMP_LOCK_FREE(tas) );
     KMP_MB();       /* Flush all pending memory write invalidates.  */
 
     KMP_YIELD( TCR_4( __kmp_nth ) > ( __kmp_avail_proc ? __kmp_avail_proc :
@@ -208,7 +208,7 @@ __kmp_release_tas_lock_with_checks( kmp_
 void
 __kmp_init_tas_lock( kmp_tas_lock_t * lck )
 {
-    TCW_4( lck->lk.poll, DYNA_LOCK_FREE(tas) );
+    TCW_4( lck->lk.poll, KMP_LOCK_FREE(tas) );
 }
 
 static void
@@ -375,7 +375,7 @@ __kmp_destroy_nested_tas_lock_with_check
 static kmp_int32
 __kmp_get_futex_lock_owner( kmp_futex_lock_t *lck )
 {
-    return DYNA_LOCK_STRIP(( TCR_4( lck->lk.poll ) >> 1 )) - 1;
+    return KMP_LOCK_STRIP(( TCR_4( lck->lk.poll ) >> 1 )) - 1;
 }
 
 static inline bool
@@ -404,10 +404,10 @@ __kmp_acquire_futex_lock_timed_template(
 
     kmp_int32 poll_val;
 
-    while ( ( poll_val = KMP_COMPARE_AND_STORE_RET32( & ( lck->lk.poll ), DYNA_LOCK_FREE(futex),
-             DYNA_LOCK_BUSY(gtid_code, futex) ) ) != DYNA_LOCK_FREE(futex) ) {
+    while ( ( poll_val = KMP_COMPARE_AND_STORE_RET32( & ( lck->lk.poll ), KMP_LOCK_FREE(futex),
+             KMP_LOCK_BUSY(gtid_code, futex) ) ) != KMP_LOCK_FREE(futex) ) {
 
-        kmp_int32 cond = DYNA_LOCK_STRIP(poll_val) & 1;
+        kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1;
         KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d poll_val = 0x%x cond = 0x%x\n",
            lck, gtid, poll_val, cond ) );
 
@@ -424,12 +424,12 @@ __kmp_acquire_futex_lock_timed_template(
             // Try to set the lsb in the poll to indicate to the owner
             // thread that they need to wake this thread up.
             //
-            if ( ! KMP_COMPARE_AND_STORE_REL32( & ( lck->lk.poll ), poll_val, poll_val | DYNA_LOCK_BUSY(1, futex) ) ) {
+            if ( ! KMP_COMPARE_AND_STORE_REL32( & ( lck->lk.poll ), poll_val, poll_val | KMP_LOCK_BUSY(1, futex) ) ) {
                 KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d can't set bit 0\n",
                   lck, lck->lk.poll, gtid ) );
                 continue;
             }
-            poll_val |= DYNA_LOCK_BUSY(1, futex);
+            poll_val |= KMP_LOCK_BUSY(1, futex);
 
             KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d bit 0 set\n",
               lck, lck->lk.poll, gtid ) );
@@ -486,7 +486,7 @@ __kmp_acquire_futex_lock_with_checks( km
 int
 __kmp_test_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
 {
-    if ( KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), DYNA_LOCK_FREE(futex), DYNA_LOCK_BUSY(gtid+1, futex) << 1 ) ) {
+    if ( KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), KMP_LOCK_FREE(futex), KMP_LOCK_BUSY(gtid+1, futex) << 1 ) ) {
         KMP_FSYNC_ACQUIRED( lck );
         return TRUE;
     }
@@ -514,15 +514,15 @@ __kmp_release_futex_lock( kmp_futex_lock
 
     KMP_FSYNC_RELEASING(lck);
 
-    kmp_int32 poll_val = KMP_XCHG_FIXED32( & ( lck->lk.poll ), DYNA_LOCK_FREE(futex) );
+    kmp_int32 poll_val = KMP_XCHG_FIXED32( & ( lck->lk.poll ), KMP_LOCK_FREE(futex) );
 
     KA_TRACE( 1000, ("__kmp_release_futex_lock: lck:%p, T#%d released poll_val = 0x%x\n",
        lck, gtid, poll_val ) );
 
-    if ( DYNA_LOCK_STRIP(poll_val) & 1 ) {
+    if ( KMP_LOCK_STRIP(poll_val) & 1 ) {
         KA_TRACE( 1000, ("__kmp_release_futex_lock: lck:%p, T#%d futex_wake 1 thread\n",
            lck, gtid ) );
-        syscall( __NR_futex, & ( lck->lk.poll ), FUTEX_WAKE, DYNA_LOCK_BUSY(1, futex), NULL, NULL, 0 );
+        syscall( __NR_futex, & ( lck->lk.poll ), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), NULL, NULL, 0 );
     }
 
     KMP_MB();       /* Flush all pending memory write invalidates.  */
@@ -557,7 +557,7 @@ __kmp_release_futex_lock_with_checks( km
 void
 __kmp_init_futex_lock( kmp_futex_lock_t * lck )
 {
-    TCW_4( lck->lk.poll, DYNA_LOCK_FREE(futex) );
+    TCW_4( lck->lk.poll, KMP_LOCK_FREE(futex) );
 }
 
 static void
@@ -3019,13 +3019,13 @@ typedef enum kmp_lock_hint_t {
 // Direct lock initializers. Each one simply writes a tag to the low 8 bits of the lock word.
 #define expand_init_lock(l, a)                                              \
 static void init_##l##_lock(kmp_dyna_lock_t *lck, kmp_dyna_lockseq_t seq) { \
-    *lck = DYNA_LOCK_FREE(l);                                               \
+    *lck = KMP_LOCK_FREE(l);                                                \
     KA_TRACE(20, ("Initialized direct lock, tag = %x\n", *lck));            \
 }
 FOREACH_D_LOCK(expand_init_lock, 0)
 #undef expand_init_lock
 
-#if DYNA_HAS_HLE
+#if KMP_HAS_HLE
 
 // HLE lock functions - imported from the testbed runtime.
 #if KMP_MIC
@@ -3055,16 +3055,16 @@ __kmp_destroy_hle_lock(kmp_dyna_lock_t *
 static void
 __kmp_acquire_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid)
 {
-    // Use gtid for DYNA_LOCK_BUSY if necessary
-    if (swap4(lck, DYNA_LOCK_BUSY(1, hle)) != DYNA_LOCK_FREE(hle)) {
+    // Use gtid for KMP_LOCK_BUSY if necessary
+    if (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle)) {
         int delay = 1;
         do {
-            while (*(kmp_uint32 volatile *)lck != DYNA_LOCK_FREE(hle)) {
+            while (*(kmp_uint32 volatile *)lck != KMP_LOCK_FREE(hle)) {
                 for (int i = delay; i != 0; --i)
                     machine_pause();
                 delay = ((delay << 1) | 1) & 7;
             }
-        } while (swap4(lck, DYNA_LOCK_BUSY(1, hle)) != DYNA_LOCK_FREE(hle));
+        } while (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle));
     }
 }
 
@@ -3079,7 +3079,7 @@ __kmp_release_hle_lock(kmp_dyna_lock_t *
 {
     __asm__ volatile(HLE_RELEASE "movl %1,%0"
                     : "=m"(*lck)
-                    : "r"(DYNA_LOCK_FREE(hle))
+                    : "r"(KMP_LOCK_FREE(hle))
                     : "memory");
 }
 
@@ -3092,7 +3092,7 @@ __kmp_release_hle_lock_with_checks(kmp_d
 static int
 __kmp_test_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid)
 {
-    return swap4(lck, DYNA_LOCK_BUSY(1, hle)) == DYNA_LOCK_FREE(hle);
+    return swap4(lck, KMP_LOCK_BUSY(1, hle)) == KMP_LOCK_FREE(hle);
 }
 
 static int
@@ -3101,7 +3101,7 @@ __kmp_test_hle_lock_with_checks(kmp_dyna
     return __kmp_test_hle_lock(lck, gtid); // TODO: add checks
 }
 
-#endif // DYNA_HAS_HLE
+#endif // KMP_HAS_HLE
 
 // Entry functions for indirect locks (first element of direct_*_ops[]).
 static void __kmp_init_indirect_lock(kmp_dyna_lock_t * l, kmp_dyna_lockseq_t tag);
@@ -3128,16 +3128,16 @@ void (*__kmp_direct_destroy_ops[])(kmp_d
 // Differentiates *lock and *lock_with_checks.
 #define expand_func2(l, op)  0,(void (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_##lock,
 #define expand_func2c(l, op) 0,(void (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_##lock_with_checks,
-static void (*direct_set_tab[][DYNA_NUM_D_LOCKS*2+2])(kmp_dyna_lock_t *, kmp_int32)
+static void (*direct_set_tab[][KMP_NUM_D_LOCKS*2+2])(kmp_dyna_lock_t *, kmp_int32)
     = { { __kmp_set_indirect_lock, 0, FOREACH_D_LOCK(expand_func2, acquire)  },
         { __kmp_set_indirect_lock_with_checks, 0, FOREACH_D_LOCK(expand_func2c, acquire) } };
-static void (*direct_unset_tab[][DYNA_NUM_D_LOCKS*2+2])(kmp_dyna_lock_t *, kmp_int32)
+static void (*direct_unset_tab[][KMP_NUM_D_LOCKS*2+2])(kmp_dyna_lock_t *, kmp_int32)
     = { { __kmp_unset_indirect_lock, 0, FOREACH_D_LOCK(expand_func2, release)  },
         { __kmp_unset_indirect_lock_with_checks, 0, FOREACH_D_LOCK(expand_func2c, release) } };
 
 #define expand_func3(l, op)  0,(int  (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_##lock,
 #define expand_func3c(l, op) 0,(int  (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_##lock_with_checks,
-static int  (*direct_test_tab[][DYNA_NUM_D_LOCKS*2+2])(kmp_dyna_lock_t *, kmp_int32)
+static int  (*direct_test_tab[][KMP_NUM_D_LOCKS*2+2])(kmp_dyna_lock_t *, kmp_int32)
     = { { __kmp_test_indirect_lock, 0, FOREACH_D_LOCK(expand_func3, test)  },
         { __kmp_test_indirect_lock_with_checks, 0, FOREACH_D_LOCK(expand_func3c, test) } };
 
@@ -3158,16 +3158,16 @@ void (*__kmp_indirect_destroy_ops[])(kmp
 // Differentiates *lock and *lock_with_checks.
 #define expand_func5(l, op)  (void (*)(kmp_user_lock_p, kmp_int32))__kmp_##op##_##l##_##lock,
 #define expand_func5c(l, op) (void (*)(kmp_user_lock_p, kmp_int32))__kmp_##op##_##l##_##lock_with_checks,
-static void (*indirect_set_tab[][DYNA_NUM_I_LOCKS])(kmp_user_lock_p, kmp_int32)
+static void (*indirect_set_tab[][KMP_NUM_I_LOCKS])(kmp_user_lock_p, kmp_int32)
     = { { FOREACH_I_LOCK(expand_func5, acquire)  },
         { FOREACH_I_LOCK(expand_func5c, acquire) } };
-static void (*indirect_unset_tab[][DYNA_NUM_I_LOCKS])(kmp_user_lock_p, kmp_int32)
+static void (*indirect_unset_tab[][KMP_NUM_I_LOCKS])(kmp_user_lock_p, kmp_int32)
     = { { FOREACH_I_LOCK(expand_func5, release)  },
         { FOREACH_I_LOCK(expand_func5c, release) } };
 
 #define expand_func6(l, op)  (int  (*)(kmp_user_lock_p, kmp_int32))__kmp_##op##_##l##_##lock,
 #define expand_func6c(l, op) (int  (*)(kmp_user_lock_p, kmp_int32))__kmp_##op##_##l##_##lock_with_checks,
-static int  (*indirect_test_tab[][DYNA_NUM_I_LOCKS])(kmp_user_lock_p, kmp_int32)
+static int  (*indirect_test_tab[][KMP_NUM_I_LOCKS])(kmp_user_lock_p, kmp_int32)
     = { { FOREACH_I_LOCK(expand_func6, test)  },
         { FOREACH_I_LOCK(expand_func6c, test) } };
 
@@ -3182,14 +3182,14 @@ kmp_lock_index_t __kmp_indirect_lock_tab
 kmp_lock_index_t __kmp_indirect_lock_table_next;
 
 // Size of indirect locks.
-static kmp_uint32 __kmp_indirect_lock_size[DYNA_NUM_I_LOCKS] = {
+static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = {
     sizeof(kmp_ticket_lock_t),      sizeof(kmp_queuing_lock_t),
 #if KMP_USE_ADAPTIVE_LOCKS
     sizeof(kmp_adaptive_lock_t),
 #endif
     sizeof(kmp_drdpa_lock_t),
     sizeof(kmp_tas_lock_t),
-#if DYNA_HAS_FUTEX
+#if KMP_HAS_FUTEX
     sizeof(kmp_futex_lock_t),
 #endif
     sizeof(kmp_ticket_lock_t),      sizeof(kmp_queuing_lock_t),
@@ -3197,13 +3197,13 @@ static kmp_uint32 __kmp_indirect_lock_si
 };
 
 // Jump tables for lock accessor/modifier.
-void (*__kmp_indirect_set_location[DYNA_NUM_I_LOCKS])(kmp_user_lock_p, const ident_t *) = { 0 };
-void (*__kmp_indirect_set_flags[DYNA_NUM_I_LOCKS])(kmp_user_lock_p, kmp_lock_flags_t) = { 0 };
-const ident_t * (*__kmp_indirect_get_location[DYNA_NUM_I_LOCKS])(kmp_user_lock_p) = { 0 };
-kmp_lock_flags_t (*__kmp_indirect_get_flags[DYNA_NUM_I_LOCKS])(kmp_user_lock_p) = { 0 };
+void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p, const ident_t *) = { 0 };
+void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p, kmp_lock_flags_t) = { 0 };
+const ident_t * (*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p) = { 0 };
+kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p) = { 0 };
 
 // Use different lock pools for different lock types.
-static kmp_indirect_lock_t * __kmp_indirect_lock_pool[DYNA_NUM_I_LOCKS] = { 0 };
+static kmp_indirect_lock_t * __kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = { 0 };
 
 // Inserts the given lock ptr to the lock table.
 kmp_lock_index_t 
@@ -3271,7 +3271,7 @@ __kmp_lookup_indirect_lock(void **user_l
             KMP_FATAL(LockIsUninitialized, func);
         }
         if (OMP_LOCK_T_SIZE < sizeof(void *)) {
-            kmp_lock_index_t idx = DYNA_EXTRACT_I_INDEX(user_lock);
+            kmp_lock_index_t idx = KMP_EXTRACT_I_INDEX(user_lock);
             if (idx < 0 || idx >= __kmp_indirect_lock_table_size) {
                 KMP_FATAL(LockIsUninitialized, func);
             }
@@ -3285,7 +3285,7 @@ __kmp_lookup_indirect_lock(void **user_l
         return lck; 
     } else {
         if (OMP_LOCK_T_SIZE < sizeof(void *)) {
-            return __kmp_indirect_lock_table[DYNA_EXTRACT_I_INDEX(user_lock)];
+            return __kmp_indirect_lock_table[KMP_EXTRACT_I_INDEX(user_lock)];
         } else {
             return *((kmp_indirect_lock_t **)user_lock);
         }
@@ -3301,9 +3301,9 @@ __kmp_init_indirect_lock(kmp_dyna_lock_t
         seq = lockseq_queuing;
     }
 #endif
-    kmp_indirect_locktag_t tag = DYNA_GET_I_TAG(seq);
+    kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
     kmp_indirect_lock_t *l = __kmp_allocate_indirect_lock((void **)lock, __kmp_entry_gtid(), tag);
-    DYNA_I_LOCK_FUNC(l, init)(l->lock);
+    KMP_I_LOCK_FUNC(l, init)(l->lock);
     KA_TRACE(20, ("__kmp_init_indirect_lock: initialized indirect lock, tag = %x\n", l->type));
 }
 
@@ -3312,7 +3312,7 @@ __kmp_destroy_indirect_lock(kmp_dyna_loc
 {
     kmp_uint32 gtid = __kmp_entry_gtid();
     kmp_indirect_lock_t *l = __kmp_lookup_indirect_lock((void **)lock, "omp_destroy_lock");
-    DYNA_I_LOCK_FUNC(l, destroy)(l->lock);
+    KMP_I_LOCK_FUNC(l, destroy)(l->lock);
     kmp_indirect_locktag_t tag = l->type;
 
     __kmp_acquire_lock(&__kmp_global_lock, gtid);
@@ -3320,7 +3320,7 @@ __kmp_destroy_indirect_lock(kmp_dyna_loc
     // Use the base lock's space to keep the pool chain.
     l->lock->pool.next = (kmp_user_lock_p)__kmp_indirect_lock_pool[tag];
     if (OMP_LOCK_T_SIZE < sizeof(void *)) {
-        l->lock->pool.index = DYNA_EXTRACT_I_INDEX(lock);
+        l->lock->pool.index = KMP_EXTRACT_I_INDEX(lock);
     }
     __kmp_indirect_lock_pool[tag] = l;
 
@@ -3330,43 +3330,43 @@ __kmp_destroy_indirect_lock(kmp_dyna_loc
 static void
 __kmp_set_indirect_lock(kmp_dyna_lock_t * lock, kmp_int32 gtid)
 {
-    kmp_indirect_lock_t *l = DYNA_LOOKUP_I_LOCK(lock);
-    DYNA_I_LOCK_FUNC(l, set)(l->lock, gtid);
+    kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
+    KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
 }
 
 static void
 __kmp_unset_indirect_lock(kmp_dyna_lock_t * lock, kmp_int32 gtid)
 {
-    kmp_indirect_lock_t *l = DYNA_LOOKUP_I_LOCK(lock);
-    DYNA_I_LOCK_FUNC(l, unset)(l->lock, gtid);
+    kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
+    KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
 }
 
 static int
 __kmp_test_indirect_lock(kmp_dyna_lock_t * lock, kmp_int32 gtid)
 {
-    kmp_indirect_lock_t *l = DYNA_LOOKUP_I_LOCK(lock);
-    return DYNA_I_LOCK_FUNC(l, test)(l->lock, gtid);
+    kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
+    return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
 }
 
 static void
 __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t * lock, kmp_int32 gtid)
 {
     kmp_indirect_lock_t *l = __kmp_lookup_indirect_lock((void **)lock, "omp_set_lock");
-    DYNA_I_LOCK_FUNC(l, set)(l->lock, gtid);
+    KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
 }
 
 static void
 __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t * lock, kmp_int32 gtid)
 {
     kmp_indirect_lock_t *l = __kmp_lookup_indirect_lock((void **)lock, "omp_unset_lock");
-    DYNA_I_LOCK_FUNC(l, unset)(l->lock, gtid);
+    KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
 }
 
 static int
 __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t * lock, kmp_int32 gtid)
 {
     kmp_indirect_lock_t *l = __kmp_lookup_indirect_lock((void **)lock, "omp_test_lock");
-    return DYNA_I_LOCK_FUNC(l, test)(l->lock, gtid);
+    return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
 }
 
 kmp_dyna_lockseq_t __kmp_user_lock_seq = lockseq_queuing;
@@ -3381,7 +3381,7 @@ __kmp_init_lock_hinted(void **lock, int
             seq = lockseq_tas;
             break;
         case kmp_lock_hint_speculative:
-#if DYNA_HAS_HLE
+#if KMP_HAS_HLE
             seq = lockseq_hle;
 #else
             seq = lockseq_tas;
@@ -3401,15 +3401,15 @@ __kmp_init_lock_hinted(void **lock, int
             seq = lockseq_queuing;
             break;
     }
-    if (DYNA_IS_D_LOCK(seq)) {
-        DYNA_INIT_D_LOCK(lock, seq);
+    if (KMP_IS_D_LOCK(seq)) {
+        KMP_INIT_D_LOCK(lock, seq);
 #if USE_ITT_BUILD
         __kmp_itt_lock_creating((kmp_user_lock_p)lock, NULL);
 #endif
     } else {
-        DYNA_INIT_I_LOCK(lock, seq);
+        KMP_INIT_I_LOCK(lock, seq);
 #if USE_ITT_BUILD
-        kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(lock);
+        kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
         __kmp_itt_lock_creating(ilk->lock, NULL);
 #endif
     }
@@ -3423,7 +3423,7 @@ __kmp_get_user_lock_owner(kmp_user_lock_
         case lockseq_tas:
         case lockseq_nested_tas:
             return __kmp_get_tas_lock_owner((kmp_tas_lock_t *)lck);
-#if DYNA_HAS_FUTEX
+#if KMP_HAS_FUTEX
         case lockseq_futex:
         case lockseq_nested_futex:
             return __kmp_get_futex_lock_owner((kmp_futex_lock_t *)lck);
@@ -3462,9 +3462,9 @@ __kmp_init_nest_lock_hinted(void **lock,
             seq = lockseq_nested_queuing;
             break;
     }
-    DYNA_INIT_I_LOCK(lock, seq);
+    KMP_INIT_I_LOCK(lock, seq);
 #if USE_ITT_BUILD
-    kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(lock);
+    kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
     __kmp_itt_lock_creating(ilk->lock, NULL);
 #endif
 }
@@ -3539,7 +3539,7 @@ __kmp_cleanup_indirect_user_locks()
     int k;
 
     // Clean up locks in the pools first (they were already destroyed before going into the pools).
-    for (k = 0; k < DYNA_NUM_I_LOCKS; ++k) {
+    for (k = 0; k < KMP_NUM_I_LOCKS; ++k) {
         kmp_indirect_lock_t *l = __kmp_indirect_lock_pool[k];
         while (l != NULL) {
             kmp_indirect_lock_t *ll = l;
@@ -3556,7 +3556,7 @@ __kmp_cleanup_indirect_user_locks()
         kmp_indirect_lock_t *l = __kmp_indirect_lock_table[i];
         if (l != NULL) {
             // Locks not destroyed explicitly need to be destroyed here.
-            DYNA_I_LOCK_FUNC(l, destroy)(l->lock);
+            KMP_I_LOCK_FUNC(l, destroy)(l->lock);
             __kmp_free(l->lock);
             __kmp_free(l);
         }

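The futex paths in this file and in kmp_csupport.c follow one protocol: the acquirer's value is stored shifted left by one so that bit 0 of the stripped word can flag "waiters present"; a contender that loses the CAS sets that bit and blocks in FUTEX_WAIT, and the releaser exchanges the word back to KMP_LOCK_FREE(futex) and issues FUTEX_WAKE only when the flag was set. A Linux-only sketch of the two raw syscalls, in the same argument order the code above uses:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* block only if *addr still equals expected_val at the time of the call */
static long futex_wait(int *addr, int expected_val)
{
    return syscall(__NR_futex, addr, FUTEX_WAIT, expected_val, NULL, NULL, 0);
}

/* wake up to count threads blocked on addr */
static long futex_wake(int *addr, int count)
{
    return syscall(__NR_futex, addr, FUTEX_WAKE, count, NULL, NULL, 0);
}
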
Modified: openmp/trunk/runtime/src/kmp_lock.h
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp_lock.h?rev=254637&r1=254636&r2=254637&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp_lock.h (original)
+++ openmp/trunk/runtime/src/kmp_lock.h Thu Dec  3 13:37:20 2015
@@ -1030,22 +1030,22 @@ extern void __kmp_cleanup_user_locks();
 
 #if KMP_USE_DYNAMIC_LOCK
 
-#define DYNA_HAS_FUTEX          (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM))
-#define DYNA_HAS_HLE            (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC)
-#define DYNA_USE_FAST_FUTEX     0 && DYNA_HAS_FUTEX
-#define DYNA_USE_FAST_TAS       1 && DYNA_HAS_FUTEX
+#define KMP_HAS_FUTEX          (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM))
+#define KMP_HAS_HLE            (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC)
+#define KMP_USE_FAST_FUTEX     0 && KMP_HAS_FUTEX
+#define KMP_USE_FAST_TAS       1 && KMP_HAS_FUTEX
 
 // List of lock definitions; all nested locks are indirect locks.
 // hle lock is xchg lock prefixed with XACQUIRE/XRELEASE.
 // All nested locks are indirect lock types.
-#if DYNA_HAS_FUTEX
-# if DYNA_HAS_HLE
+#if KMP_HAS_FUTEX
+# if KMP_HAS_HLE
 #  define FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a)
-#  define DYNA_LAST_D_LOCK_SEQ lockseq_hle
+#  define KMP_LAST_D_LOCK_SEQ lockseq_hle
 # else
 #  define FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a)
-#  define DYNA_LAST_D_LOCK_SEQ lockseq_futex
-# endif // DYNA_HAS_HLE
+#  define KMP_LAST_D_LOCK_SEQ lockseq_futex
+# endif // KMP_HAS_HLE
 # if KMP_USE_ADAPTIVE_LOCKS
 #  define FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a)   \
                                m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
@@ -1056,13 +1056,13 @@ extern void __kmp_cleanup_user_locks();
                                m(nested_queuing, a) m(nested_drdpa, a)
 # endif // KMP_USE_ADAPTIVE_LOCKS
 #else
-# if DYNA_HAS_HLE
+# if KMP_HAS_HLE
 #  define FOREACH_D_LOCK(m, a) m(tas, a)             m(hle, a)
-#  define DYNA_LAST_D_LOCK_SEQ lockseq_hle
+#  define KMP_LAST_D_LOCK_SEQ lockseq_hle
 # else
 #  define FOREACH_D_LOCK(m, a) m(tas, a)
-#  define DYNA_LAST_D_LOCK_SEQ lockseq_tas
-# endif // DYNA_HAS_HLE
+#  define KMP_LAST_D_LOCK_SEQ lockseq_tas
+# endif // KMP_HAS_HLE
 # if KMP_USE_ADAPTIVE_LOCKS
 #  define FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a)   \
                                m(nested_tas, a)                    m(nested_ticket, a) \
@@ -1072,13 +1072,13 @@ extern void __kmp_cleanup_user_locks();
                                m(nested_tas, a)                    m(nested_ticket, a) \
                                m(nested_queuing, a) m(nested_drdpa, a)
 # endif // KMP_USE_ADAPTIVE_LOCKS
-#endif // DYNA_HAS_FUTEX
+#endif // KMP_HAS_FUTEX
 
 // Information used in dynamic dispatch
-#define DYNA_LOCK_VALUE_SHIFT 8
-#define DYNA_LOCK_TYPE_MASK   ((1<<DYNA_LOCK_VALUE_SHIFT)-1)
-#define DYNA_NUM_D_LOCKS      DYNA_LAST_D_LOCK_SEQ
-#define DYNA_NUM_I_LOCKS      (locktag_nested_drdpa+1)
+#define KMP_LOCK_VALUE_SHIFT 8
+#define KMP_LOCK_TYPE_MASK   ((1<<KMP_LOCK_VALUE_SHIFT)-1)
+#define KMP_NUM_D_LOCKS      KMP_LAST_D_LOCK_SEQ
+#define KMP_NUM_I_LOCKS      (locktag_nested_drdpa+1)
 
 // Base type for dynamic locks.
 typedef kmp_uint32 kmp_dyna_lock_t;
@@ -1101,14 +1101,14 @@ typedef enum {
 } kmp_indirect_locktag_t;
 
 // Utility macros that extract information from lock sequences.
-#define DYNA_IS_D_LOCK(seq) (seq >= lockseq_tas && seq <= DYNA_LAST_D_LOCK_SEQ)
-#define DYNA_IS_I_LOCK(seq) (seq >= lockseq_ticket && seq <= lockseq_nested_drdpa)
-#define DYNA_GET_I_TAG(seq) (kmp_indirect_locktag_t)(seq - lockseq_ticket)
-#define DYNA_GET_D_TAG(seq) (seq<<1 | 1)
+#define KMP_IS_D_LOCK(seq) (seq >= lockseq_tas && seq <= KMP_LAST_D_LOCK_SEQ)
+#define KMP_IS_I_LOCK(seq) (seq >= lockseq_ticket && seq <= lockseq_nested_drdpa)
+#define KMP_GET_I_TAG(seq) (kmp_indirect_locktag_t)(seq - lockseq_ticket)
+#define KMP_GET_D_TAG(seq) (seq<<1 | 1)
 
 // Enumerates direct lock tags starting from indirect tag.
 typedef enum {
-#define expand_tag(l,a) locktag_##l = DYNA_GET_D_TAG(lockseq_##l),
+#define expand_tag(l,a) locktag_##l = KMP_GET_D_TAG(lockseq_##l),
     FOREACH_D_LOCK(expand_tag, 0)
 #undef expand_tag
 } kmp_direct_locktag_t;
@@ -1134,34 +1134,34 @@ extern void (*(*__kmp_indirect_unset_ops
 extern int  (*(*__kmp_indirect_test_ops))(kmp_user_lock_p, kmp_int32);
 
 // Extracts direct lock tag from a user lock pointer
-#define DYNA_EXTRACT_D_TAG(l)   (*((kmp_dyna_lock_t *)(l)) & DYNA_LOCK_TYPE_MASK & -(*((kmp_dyna_lock_t *)(l)) & 1))
+#define KMP_EXTRACT_D_TAG(l)   (*((kmp_dyna_lock_t *)(l)) & KMP_LOCK_TYPE_MASK & -(*((kmp_dyna_lock_t *)(l)) & 1))
 
 // Extracts indirect lock index from a user lock pointer
-#define DYNA_EXTRACT_I_INDEX(l) (*(kmp_lock_index_t *)(l) >> 1)
+#define KMP_EXTRACT_I_INDEX(l) (*(kmp_lock_index_t *)(l) >> 1)
 
 // Returns function pointer to the direct lock function with l (kmp_dyna_lock_t *) and op (operation type).
-#define DYNA_D_LOCK_FUNC(l, op) __kmp_direct_##op##_ops[DYNA_EXTRACT_D_TAG(l)]
+#define KMP_D_LOCK_FUNC(l, op) __kmp_direct_##op##_ops[KMP_EXTRACT_D_TAG(l)]
 
 // Returns function pointer to the indirect lock function with l (kmp_indirect_lock_t *) and op (operation type).
-#define DYNA_I_LOCK_FUNC(l, op) __kmp_indirect_##op##_ops[((kmp_indirect_lock_t *)(l))->type]
+#define KMP_I_LOCK_FUNC(l, op) __kmp_indirect_##op##_ops[((kmp_indirect_lock_t *)(l))->type]
 
 // Initializes a direct lock with the given lock pointer and lock sequence.
-#define DYNA_INIT_D_LOCK(l, seq) __kmp_direct_init_ops[DYNA_GET_D_TAG(seq)]((kmp_dyna_lock_t *)l, seq)
+#define KMP_INIT_D_LOCK(l, seq) __kmp_direct_init_ops[KMP_GET_D_TAG(seq)]((kmp_dyna_lock_t *)l, seq)
 
 // Initializes an indirect lock with the given lock pointer and lock sequence.
-#define DYNA_INIT_I_LOCK(l, seq) __kmp_direct_init_ops[0]((kmp_dyna_lock_t *)(l), seq)
+#define KMP_INIT_I_LOCK(l, seq) __kmp_direct_init_ops[0]((kmp_dyna_lock_t *)(l), seq)
 
 // Returns "free" lock value for the given lock type.
-#define DYNA_LOCK_FREE(type)      (locktag_##type)
+#define KMP_LOCK_FREE(type)      (locktag_##type)
 
 // Returns "busy" lock value for the given lock type.
-#define DYNA_LOCK_BUSY(v, type)   ((v)<<DYNA_LOCK_VALUE_SHIFT | locktag_##type)
+#define KMP_LOCK_BUSY(v, type)   ((v)<<KMP_LOCK_VALUE_SHIFT | locktag_##type)
 
 // Returns lock value after removing (shifting) lock tag.
-#define DYNA_LOCK_STRIP(v)        ((v)>>DYNA_LOCK_VALUE_SHIFT)
+#define KMP_LOCK_STRIP(v)        ((v)>>KMP_LOCK_VALUE_SHIFT)
 
 // Updates __kmp_user_lock_seq with the given lock type.
-#define DYNA_STORE_LOCK_SEQ(type) (__kmp_user_lock_seq = lockseq_##type)
+#define KMP_STORE_LOCK_SEQ(type) (__kmp_user_lock_seq = lockseq_##type)
 
 // Internal entries for hinted lock initializers.
 extern void __kmp_init_lock_hinted(void **, int);
@@ -1180,28 +1180,28 @@ extern void __kmp_cleanup_indirect_user_
 extern kmp_dyna_lockseq_t __kmp_user_lock_seq;
 
 // Jump table for "set lock location", available only for indirect locks.
-extern void (*__kmp_indirect_set_location[DYNA_NUM_I_LOCKS])(kmp_user_lock_p, const ident_t *);
-#define DYNA_SET_I_LOCK_LOCATION(lck, loc) {                        \
+extern void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p, const ident_t *);
+#define KMP_SET_I_LOCK_LOCATION(lck, loc) {                         \
     if (__kmp_indirect_set_location[(lck)->type] != NULL)           \
         __kmp_indirect_set_location[(lck)->type]((lck)->lock, loc); \
 }
 
 // Jump table for "set lock flags", available only for indirect locks.
-extern void (*__kmp_indirect_set_flags[DYNA_NUM_I_LOCKS])(kmp_user_lock_p, kmp_lock_flags_t);
-#define DYNA_SET_I_LOCK_FLAGS(lck, flag) {                        \
+extern void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p, kmp_lock_flags_t);
+#define KMP_SET_I_LOCK_FLAGS(lck, flag) {                         \
     if (__kmp_indirect_set_flags[(lck)->type] != NULL)            \
         __kmp_indirect_set_flags[(lck)->type]((lck)->lock, flag); \
 }
 
 // Jump table for "get lock location", available only for indirect locks.
-extern const ident_t * (*__kmp_indirect_get_location[DYNA_NUM_I_LOCKS])(kmp_user_lock_p);
-#define DYNA_GET_I_LOCK_LOCATION(lck) ( __kmp_indirect_get_location[(lck)->type] != NULL      \
+extern const ident_t * (*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p);
+#define KMP_GET_I_LOCK_LOCATION(lck) ( __kmp_indirect_get_location[(lck)->type] != NULL       \
                                       ? __kmp_indirect_get_location[(lck)->type]((lck)->lock) \
                                       : NULL )
 
 // Jump table for "get lock flags", available only for indirect locks.
-extern kmp_lock_flags_t (*__kmp_indirect_get_flags[DYNA_NUM_I_LOCKS])(kmp_user_lock_p);
-#define DYNA_GET_I_LOCK_FLAGS(lck) ( __kmp_indirect_get_flags[(lck)->type] != NULL      \
+extern kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p);
+#define KMP_GET_I_LOCK_FLAGS(lck) ( __kmp_indirect_get_flags[(lck)->type] != NULL       \
                                    ? __kmp_indirect_get_flags[(lck)->type]((lck)->lock) \
                                    : NULL )
 
@@ -1220,8 +1220,8 @@ extern kmp_lock_index_t __kmp_indirect_l
 extern int __kmp_num_locks_in_block;
 
 // Fast lock table lookup without consistency checking
-#define DYNA_LOOKUP_I_LOCK(l) ( (OMP_LOCK_T_SIZE < sizeof(void *)) \
-                              ? __kmp_indirect_lock_table[DYNA_EXTRACT_I_INDEX(l)] \
+#define KMP_LOOKUP_I_LOCK(l) ( (OMP_LOCK_T_SIZE < sizeof(void *))                 \
+                              ? __kmp_indirect_lock_table[KMP_EXTRACT_I_INDEX(l)] \
                               : *((kmp_indirect_lock_t **)l) )
 
 // Used once in kmp_error.c
@@ -1230,10 +1230,10 @@ __kmp_get_user_lock_owner(kmp_user_lock_
 
 #else // KMP_USE_DYNAMIC_LOCK
 
-# define DYNA_LOCK_BUSY(v, type)    (v)
-# define DYNA_LOCK_FREE(type)       0
-# define DYNA_LOCK_STRIP(v)         (v)
-# define DYNA_STORE_LOCK_SEQ(seq)
+# define KMP_LOCK_BUSY(v, type)    (v)
+# define KMP_LOCK_FREE(type)       0
+# define KMP_LOCK_STRIP(v)         (v)
+# define KMP_STORE_LOCK_SEQ(seq)
 
 #endif // KMP_USE_DYNAMIC_LOCK
 

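kmp_lock.h and kmp_lock.cpp generate the per-lock-kind functions and jump tables with X-macros: FOREACH_D_LOCK(m, a) applies m to each direct lock name, and each expand_* helper stamps out one definition or one table slot per name. A runnable sketch of the pattern with simplified signatures (the real name list depends on KMP_HAS_FUTEX and KMP_HAS_HLE, as shown above):

#include <stdio.h>

#define FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a)

/* one init function per direct lock kind, as in kmp_lock.cpp */
#define expand_init_lock(l, a) \
    static void init_##l##_lock(void) { printf("init %s lock\n", #l); }
FOREACH_D_LOCK(expand_init_lock, 0)
#undef expand_init_lock

/* one slot per kind, as in the direct_*_tab jump tables */
#define expand_entry(l, a) init_##l##_lock,
static void (*init_tab[])(void) = { FOREACH_D_LOCK(expand_entry, 0) };
#undef expand_entry

int main(void)
{
    unsigned i;
    for (i = 0; i < sizeof(init_tab) / sizeof(init_tab[0]); ++i)
        init_tab[i]();
    return 0;
}
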
Modified: openmp/trunk/runtime/src/kmp_settings.c
URL: http://llvm.org/viewvc/llvm-project/openmp/trunk/runtime/src/kmp_settings.c?rev=254637&r1=254636&r2=254637&view=diff
==============================================================================
--- openmp/trunk/runtime/src/kmp_settings.c (original)
+++ openmp/trunk/runtime/src/kmp_settings.c Thu Dec  3 13:37:20 2015
@@ -3892,13 +3892,13 @@ __kmp_stg_parse_lock_kind( char const *
       || __kmp_str_match( "testand-set", 2, value )
       || __kmp_str_match( "testandset", 2, value ) ) {
         __kmp_user_lock_kind = lk_tas;
-        DYNA_STORE_LOCK_SEQ(tas);
+        KMP_STORE_LOCK_SEQ(tas);
     }
 #if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
     else if ( __kmp_str_match( "futex", 1, value ) ) {
         if ( __kmp_futex_determine_capable() ) {
             __kmp_user_lock_kind = lk_futex;
-            DYNA_STORE_LOCK_SEQ(futex);
+            KMP_STORE_LOCK_SEQ(futex);
         }
         else {
             KMP_WARNING( FutexNotSupported, name, value );
@@ -3907,12 +3907,12 @@ __kmp_stg_parse_lock_kind( char const *
 #endif
     else if ( __kmp_str_match( "ticket", 2, value ) ) {
         __kmp_user_lock_kind = lk_ticket;
-        DYNA_STORE_LOCK_SEQ(ticket);
+        KMP_STORE_LOCK_SEQ(ticket);
     }
     else if ( __kmp_str_match( "queuing", 1, value )
       || __kmp_str_match( "queue", 1, value ) ) {
         __kmp_user_lock_kind = lk_queuing;
-        DYNA_STORE_LOCK_SEQ(queuing);
+        KMP_STORE_LOCK_SEQ(queuing);
     }
     else if ( __kmp_str_match( "drdpa ticket", 1, value )
       || __kmp_str_match( "drdpa_ticket", 1, value )
@@ -3920,23 +3920,23 @@ __kmp_stg_parse_lock_kind( char const *
       || __kmp_str_match( "drdpaticket", 1, value )
       || __kmp_str_match( "drdpa", 1, value ) ) {
         __kmp_user_lock_kind = lk_drdpa;
-        DYNA_STORE_LOCK_SEQ(drdpa);
+        KMP_STORE_LOCK_SEQ(drdpa);
     }
 #if KMP_USE_ADAPTIVE_LOCKS
     else if ( __kmp_str_match( "adaptive", 1, value )  ) {
         if( __kmp_cpuinfo.rtm ) { // ??? Is cpuinfo available here?
             __kmp_user_lock_kind = lk_adaptive;
-            DYNA_STORE_LOCK_SEQ(adaptive);
+            KMP_STORE_LOCK_SEQ(adaptive);
         } else {
             KMP_WARNING( AdaptiveNotSupported, name, value );
             __kmp_user_lock_kind = lk_queuing;
-            DYNA_STORE_LOCK_SEQ(queuing);
+            KMP_STORE_LOCK_SEQ(queuing);
         }
     }
 #endif // KMP_USE_ADAPTIVE_LOCKS
 #if KMP_USE_DYNAMIC_LOCK
     else if ( __kmp_str_match("hle", 1, value) ) {
-        DYNA_STORE_LOCK_SEQ(hle);
+        KMP_STORE_LOCK_SEQ(hle);
     }
 #endif
     else {

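Each branch of __kmp_stg_parse_lock_kind now ends in KMP_STORE_LOCK_SEQ, which (per the kmp_lock.h hunk above) simply records the selected sequence in the global that the lock entry points consult. A sketch with an abridged lockseq enum:

typedef enum {
    lockseq_tas, lockseq_futex, lockseq_ticket, lockseq_queuing
} kmp_dyna_lockseq_t;  /* abridged */

static kmp_dyna_lockseq_t __kmp_user_lock_seq = lockseq_queuing;  /* default */

#define KMP_STORE_LOCK_SEQ(type) (__kmp_user_lock_seq = lockseq_##type)

/* e.g. matching the value "ticket" executes: */
static void on_ticket_value(void) { KMP_STORE_LOCK_SEQ(ticket); }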


