Diffstat (limited to 'openmp/runtime/src/kmp_csupport.c')
-rw-r--r--  openmp/runtime/src/kmp_csupport.c  166
1 file changed, 83 insertions(+), 83 deletions(-)
diff --git a/openmp/runtime/src/kmp_csupport.c b/openmp/runtime/src/kmp_csupport.c
index f98b9b199df..23cf244a134 100644
--- a/openmp/runtime/src/kmp_csupport.c
+++ b/openmp/runtime/src/kmp_csupport.c
@@ -926,12 +926,12 @@ __kmp_get_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int3
ret = (kmp_indirect_lock_t *)TCR_PTR(*lck);
if (ret == NULL) {
void *idx;
- kmp_indirect_locktag_t tag = DYNA_GET_I_TAG(seq);
+ kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag);
ret = ilk;
- DYNA_I_LOCK_FUNC(ilk, init)(ilk->lock);
- DYNA_SET_I_LOCK_LOCATION(ilk, loc);
- DYNA_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section);
+ KMP_I_LOCK_FUNC(ilk, init)(ilk->lock);
+ KMP_SET_I_LOCK_LOCATION(ilk, loc);
+ KMP_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section);
KA_TRACE(20, ("__kmp_get_indirect_csptr: initialized indirect lock #%d\n", tag));
#if USE_ITT_BUILD
__kmp_itt_critical_creating(ilk->lock, loc);
@@ -942,7 +942,7 @@ __kmp_get_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int3
__kmp_itt_critical_destroyed(ilk->lock);
#endif
// Postponing destroy, to avoid costly dispatch here.
- //DYNA_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx);
+ //KMP_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx);
ret = (kmp_indirect_lock_t *)TCR_PTR(*lck);
KMP_DEBUG_ASSERT(ret != NULL);
}
@@ -951,10 +951,10 @@ __kmp_get_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int3
}
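
The hunk above touches the lazy creation of the critical-section lock: every thread re-reads the lock word, and only the thread that wins the installation race publishes its indirect lock, while the losers pick up the winner's. A minimal sketch of the same one-time-initialization pattern, using C11 atomics in place of the runtime's TCR_PTR/KMP_COMPARE_AND_STORE machinery (my_lock_t and get_or_create are illustrative names, not kmp_* APIs):

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct { atomic_int word; } my_lock_t;   /* stand-in lock type */

    static my_lock_t *get_or_create(_Atomic(my_lock_t *) *slot) {
        my_lock_t *lk = atomic_load(slot);
        while (lk == NULL) {
            my_lock_t *fresh = calloc(1, sizeof(*fresh));
            my_lock_t *expected = NULL;
            if (atomic_compare_exchange_strong(slot, &expected, fresh)) {
                lk = fresh;          /* we won the race: ours is published */
            } else {
                free(fresh);         /* we lost: discard ours, reuse winner's */
                lk = atomic_load(slot);
            }
        }
        return lk;
    }

The commented-out KMP_D_LOCK_FUNC(..., destroy) call in the diff is the runtime's version of the loser's cleanup, deliberately postponed to avoid the costly dispatch on this path.
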
// Fast-path acquire tas lock
-#define DYNA_ACQUIRE_TAS_LOCK(lock, gtid) { \
+#define KMP_ACQUIRE_TAS_LOCK(lock, gtid) { \
kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock; \
- if (l->lk.poll != DYNA_LOCK_FREE(tas) || \
- ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), DYNA_LOCK_FREE(tas), DYNA_LOCK_BUSY(gtid+1, tas))) { \
+ if (l->lk.poll != KMP_LOCK_FREE(tas) || \
+ ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \
kmp_uint32 spins; \
KMP_FSYNC_PREPARE(l); \
KMP_INIT_YIELD(spins); \
@@ -963,8 +963,8 @@ __kmp_get_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int3
} else { \
KMP_YIELD_SPIN(spins); \
} \
- while (l->lk.poll != DYNA_LOCK_FREE(tas) || \
- ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), DYNA_LOCK_FREE(tas), DYNA_LOCK_BUSY(gtid+1, tas))) { \
+ while (l->lk.poll != KMP_LOCK_FREE(tas) || \
+ ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \
if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
KMP_YIELD(TRUE); \
} else { \
@@ -976,19 +976,19 @@ __kmp_get_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int3
}
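
This macro is a classic test-and-test-and-set fast path: read the lock word first and attempt the atomic compare-and-store only when it looks free, yielding when the machine is oversubscribed. A standalone C11 sketch of the same loop (gtid+1 is stored so that 0 can mean "free", as in the diff; sched_yield stands in for the KMP_YIELD/KMP_YIELD_SPIN logic):

    #include <stdatomic.h>
    #include <sched.h>

    #define TAS_FREE 0                       /* spirit of KMP_LOCK_FREE(tas) */

    static void tas_acquire(atomic_int *poll, int gtid) {
        int busy = gtid + 1;                 /* 0 is reserved for "free" */
        for (;;) {
            int expected = TAS_FREE;
            /* Cheap relaxed read first; only CAS when the lock looks free,
             * so spinners do not saturate the bus with failed CAS attempts. */
            if (atomic_load_explicit(poll, memory_order_relaxed) == TAS_FREE &&
                atomic_compare_exchange_weak_explicit(
                    poll, &expected, busy,
                    memory_order_acquire, memory_order_relaxed))
                return;
            sched_yield();                   /* stand-in for the KMP yield heuristics */
        }
    }
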
// Fast-path test tas lock
-#define DYNA_TEST_TAS_LOCK(lock, gtid, rc) { \
+#define KMP_TEST_TAS_LOCK(lock, gtid, rc) { \
kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock; \
- rc = l->lk.poll == DYNA_LOCK_FREE(tas) && \
- KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), DYNA_LOCK_FREE(tas), DYNA_LOCK_BUSY(gtid+1, tas)); \
+ rc = l->lk.poll == KMP_LOCK_FREE(tas) && \
+ KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas)); \
}
// Fast-path release tas lock
-#define DYNA_RELEASE_TAS_LOCK(lock, gtid) { \
- TCW_4(((kmp_tas_lock_t *)lock)->lk.poll, DYNA_LOCK_FREE(tas)); \
+#define KMP_RELEASE_TAS_LOCK(lock, gtid) { \
+ TCW_4(((kmp_tas_lock_t *)lock)->lk.poll, KMP_LOCK_FREE(tas)); \
KMP_MB(); \
}
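
Test and release are the cheap halves of the same protocol: test is a single conditional CAS, and release is a plain store of the free value followed by a barrier (TCW_4 plus KMP_MB in the diff). Continuing the sketch above, in C11 terms a release store carries the needed ordering:

    static int tas_test(atomic_int *poll, int gtid) {
        int expected = TAS_FREE;
        return atomic_load_explicit(poll, memory_order_relaxed) == TAS_FREE &&
               atomic_compare_exchange_strong_explicit(
                   poll, &expected, gtid + 1,
                   memory_order_acquire, memory_order_relaxed);
    }

    static void tas_release(atomic_int *poll) {
        atomic_store_explicit(poll, TAS_FREE, memory_order_release);
    }
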
-#if DYNA_HAS_FUTEX
+#if KMP_HAS_FUTEX
# include <unistd.h>
# include <sys/syscall.h>
@@ -1000,20 +1000,20 @@ __kmp_get_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int3
# endif
// Fast-path acquire futex lock
-#define DYNA_ACQUIRE_FUTEX_LOCK(lock, gtid) { \
+#define KMP_ACQUIRE_FUTEX_LOCK(lock, gtid) { \
kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \
kmp_int32 gtid_code = (gtid+1) << 1; \
KMP_MB(); \
KMP_FSYNC_PREPARE(ftx); \
kmp_int32 poll_val; \
- while ((poll_val = KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), DYNA_LOCK_FREE(futex), \
- DYNA_LOCK_BUSY(gtid_code, futex))) != DYNA_LOCK_FREE(futex)) { \
- kmp_int32 cond = DYNA_LOCK_STRIP(poll_val) & 1; \
+ while ((poll_val = KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), \
+ KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) { \
+ kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1; \
if (!cond) { \
- if (!KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), poll_val, poll_val | DYNA_LOCK_BUSY(1, futex))) { \
+ if (!KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), poll_val, poll_val | KMP_LOCK_BUSY(1, futex))) { \
continue; \
} \
- poll_val |= DYNA_LOCK_BUSY(1, futex); \
+ poll_val |= KMP_LOCK_BUSY(1, futex); \
} \
kmp_int32 rc; \
if ((rc = syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAIT, poll_val, NULL, NULL, 0)) != 0) { \
@@ -1025,9 +1025,9 @@ __kmp_get_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int3
}
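
The futex acquire path encodes (gtid+1) << 1 in the lock word and reserves the low bit as a "waiters present" flag: a thread that loses the CAS first sets that bit, then sleeps in FUTEX_WAIT, which parks it only while the word still holds the value it observed. A compact Linux-only sketch of the same protocol (error handling and the ITT/yield hooks omitted; FTX_FREE plays the role of KMP_LOCK_FREE(futex)):

    #define _GNU_SOURCE
    #include <stdatomic.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/futex.h>

    #define FTX_FREE 0

    static void futex_acquire(atomic_int *poll, int gtid) {
        int want = (gtid + 1) << 1;          /* low bit = "contended" flag */
        for (;;) {
            int seen = FTX_FREE;
            if (atomic_compare_exchange_strong(poll, &seen, want))
                return;                      /* free -> ours, uncontended */
            if (!(seen & 1)) {               /* flag the lock as contended */
                int exp = seen;
                if (!atomic_compare_exchange_strong(poll, &exp, seen | 1))
                    continue;                /* word moved under us: retry */
                seen |= 1;
            }
            /* Kernel re-checks: sleeps only if *poll still equals seen. */
            syscall(SYS_futex, (int *)poll, FUTEX_WAIT, seen, NULL, NULL, 0);
        }
    }
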
// Fast-path test futex lock
-#define DYNA_TEST_FUTEX_LOCK(lock, gtid, rc) { \
+#define KMP_TEST_FUTEX_LOCK(lock, gtid, rc) { \
kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \
- if (KMP_COMPARE_AND_STORE_ACQ32(&(ftx->lk.poll), DYNA_LOCK_FREE(futex), DYNA_LOCK_BUSY(gtid+1, futex) << 1)) { \
+ if (KMP_COMPARE_AND_STORE_ACQ32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), KMP_LOCK_BUSY(gtid+1, futex) << 1)) { \
KMP_FSYNC_ACQUIRED(ftx); \
rc = TRUE; \
} else { \
@@ -1036,19 +1036,19 @@ __kmp_get_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int3
}
// Fast-path release futex lock
-#define DYNA_RELEASE_FUTEX_LOCK(lock, gtid) { \
+#define KMP_RELEASE_FUTEX_LOCK(lock, gtid) { \
kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \
KMP_MB(); \
KMP_FSYNC_RELEASING(ftx); \
- kmp_int32 poll_val = KMP_XCHG_FIXED32(&(ftx->lk.poll), DYNA_LOCK_FREE(futex)); \
- if (DYNA_LOCK_STRIP(poll_val) & 1) { \
- syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAKE, DYNA_LOCK_BUSY(1, futex), NULL, NULL, 0); \
+ kmp_int32 poll_val = KMP_XCHG_FIXED32(&(ftx->lk.poll), KMP_LOCK_FREE(futex)); \
+ if (KMP_LOCK_STRIP(poll_val) & 1) { \
+ syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), NULL, NULL, 0); \
} \
KMP_MB(); \
KMP_YIELD(TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)); \
}
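
Release exchanges the free value back in (KMP_XCHG_FIXED32 above) and issues a FUTEX_WAKE for one sleeper only when the old value had the contended bit set, so an uncontended unlock never enters the kernel. Continuing the sketch above:

    static void futex_release(atomic_int *poll) {
        int old = atomic_exchange(poll, FTX_FREE);
        if (old & 1)                         /* a waiter may be parked */
            syscall(SYS_futex, (int *)poll, FUTEX_WAKE, 1, NULL, NULL, 0);
    }
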
-#endif // DYNA_HAS_FUTEX
+#endif // KMP_HAS_FUTEX
#else // KMP_USE_DYNAMIC_LOCK
@@ -1126,11 +1126,11 @@ __kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit )
#if KMP_USE_DYNAMIC_LOCK
// Assumption: all direct locks fit in OMP_CRITICAL_SIZE.
// The global sequence __kmp_user_lock_seq is used unless compiler pushes a value.
- if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
+ if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
lck = (kmp_user_lock_p)crit;
// The thread that reaches here first needs to tag the lock word.
if (*((kmp_dyna_lock_t *)lck) == 0) {
- KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)lck, 0, DYNA_GET_D_TAG(__kmp_user_lock_seq));
+ KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)lck, 0, KMP_GET_D_TAG(__kmp_user_lock_seq));
}
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
@@ -1138,17 +1138,17 @@ __kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit )
# if USE_ITT_BUILD
__kmp_itt_critical_acquiring(lck);
# endif
-# if DYNA_USE_FAST_TAS
+# if KMP_USE_FAST_TAS
if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
- DYNA_ACQUIRE_TAS_LOCK(lck, global_tid);
+ KMP_ACQUIRE_TAS_LOCK(lck, global_tid);
} else
-# elif DYNA_USE_FAST_FUTEX
+# elif KMP_USE_FAST_FUTEX
if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
- DYNA_ACQUIRE_FUTEX_LOCK(lck, global_tid);
+ KMP_ACQUIRE_FUTEX_LOCK(lck, global_tid);
} else
# endif
{
- DYNA_D_LOCK_FUNC(lck, set)((kmp_dyna_lock_t *)lck, global_tid);
+ KMP_D_LOCK_FUNC(lck, set)((kmp_dyna_lock_t *)lck, global_tid);
}
} else {
kmp_indirect_lock_t *ilk = __kmp_get_indirect_csptr(crit, loc, global_tid, __kmp_user_lock_seq);
@@ -1159,7 +1159,7 @@ __kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit )
# if USE_ITT_BUILD
__kmp_itt_critical_acquiring(lck);
# endif
- DYNA_I_LOCK_FUNC(ilk, set)(lck, global_tid);
+ KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
}
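
The renamed KMP_IS_D_LOCK / KMP_GET_D_TAG / KMP_D_LOCK_FUNC calls reflect the two-tier design of the dynamic lock layer: a "direct" lock stores a small nonzero tag in the lock word itself and is dispatched through a per-tag function table, while an indirect lock is reached through a pointer associated with that word. A toy model of the dispatch, with an assumed low-byte tag encoding and illustrative table and function names:

    #include <stdio.h>

    typedef void (*lock_op)(void *lock, int gtid);

    static void set_tas(void *l, int g)      { printf("tas set by %d\n", g); }
    static void set_futex(void *l, int g)    { printf("futex set by %d\n", g); }
    static void set_indirect(void *l, int g) { printf("indirect set by %d\n", g); }

    /* Hypothetical per-tag table; slot 0 is unused because tag 0 means
     * "this word is really an indirect-lock reference". */
    static lock_op direct_set[] = { NULL, set_tas, set_futex };

    static void generic_set(void *lock, int gtid) {
        unsigned tag = *(unsigned *)lock & 0xff;  /* assumed tag encoding */
        if (tag != 0)
            direct_set[tag](lock, gtid);          /* direct: table dispatch */
        else
            set_indirect(lock, gtid);             /* indirect: via lookup */
    }

The fast-path #if branches in the hunks above simply short-circuit this table for the two cheapest direct kinds (TAS, and futex where available) when consistency checking is off.
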
#else // KMP_USE_DYNAMIC_LOCK
@@ -1223,7 +1223,7 @@ __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
KC_TRACE( 10, ("__kmpc_end_critical: called T#%d\n", global_tid ));
#if KMP_USE_DYNAMIC_LOCK
- if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
+ if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
lck = (kmp_user_lock_p)crit;
KMP_ASSERT(lck != NULL);
if (__kmp_env_consistency_check) {
@@ -1232,17 +1232,17 @@ __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
# if USE_ITT_BUILD
__kmp_itt_critical_releasing( lck );
# endif
-# if DYNA_USE_FAST_TAS
+# if KMP_USE_FAST_TAS
if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
- DYNA_RELEASE_TAS_LOCK(lck, global_tid);
+ KMP_RELEASE_TAS_LOCK(lck, global_tid);
} else
-# elif DYNA_USE_FAST_FUTEX
+# elif KMP_USE_FAST_FUTEX
if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
- DYNA_RELEASE_FUTEX_LOCK(lck, global_tid);
+ KMP_RELEASE_FUTEX_LOCK(lck, global_tid);
} else
# endif
{
- DYNA_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
+ KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
}
} else {
kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
@@ -1254,7 +1254,7 @@ __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
# if USE_ITT_BUILD
__kmp_itt_critical_releasing( lck );
# endif
- DYNA_I_LOCK_FUNC(ilk, unset)(lck, global_tid);
+ KMP_I_LOCK_FUNC(ilk, unset)(lck, global_tid);
}
#else // KMP_USE_DYNAMIC_LOCK
@@ -1757,15 +1757,15 @@ __kmpc_init_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
if (__kmp_env_consistency_check && user_lock == NULL) {
KMP_FATAL(LockIsUninitialized, "omp_init_lock");
}
- if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
- DYNA_INIT_D_LOCK(user_lock, __kmp_user_lock_seq);
+ if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
+ KMP_INIT_D_LOCK(user_lock, __kmp_user_lock_seq);
# if USE_ITT_BUILD
__kmp_itt_lock_creating((kmp_user_lock_p)user_lock, NULL);
# endif
} else {
- DYNA_INIT_I_LOCK(user_lock, __kmp_user_lock_seq);
- kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(user_lock);
- DYNA_SET_I_LOCK_LOCATION(ilk, loc);
+ KMP_INIT_I_LOCK(user_lock, __kmp_user_lock_seq);
+ kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock);
+ KMP_SET_I_LOCK_LOCATION(ilk, loc);
# if USE_ITT_BUILD
__kmp_itt_lock_creating(ilk->lock, loc);
# endif
@@ -1828,7 +1828,7 @@ __kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
kmp_dyna_lockseq_t nested_seq;
switch (__kmp_user_lock_seq) {
case lockseq_tas: nested_seq = lockseq_nested_tas; break;
-#if DYNA_HAS_FUTEX
+#if KMP_HAS_FUTEX
case lockseq_futex: nested_seq = lockseq_nested_futex; break;
#endif
case lockseq_ticket: nested_seq = lockseq_nested_ticket; break;
@@ -1837,10 +1837,10 @@ __kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
default: nested_seq = lockseq_nested_queuing; break;
// Use nested queuing lock for lock kinds without "nested" implementation.
}
- DYNA_INIT_I_LOCK(user_lock, nested_seq);
+ KMP_INIT_I_LOCK(user_lock, nested_seq);
// All nested locks are indirect locks.
- kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(user_lock);
- DYNA_SET_I_LOCK_LOCATION(ilk, loc);
+ kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock);
+ KMP_SET_I_LOCK_LOCATION(ilk, loc);
# if USE_ITT_BUILD
__kmp_itt_lock_creating(ilk->lock, loc);
# endif
@@ -1897,14 +1897,14 @@ __kmpc_destroy_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
# if USE_ITT_BUILD
kmp_user_lock_p lck;
- if (DYNA_EXTRACT_D_TAG(user_lock) == 0) {
- lck = ((kmp_indirect_lock_t *)DYNA_LOOKUP_I_LOCK(user_lock))->lock;
+ if (KMP_EXTRACT_D_TAG(user_lock) == 0) {
+ lck = ((kmp_indirect_lock_t *)KMP_LOOKUP_I_LOCK(user_lock))->lock;
} else {
lck = (kmp_user_lock_p)user_lock;
}
__kmp_itt_lock_destroyed(lck);
# endif
- DYNA_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
+ KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
#else
kmp_user_lock_p lck;
@@ -1956,10 +1956,10 @@ __kmpc_destroy_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
- kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(user_lock);
+ kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock);
__kmp_itt_lock_destroyed(ilk->lock);
# endif
- DYNA_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
+ KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
#else // KMP_USE_DYNAMIC_LOCK
@@ -2014,17 +2014,17 @@ void
__kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
KMP_COUNT_BLOCK(OMP_set_lock);
#if KMP_USE_DYNAMIC_LOCK
- int tag = DYNA_EXTRACT_D_TAG(user_lock);
+ int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); // itt function will get to the right lock object.
# endif
-# if DYNA_USE_FAST_TAS
+# if KMP_USE_FAST_TAS
if (tag == locktag_tas && !__kmp_env_consistency_check) {
- DYNA_ACQUIRE_TAS_LOCK(user_lock, gtid);
+ KMP_ACQUIRE_TAS_LOCK(user_lock, gtid);
} else
-# elif DYNA_USE_FAST_FUTEX
+# elif KMP_USE_FAST_FUTEX
if (tag == locktag_futex && !__kmp_env_consistency_check) {
- DYNA_ACQUIRE_FUTEX_LOCK(user_lock, gtid);
+ KMP_ACQUIRE_FUTEX_LOCK(user_lock, gtid);
} else
# endif
{
@@ -2079,7 +2079,7 @@ __kmpc_set_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
- DYNA_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid);
+ KMP_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
__kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
#endif
@@ -2132,17 +2132,17 @@ __kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
- int tag = DYNA_EXTRACT_D_TAG(user_lock);
+ int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
__kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
-# if DYNA_USE_FAST_TAS
+# if KMP_USE_FAST_TAS
if (tag == locktag_tas && !__kmp_env_consistency_check) {
- DYNA_RELEASE_TAS_LOCK(user_lock, gtid);
+ KMP_RELEASE_TAS_LOCK(user_lock, gtid);
} else
-# elif DYNA_USE_FAST_FUTEX
+# elif KMP_USE_FAST_FUTEX
if (tag == locktag_futex && !__kmp_env_consistency_check) {
- DYNA_RELEASE_FUTEX_LOCK(user_lock, gtid);
+ KMP_RELEASE_FUTEX_LOCK(user_lock, gtid);
} else
# endif
{
@@ -2205,7 +2205,7 @@ __kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
# if USE_ITT_BUILD
__kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
- DYNA_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid);
+ KMP_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid);
#else // KMP_USE_DYNAMIC_LOCK
@@ -2272,17 +2272,17 @@ __kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
#if KMP_USE_DYNAMIC_LOCK
int rc;
- int tag = DYNA_EXTRACT_D_TAG(user_lock);
+ int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
-# if DYNA_USE_FAST_TAS
+# if KMP_USE_FAST_TAS
if (tag == locktag_tas && !__kmp_env_consistency_check) {
- DYNA_TEST_TAS_LOCK(user_lock, gtid, rc);
+ KMP_TEST_TAS_LOCK(user_lock, gtid, rc);
} else
-# elif DYNA_USE_FAST_FUTEX
+# elif KMP_USE_FAST_FUTEX
if (tag == locktag_futex && !__kmp_env_consistency_check) {
- DYNA_TEST_FUTEX_LOCK(user_lock, gtid, rc);
+ KMP_TEST_FUTEX_LOCK(user_lock, gtid, rc);
} else
# endif
{
@@ -2347,7 +2347,7 @@ __kmpc_test_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
- rc = DYNA_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid);
+ rc = KMP_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
if (rc) {
__kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
@@ -2427,23 +2427,23 @@ __kmp_enter_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid,
#if KMP_USE_DYNAMIC_LOCK
- if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
+ if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
lck = (kmp_user_lock_p)crit;
if (*((kmp_dyna_lock_t *)lck) == 0) {
- KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)lck, 0, DYNA_GET_D_TAG(__kmp_user_lock_seq));
+ KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)lck, 0, KMP_GET_D_TAG(__kmp_user_lock_seq));
}
KMP_DEBUG_ASSERT(lck != NULL);
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
}
- DYNA_D_LOCK_FUNC(lck, set)((kmp_dyna_lock_t *)lck, global_tid);
+ KMP_D_LOCK_FUNC(lck, set)((kmp_dyna_lock_t *)lck, global_tid);
} else {
kmp_indirect_lock_t *ilk = __kmp_get_indirect_csptr(crit, loc, global_tid, __kmp_user_lock_seq);
KMP_DEBUG_ASSERT(ilk != NULL);
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, ilk->lock, __kmp_user_lock_seq);
}
- DYNA_I_LOCK_FUNC(ilk, set)(ilk->lock, global_tid);
+ KMP_I_LOCK_FUNC(ilk, set)(ilk->lock, global_tid);
}
#else // KMP_USE_DYNAMIC_LOCK
@@ -2475,16 +2475,16 @@ __kmp_end_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, km
#if KMP_USE_DYNAMIC_LOCK
- if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
+ if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
lck = (kmp_user_lock_p)crit;
if (__kmp_env_consistency_check)
__kmp_pop_sync(global_tid, ct_critical, loc);
- DYNA_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
+ KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
} else {
kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
if (__kmp_env_consistency_check)
__kmp_pop_sync(global_tid, ct_critical, loc);
- DYNA_I_LOCK_FUNC(ilk, unset)(ilk->lock, global_tid);
+ KMP_I_LOCK_FUNC(ilk, unset)(ilk->lock, global_tid);
}
#else // KMP_USE_DYNAMIC_LOCK