Diffstat (limited to 'openmp/runtime/src/kmp_lock.cpp')
-rw-r--r--  openmp/runtime/src/kmp_lock.cpp  47
1 file changed, 23 insertions(+), 24 deletions(-)
diff --git a/openmp/runtime/src/kmp_lock.cpp b/openmp/runtime/src/kmp_lock.cpp
index 1adfec641c1..bb18a6d196e 100644
--- a/openmp/runtime/src/kmp_lock.cpp
+++ b/openmp/runtime/src/kmp_lock.cpp
@@ -1135,7 +1135,7 @@ __kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
kmp_int32 need_mf = 1;
#if OMPT_SUPPORT
- ompt_state_t prev_state = ompt_state_undefined;
+ omp_state_t prev_state = omp_state_undefined;
#endif
KA_TRACE(1000,
@@ -1243,7 +1243,7 @@ __kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
#endif
#if OMPT_SUPPORT
- if (ompt_enabled && prev_state != ompt_state_undefined) {
+ if (ompt_enabled.enabled && prev_state != omp_state_undefined) {
/* change the state before clearing wait_id */
this_thr->th.ompt_thread_info.state = prev_state;
this_thr->th.ompt_thread_info.wait_id = 0;
@@ -1258,11 +1258,11 @@ __kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
}
#if OMPT_SUPPORT
- if (ompt_enabled && prev_state == ompt_state_undefined) {
+ if (ompt_enabled.enabled && prev_state == omp_state_undefined) {
/* this thread will spin; set wait_id before entering wait state */
prev_state = this_thr->th.ompt_thread_info.state;
this_thr->th.ompt_thread_info.wait_id = (uint64_t)lck;
- this_thr->th.ompt_thread_info.state = ompt_state_wait_lock;
+ this_thr->th.ompt_thread_info.state = omp_state_wait_lock;
}
#endif
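The three OMPT hunks above rename the state bookkeeping that brackets the queuing-lock wait: ompt_state_t / ompt_state_undefined / ompt_state_wait_lock become omp_state_t / omp_state_undefined / omp_state_wait_lock, and the plain ompt_enabled flag becomes ompt_enabled.enabled. The pattern itself is unchanged: before the thread starts spinning it saves its current state and publishes the lock as its wait_id, and once the lock is acquired it restores the saved state and then clears wait_id. A minimal sketch of that pattern, using simplified stand-in types (thread_info_t, try_acquire) rather than the runtime's kmp_info_t plumbing:

#include <cstdint>

// Simplified stand-ins; the real runtime keeps this state in kmp_info_t's
// ompt_thread_info and uses its own enum values.
enum omp_state_t { omp_state_undefined, omp_state_work_serial, omp_state_wait_lock };

struct thread_info_t {
  omp_state_t state;  // what a tool observing this thread would see
  uint64_t wait_id;   // lock the thread is waiting on, 0 when not waiting
};

// Trivial stand-in for the queuing-lock enqueue/spin attempt; it always
// succeeds here, so the wait branch below is only illustrative.
static bool try_acquire(void * /*lck*/) { return true; }

static void acquire_with_state_tracking(thread_info_t *ti, void *lck) {
  omp_state_t prev_state = omp_state_undefined;
  while (!try_acquire(lck)) {
    if (prev_state == omp_state_undefined) {
      // About to spin: remember the current state and advertise that this
      // thread is now waiting on lck.
      prev_state = ti->state;
      ti->wait_id = (uint64_t)lck;
      ti->state = omp_state_wait_lock;
    }
  }
  if (prev_state != omp_state_undefined) {
    // Acquired after waiting: restore the state before clearing wait_id.
    ti->state = prev_state;
    ti->wait_id = 0;
  }
}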
@@ -2911,11 +2911,11 @@ static int __kmp_test_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
static void __kmp_init_indirect_lock(kmp_dyna_lock_t *l,
kmp_dyna_lockseq_t tag);
static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock);
-static void __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
+static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
-static void __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
- kmp_int32);
+static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
+ kmp_int32);
static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
kmp_int32);
static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
@@ -2938,14 +2938,13 @@ void (*__kmp_direct_destroy[])(kmp_dyna_lock_t *) = {
// set/acquire functions
#define expand(l, op) \
- 0, (void (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
-static void (*direct_set[])(kmp_dyna_lock_t *, kmp_int32) = {
+ 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
+static int (*direct_set[])(kmp_dyna_lock_t *, kmp_int32) = {
__kmp_set_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, acquire)};
#undef expand
#define expand(l, op) \
- 0, (void (*)(kmp_dyna_lock_t *, \
- kmp_int32))__kmp_##op##_##l##_lock_with_checks,
-static void (*direct_set_check[])(kmp_dyna_lock_t *, kmp_int32) = {
+ 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
+static int (*direct_set_check[])(kmp_dyna_lock_t *, kmp_int32) = {
__kmp_set_indirect_lock_with_checks, 0,
KMP_FOREACH_D_LOCK(expand, acquire)};
#undef expand
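The hunk above changes the direct-lock "set" jump tables from void-returning to int-returning entries. The tables are built with an X-macro: KMP_FOREACH_D_LOCK expands the local expand() helper once per direct lock kind, and each expansion contributes a function pointer cast to the table's common signature. A toy version of the same construction, with a made-up FOREACH_LOCK list and trivial lock functions (the real macro additionally emits placeholder 0 entries and uses the __kmp_-prefixed lock functions):

#include <cstdint>

typedef std::int32_t kmp_int32;          // stand-in for the runtime's typedef
struct dyna_lock_t { kmp_int32 bits; };  // stand-in for kmp_dyna_lock_t

// Trivial per-kind acquire functions; names are invented for this sketch.
static int acquire_tas_lock(dyna_lock_t *, kmp_int32) { return 1; }
static int acquire_futex_lock(dyna_lock_t *, kmp_int32) { return 1; }

// Toy lock-kind list playing the role of KMP_FOREACH_D_LOCK.
#define FOREACH_LOCK(m, op) m(tas, op) m(futex, op)

// Each expansion casts the per-kind function to the common entry type,
// which after this patch returns int so the acquire status is not lost.
#define expand(l, op) (int (*)(dyna_lock_t *, kmp_int32))op##_##l##_lock,
static int (*direct_set_demo[])(dyna_lock_t *, kmp_int32) = {
    FOREACH_LOCK(expand, acquire)};
#undef expand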
@@ -2968,7 +2967,7 @@ static int (*direct_test_check[])(kmp_dyna_lock_t *, kmp_int32) = {
#undef expand
// Exposes only one set of jump tables (*lock or *lock_with_checks).
-void (*(*__kmp_direct_set))(kmp_dyna_lock_t *, kmp_int32) = 0;
+int (*(*__kmp_direct_set))(kmp_dyna_lock_t *, kmp_int32) = 0;
int (*(*__kmp_direct_unset))(kmp_dyna_lock_t *, kmp_int32) = 0;
int (*(*__kmp_direct_test))(kmp_dyna_lock_t *, kmp_int32) = 0;
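__kmp_direct_set is itself a pointer to one of those tables: at initialization the runtime points it at either direct_set or direct_set_check, depending on whether consistency checks are enabled, and callers index through it by lock tag. Widening its element type to int (*)(kmp_dyna_lock_t *, kmp_int32) lets the acquire status reach the caller. A hedged sketch of that double indirection, with hypothetical two-entry tables and a toy tag extraction in place of the runtime's real tag decoding:

#include <cstdint>

typedef std::int32_t kmp_int32;                 // stand-in for the runtime's typedef
struct dyna_lock_t { kmp_int32 tag_and_bits; }; // stand-in for kmp_dyna_lock_t

// Hypothetical per-kind set functions; both tables happen to share them here.
static int set_kind0(dyna_lock_t *, kmp_int32) { return 1; }
static int set_kind1(dyna_lock_t *, kmp_int32) { return 1; }

static int (*demo_set[])(dyna_lock_t *, kmp_int32) = {set_kind0, set_kind1};
static int (*demo_set_check[])(dyna_lock_t *, kmp_int32) = {set_kind0, set_kind1};

// One table pointer is exposed, mirroring __kmp_direct_set.
static int (*(*demo_direct_set))(dyna_lock_t *, kmp_int32) = 0;

static void demo_init(bool with_checks) {
  demo_direct_set = with_checks ? demo_set_check : demo_set;
}

static int demo_omp_set_lock(dyna_lock_t *lock, kmp_int32 gtid) {
  int tag = lock->tag_and_bits & 1; // toy tag extraction, not the real macro
  return demo_direct_set[tag](lock, gtid);
}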
@@ -2982,13 +2981,13 @@ void (*__kmp_indirect_destroy[])(kmp_user_lock_p) = {
// set/acquire functions
#define expand(l, op) \
- (void (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
-static void (*indirect_set[])(kmp_user_lock_p, kmp_int32) = {
- KMP_FOREACH_I_LOCK(expand, acquire)};
+ (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
+static int (*indirect_set[])(kmp_user_lock_p,
+ kmp_int32) = {KMP_FOREACH_I_LOCK(expand, acquire)};
#undef expand
#define expand(l, op) \
- (void (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
-static void (*indirect_set_check[])(kmp_user_lock_p, kmp_int32) = {
+ (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
+static int (*indirect_set_check[])(kmp_user_lock_p, kmp_int32) = {
KMP_FOREACH_I_LOCK(expand, acquire)};
#undef expand
@@ -3009,7 +3008,7 @@ static int (*indirect_test_check[])(kmp_user_lock_p, kmp_int32) = {
#undef expand
// Exposes only one jump tables (*lock or *lock_with_checks).
-void (*(*__kmp_indirect_set))(kmp_user_lock_p, kmp_int32) = 0;
+int (*(*__kmp_indirect_set))(kmp_user_lock_p, kmp_int32) = 0;
int (*(*__kmp_indirect_unset))(kmp_user_lock_p, kmp_int32) = 0;
int (*(*__kmp_indirect_test))(kmp_user_lock_p, kmp_int32) = 0;
@@ -3164,9 +3163,9 @@ static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock) {
__kmp_release_lock(&__kmp_global_lock, gtid);
}
-static void __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
+static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
- KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
+ return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
}
static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
@@ -3179,11 +3178,11 @@ static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
}
-static void __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
- kmp_int32 gtid) {
+static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
+ kmp_int32 gtid) {
kmp_indirect_lock_t *l =
__kmp_lookup_indirect_lock((void **)lock, "omp_set_lock");
- KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
+ return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
}
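The with_checks wrapper above, like its unchecked counterpart in the previous hunk, now forwards the return value of the per-kind set function instead of discarding it, matching the new int-returning tables. The shape of these wrappers is: translate the user's lock handle into the indirect lock object, then dispatch through that kind's function table and return the result. A simplified sketch with stand-in lookup and dispatch members rather than the real __kmp_lookup_indirect_lock / KMP_I_LOCK_FUNC machinery:

#include <cstdint>

typedef std::int32_t kmp_int32;           // stand-in for the runtime's typedef
struct user_lock_t { int owner; };        // stand-in lock object

struct indirect_lock_t {
  user_lock_t *lock;                        // underlying lock storage
  int (*set_fn)(user_lock_t *, kmp_int32);  // this lock kind's "set" entry
};

// Hypothetical lookup: the real __kmp_lookup_indirect_lock validates the
// handle (reporting errors against the given API name) and maps it to a
// kmp_indirect_lock_t; this sketch just treats the handle as the object.
static indirect_lock_t *lookup_indirect_lock(void **handle, const char * /*func*/) {
  return (indirect_lock_t *)*handle;
}

static int set_indirect_lock_with_checks(void **handle, kmp_int32 gtid) {
  indirect_lock_t *l = lookup_indirect_lock(handle, "omp_set_lock");
  // Forward the acquire status instead of dropping it, mirroring the
  // void -> int change in this hunk.
  return l->set_fn(l->lock, gtid);
}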
static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,