Diffstat (limited to 'openmp/runtime/src/kmp_lock.cpp')
-rw-r--r--  openmp/runtime/src/kmp_lock.cpp  158
1 file changed, 84 insertions(+), 74 deletions(-)
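This change migrates the ticket lock in kmp_lock.cpp from the runtime's hand-rolled ordering macros (TCR_4/TCW_4, KMP_TEST_THEN_INC32, KMP_MB) to C++11 std::atomic operations with explicit memory orders. As a minimal sketch of the technique, assuming a simplified two-counter layout rather than the real kmp_base_ticket_lock_t, the core acquire/release protocol looks like this:

    #include <atomic>
    #include <thread>

    // Minimal C++11 ticket lock sketch; names are illustrative, not the
    // actual kmp_base_ticket_lock_t fields.
    class TicketLock {
        std::atomic<unsigned> next_ticket{0}; // next ticket to hand out
        std::atomic<unsigned> now_serving{0}; // ticket currently served

    public:
        void lock() {
            // Taking a ticket needs only atomicity, not ordering: relaxed.
            unsigned my_ticket =
                next_ticket.fetch_add(1, std::memory_order_relaxed);
            // The load that observes our turn is acquire, so the critical
            // section cannot be reordered above it.
            while (now_serving.load(std::memory_order_acquire) != my_ticket)
                std::this_thread::yield();
        }

        void unlock() {
            // Release pairs with the acquire load above, publishing the
            // critical section's writes to the next owner.
            now_serving.fetch_add(1, std::memory_order_release);
        }
    };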
diff --git a/openmp/runtime/src/kmp_lock.cpp b/openmp/runtime/src/kmp_lock.cpp
index 7bbd3c2a29a..e5fd601a2d9 100644
--- a/openmp/runtime/src/kmp_lock.cpp
+++ b/openmp/runtime/src/kmp_lock.cpp
@@ -14,6 +14,7 @@
#include <stddef.h>
+#include <atomic>
#include "kmp.h"
#include "kmp_itt.h"
@@ -719,47 +720,36 @@ __kmp_destroy_nested_futex_lock_with_checks( kmp_futex_lock_t *lck )
static kmp_int32
__kmp_get_ticket_lock_owner( kmp_ticket_lock_t *lck )
{
- return TCR_4( lck->lk.owner_id ) - 1;
+ return std::atomic_load_explicit( &lck->lk.owner_id, std::memory_order_relaxed ) - 1;
}
static inline bool
__kmp_is_ticket_lock_nestable( kmp_ticket_lock_t *lck )
{
- return lck->lk.depth_locked != -1;
+ return std::atomic_load_explicit( &lck->lk.depth_locked, std::memory_order_relaxed ) != -1;
}
static kmp_uint32
-__kmp_bakery_check(kmp_uint32 value, kmp_uint32 checker)
+__kmp_bakery_check( void *now_serving, kmp_uint32 my_ticket )
{
- register kmp_uint32 pause;
-
- if (value == checker) {
- return TRUE;
- }
- for (pause = checker - value; pause != 0; --pause);
- return FALSE;
+ return std::atomic_load_explicit( (std::atomic<unsigned> *)now_serving, std::memory_order_acquire ) == my_ticket;
}
__forceinline static int
__kmp_acquire_ticket_lock_timed_template( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
- kmp_uint32 my_ticket;
- KMP_MB();
-
- my_ticket = KMP_TEST_THEN_INC32( (kmp_int32 *) &lck->lk.next_ticket );
+ kmp_uint32 my_ticket = std::atomic_fetch_add_explicit( &lck->lk.next_ticket, 1U, std::memory_order_relaxed );
#ifdef USE_LOCK_PROFILE
- if ( TCR_4( lck->lk.now_serving ) != my_ticket )
+ if ( std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_relaxed ) != my_ticket )
__kmp_printf( "LOCK CONTENTION: %p\n", lck );
/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */
- if ( TCR_4( lck->lk.now_serving ) == my_ticket ) {
- KMP_FSYNC_ACQUIRED(lck);
+ if ( std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_acquire ) == my_ticket ) {
return KMP_LOCK_ACQUIRED_FIRST;
}
- KMP_WAIT_YIELD( &lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck );
- KMP_FSYNC_ACQUIRED(lck);
+ KMP_WAIT_YIELD_PTR( &lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck );
return KMP_LOCK_ACQUIRED_FIRST;
}
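The slow path now passes a type-erased pointer to now_serving plus the caller's ticket into a checker that the runtime's wait loop polls. A schematic of that pairing, with wait_yield as a hypothetical stand-in for the real KMP_WAIT_YIELD_PTR:

    #include <atomic>
    #include <thread>

    // Checker in the style of the new __kmp_bakery_check: acquire-load the
    // type-erased now_serving counter and compare it to the caller's ticket.
    static bool bakery_check(void *now_serving, unsigned my_ticket) {
        return static_cast<std::atomic<unsigned> *>(now_serving)
                   ->load(std::memory_order_acquire) == my_ticket;
    }

    // Hypothetical stand-in for KMP_WAIT_YIELD_PTR: poll the checker,
    // yielding between attempts instead of burning the core.
    static void wait_yield(void *addr, unsigned value,
                           bool (*checker)(void *, unsigned)) {
        while (!checker(addr, value))
            std::this_thread::yield();
    }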
@@ -773,7 +763,11 @@ static int
__kmp_acquire_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_set_lock";
- if ( lck->lk.initialized != lck ) {
+
+ if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
+ KMP_FATAL( LockIsUninitialized, func );
+ }
+ if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -785,19 +779,19 @@ __kmp_acquire_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
__kmp_acquire_ticket_lock( lck, gtid );
- lck->lk.owner_id = gtid + 1;
+ std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed );
return KMP_LOCK_ACQUIRED_FIRST;
}
int
__kmp_test_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
- kmp_uint32 my_ticket = TCR_4( lck->lk.next_ticket );
- if ( TCR_4( lck->lk.now_serving ) == my_ticket ) {
+ kmp_uint32 my_ticket = std::atomic_load_explicit( &lck->lk.next_ticket, std::memory_order_relaxed );
+
+ if ( std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_relaxed ) == my_ticket ) {
kmp_uint32 next_ticket = my_ticket + 1;
- if ( KMP_COMPARE_AND_STORE_ACQ32( (kmp_int32 *) &lck->lk.next_ticket,
- my_ticket, next_ticket ) ) {
- KMP_FSYNC_ACQUIRED( lck );
+ if ( std::atomic_compare_exchange_strong_explicit( &lck->lk.next_ticket,
+ &my_ticket, next_ticket, std::memory_order_acquire, std::memory_order_acquire )) {
return TRUE;
}
}
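omp_test_lock maps onto a single compare-exchange: the lock is free exactly when next_ticket equals now_serving, so atomically advancing next_ticket claims it. A self-contained sketch under the same simplified layout as above (note the diff uses acquire for both the success and failure orders):

    #include <atomic>

    // Hypothetical try-lock for the ticket lock sketched earlier: succeed
    // only when the lock is free, i.e. next_ticket still equals now_serving.
    struct TicketLockState {
        std::atomic<unsigned> next_ticket{0};
        std::atomic<unsigned> now_serving{0};
    };

    static bool ticket_try_lock(TicketLockState &lk) {
        unsigned ticket = lk.next_ticket.load(std::memory_order_relaxed);
        if (lk.now_serving.load(std::memory_order_relaxed) != ticket)
            return false; // held or contended; do not wait
        // Claim the ticket only if no other thread grabbed it first; acquire
        // on success orders the critical section after the acquisition.
        return lk.next_ticket.compare_exchange_strong(
            ticket, ticket + 1, std::memory_order_acquire,
            std::memory_order_acquire);
    }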
@@ -808,7 +802,11 @@ static int
__kmp_test_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_test_lock";
- if ( lck->lk.initialized != lck ) {
+
+ if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
+ KMP_FATAL( LockIsUninitialized, func );
+ }
+ if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -818,7 +816,7 @@ __kmp_test_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
int retval = __kmp_test_ticket_lock( lck, gtid );
if ( retval ) {
- lck->lk.owner_id = gtid + 1;
+ std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed );
}
return retval;
}
@@ -826,16 +824,9 @@ __kmp_test_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
int
__kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
- kmp_uint32 distance;
-
- KMP_MB(); /* Flush all pending memory write invalidates. */
-
- KMP_FSYNC_RELEASING(lck);
- distance = ( TCR_4( lck->lk.next_ticket ) - TCR_4( lck->lk.now_serving ) );
+ kmp_uint32 distance = std::atomic_load_explicit( &lck->lk.next_ticket, std::memory_order_relaxed ) - std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_relaxed );
- KMP_ST_REL32( &(lck->lk.now_serving), lck->lk.now_serving + 1 );
-
- KMP_MB(); /* Flush all pending memory write invalidates. */
+ std::atomic_fetch_add_explicit( &lck->lk.now_serving, 1U, std::memory_order_release );
KMP_YIELD( distance
> (kmp_uint32) (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) );
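On release, the distance between next_ticket and now_serving approximates the number of queued waiters, and the unlocking thread yields when that exceeds the processor count. A hedged sketch, with avail_proc standing in for the __kmp_avail_proc/__kmp_xproc choice:

    #include <atomic>
    #include <thread>

    // Release in the style of __kmp_release_ticket_lock: bump now_serving
    // with release semantics, then yield if more threads are waiting than
    // there are processors to run them.
    static void ticket_unlock(std::atomic<unsigned> &next_ticket,
                              std::atomic<unsigned> &now_serving,
                              unsigned avail_proc) {
        unsigned distance = next_ticket.load(std::memory_order_relaxed) -
                            now_serving.load(std::memory_order_relaxed);
        now_serving.fetch_add(1, std::memory_order_release);
        if (distance > avail_proc)
            std::this_thread::yield();
    }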
@@ -846,8 +837,11 @@ static int
__kmp_release_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_lock";
- KMP_MB(); /* in case another processor initialized lock */
- if ( lck->lk.initialized != lck ) {
+
+ if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
+ KMP_FATAL( LockIsUninitialized, func );
+ }
+ if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -860,7 +854,7 @@ __kmp_release_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
&& ( __kmp_get_ticket_lock_owner( lck ) != gtid ) ) {
KMP_FATAL( LockUnsettingSetByAnother, func );
}
- lck->lk.owner_id = 0;
+ std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed );
return __kmp_release_ticket_lock( lck, gtid );
}
@@ -868,11 +862,12 @@ void
__kmp_init_ticket_lock( kmp_ticket_lock_t * lck )
{
lck->lk.location = NULL;
- TCW_4( lck->lk.next_ticket, 0 );
- TCW_4( lck->lk.now_serving, 0 );
- lck->lk.owner_id = 0; // no thread owns the lock.
- lck->lk.depth_locked = -1; // -1 => not a nested lock.
- lck->lk.initialized = (kmp_ticket_lock *)lck;
+ lck->lk.self = lck;
+ std::atomic_store_explicit( &lck->lk.next_ticket, 0U, std::memory_order_relaxed );
+ std::atomic_store_explicit( &lck->lk.now_serving, 0U, std::memory_order_relaxed );
+ std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed ); // no thread owns the lock.
+ std::atomic_store_explicit( &lck->lk.depth_locked, -1, std::memory_order_relaxed ); // -1 => not a nested lock.
+ std::atomic_store_explicit( &lck->lk.initialized, true, std::memory_order_release );
}
static void
@@ -884,19 +879,24 @@ __kmp_init_ticket_lock_with_checks( kmp_ticket_lock_t * lck )
void
__kmp_destroy_ticket_lock( kmp_ticket_lock_t *lck )
{
- lck->lk.initialized = NULL;
- lck->lk.location = NULL;
- lck->lk.next_ticket = 0;
- lck->lk.now_serving = 0;
- lck->lk.owner_id = 0;
- lck->lk.depth_locked = -1;
+ std::atomic_store_explicit( &lck->lk.initialized, false, std::memory_order_release );
+ lck->lk.self = NULL;
+ lck->lk.location = NULL;
+ std::atomic_store_explicit( &lck->lk.next_ticket, 0U, std::memory_order_relaxed );
+ std::atomic_store_explicit( &lck->lk.now_serving, 0U, std::memory_order_relaxed );
+ std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed );
+ std::atomic_store_explicit( &lck->lk.depth_locked, -1, std::memory_order_relaxed );
}
static void
__kmp_destroy_ticket_lock_with_checks( kmp_ticket_lock_t *lck )
{
char const * const func = "omp_destroy_lock";
- if ( lck->lk.initialized != lck ) {
+
+ if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
+ KMP_FATAL( LockIsUninitialized, func );
+ }
+ if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -919,15 +919,13 @@ __kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
KMP_DEBUG_ASSERT( gtid >= 0 );
if ( __kmp_get_ticket_lock_owner( lck ) == gtid ) {
- lck->lk.depth_locked += 1;
+ std::atomic_fetch_add_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed );
return KMP_LOCK_ACQUIRED_NEXT;
}
else {
__kmp_acquire_ticket_lock_timed_template( lck, gtid );
- KMP_MB();
- lck->lk.depth_locked = 1;
- KMP_MB();
- lck->lk.owner_id = gtid + 1;
+ std::atomic_store_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed );
+ std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed );
return KMP_LOCK_ACQUIRED_FIRST;
}
}
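Nesting is layered on top of the plain lock: if the calling thread already owns it, only depth_locked is bumped; otherwise the underlying ticket is acquired first. A minimal sketch, with NestedState and base_acquire as hypothetical stand-ins for the kmp structures:

    #include <atomic>

    // Hypothetical nested acquire in the spirit of
    // __kmp_acquire_nested_ticket_lock: reentrant acquires just count depth.
    struct NestedState {
        std::atomic<int> owner_id{0};      // gtid + 1; 0 means unowned
        std::atomic<int> depth_locked{-1}; // -1 means not a nestable lock
    };

    static void nested_acquire(NestedState &lk, int gtid,
                               void (*base_acquire)(NestedState &)) {
        if (lk.owner_id.load(std::memory_order_relaxed) == gtid + 1) {
            lk.depth_locked.fetch_add(1, std::memory_order_relaxed);
            return; // recursive acquire: no need to touch the ticket
        }
        base_acquire(lk); // take the underlying ticket lock first
        lk.depth_locked.store(1, std::memory_order_relaxed);
        lk.owner_id.store(gtid + 1, std::memory_order_relaxed);
    }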
@@ -936,7 +934,11 @@ static int
__kmp_acquire_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_set_nest_lock";
- if ( lck->lk.initialized != lck ) {
+
+ if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
+ KMP_FATAL( LockIsUninitialized, func );
+ }
+ if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( ! __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -953,16 +955,15 @@ __kmp_test_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
KMP_DEBUG_ASSERT( gtid >= 0 );
if ( __kmp_get_ticket_lock_owner( lck ) == gtid ) {
- retval = ++lck->lk.depth_locked;
+ retval = std::atomic_fetch_add_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed ) + 1;
}
else if ( !__kmp_test_ticket_lock( lck, gtid ) ) {
retval = 0;
}
else {
- KMP_MB();
- retval = lck->lk.depth_locked = 1;
- KMP_MB();
- lck->lk.owner_id = gtid + 1;
+ std::atomic_store_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed );
+ std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed );
+ retval = 1;
}
return retval;
}
@@ -972,7 +973,11 @@ __kmp_test_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck,
kmp_int32 gtid )
{
char const * const func = "omp_test_nest_lock";
- if ( lck->lk.initialized != lck ) {
+
+ if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
+ KMP_FATAL( LockIsUninitialized, func );
+ }
+ if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( ! __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -986,10 +991,8 @@ __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
KMP_DEBUG_ASSERT( gtid >= 0 );
- KMP_MB();
- if ( --(lck->lk.depth_locked) == 0 ) {
- KMP_MB();
- lck->lk.owner_id = 0;
+ if ( ( std::atomic_fetch_add_explicit( &lck->lk.depth_locked, -1, std::memory_order_relaxed ) - 1 ) == 0 ) {
+ std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed );
__kmp_release_ticket_lock( lck, gtid );
return KMP_LOCK_RELEASED;
}
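Release is the mirror image: std::atomic_fetch_add_explicit returns the previous value, so the new depth is the return minus one, and only the outermost release clears the owner and frees the ticket. A hedged sketch with hypothetical names:

    #include <atomic>

    // Hypothetical nested release: fetch_add returns the old depth, so
    // (old - 1) is the new depth; only the outermost release frees the lock.
    struct NestedDepth {
        std::atomic<int> owner_id{0};
        std::atomic<int> depth_locked{-1};
    };

    static bool nested_release(NestedDepth &lk,
                               void (*base_release)(NestedDepth &)) {
        if (lk.depth_locked.fetch_add(-1, std::memory_order_relaxed) - 1 == 0) {
            lk.owner_id.store(0, std::memory_order_relaxed);
            base_release(lk); // outermost release: free the ticket lock
            return true;      // KMP_LOCK_RELEASED in the real runtime
        }
        return false;         // still held by this thread
    }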
@@ -1000,8 +1003,11 @@ static int
__kmp_release_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_nest_lock";
- KMP_MB(); /* in case another processor initialized lock */
- if ( lck->lk.initialized != lck ) {
+
+ if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
+ KMP_FATAL( LockIsUninitialized, func );
+ }
+ if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( ! __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -1020,7 +1026,7 @@ void
__kmp_init_nested_ticket_lock( kmp_ticket_lock_t * lck )
{
__kmp_init_ticket_lock( lck );
- lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
+ std::atomic_store_explicit( &lck->lk.depth_locked, 0, std::memory_order_relaxed ); // >= 0 for nestable locks, -1 for simple locks
}
static void
@@ -1033,14 +1039,18 @@ void
__kmp_destroy_nested_ticket_lock( kmp_ticket_lock_t *lck )
{
__kmp_destroy_ticket_lock( lck );
- lck->lk.depth_locked = 0;
+ std::atomic_store_explicit( &lck->lk.depth_locked, 0, std::memory_order_relaxed );
}
static void
__kmp_destroy_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck )
{
char const * const func = "omp_destroy_nest_lock";
- if ( lck->lk.initialized != lck ) {
+
+ if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
+ KMP_FATAL( LockIsUninitialized, func );
+ }
+ if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( ! __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -1060,7 +1070,7 @@ __kmp_destroy_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck )
static int
__kmp_is_ticket_lock_initialized( kmp_ticket_lock_t *lck )
{
- return lck == lck->lk.initialized;
+ return std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) && ( lck->lk.self == lck);
}
static const ident_t *
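Throughout the *_with_checks paths, the old single test lck->lk.initialized != lck is split in two: an atomic initialized flag, written last with release in __kmp_init_ticket_lock, and a plain self pointer that catches a lock whose memory was copied or moved after initialization. A minimal sketch of that shape, with LockHeader as an illustrative stand-in:

    #include <atomic>

    // Illustrative stand-in for the checked-lock header: the atomic flag is
    // the last field written in init (release store), and self detects a
    // lock that was relocated in memory after being initialized.
    struct LockHeader {
        std::atomic<bool> initialized{false};
        LockHeader *self{nullptr};
    };

    static bool lock_is_valid(LockHeader *lck) {
        return lck->initialized.load(std::memory_order_relaxed) &&
               lck->self == lck;
    }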