Diffstat (limited to 'openmp/runtime/src/kmp_lock.cpp')
-rw-r--r--  openmp/runtime/src/kmp_lock.cpp | 39 ++++++++++++++++++++++++++++++++++-----
1 file changed, 34 insertions(+), 5 deletions(-)
diff --git a/openmp/runtime/src/kmp_lock.cpp b/openmp/runtime/src/kmp_lock.cpp
index 2cca61105e3..a1c258333b8 100644
--- a/openmp/runtime/src/kmp_lock.cpp
+++ b/openmp/runtime/src/kmp_lock.cpp
@@ -22,6 +22,8 @@
#include "kmp_lock.h"
#include "kmp_io.h"
+#include "tsan_annotations.h"
+
#if KMP_USE_FUTEX
# include <unistd.h>
# include <sys/syscall.h>
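
The newly included tsan_annotations.h supplies the ANNOTATE_* macros used throughout this patch. As a minimal sketch, assuming the macros forward to ThreadSanitizer's dynamic-annotation entry points (the exact names and guards in the real header may differ; is_w = 1 marks the lock as write-held):

    // Sketch only: assumed mapping onto TSan's dynamic annotations.
    extern "C" void AnnotateRWLockAcquired(const char *file, int line,
                                           const volatile void *lock, long is_w);
    extern "C" void AnnotateRWLockReleased(const char *file, int line,
                                           const volatile void *lock, long is_w);

    #define ANNOTATE_TAS_ACQUIRED(lck) \
        AnnotateRWLockAcquired(__FILE__, __LINE__, (const volatile void *)(lck), 1)
    #define ANNOTATE_TAS_RELEASED(lck) \
        AnnotateRWLockReleased(__FILE__, __LINE__, (const volatile void *)(lck), 1)
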
@@ -134,7 +136,9 @@ __kmp_acquire_tas_lock_timed_template( kmp_tas_lock_t *lck, kmp_int32 gtid )
int
__kmp_acquire_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
{
- return __kmp_acquire_tas_lock_timed_template( lck, gtid );
+ int retval = __kmp_acquire_tas_lock_timed_template( lck, gtid );
+ ANNOTATE_TAS_ACQUIRED(lck);
+ return retval;
}
static int
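
Every direct acquire wrapper changed in this patch follows the same shape: capture the template's return value, annotate, then return, so the annotation fires only once the lock is actually held. A hypothetical illustration of the pattern (xxx is a placeholder, not a lock type from this file):

    // Acquire-wrapper pattern (sketch).
    int __kmp_acquire_xxx_lock( kmp_xxx_lock_t *lck, kmp_int32 gtid )
    {
        int retval = __kmp_acquire_xxx_lock_timed_template( lck, gtid );
        ANNOTATE_XXX_ACQUIRED(lck); // record the acquisition edge after the
                                    // (possibly blocking) acquire completes
        return retval;
    }
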
@@ -179,6 +183,7 @@ __kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
KMP_MB(); /* Flush all pending memory write invalidates. */
KMP_FSYNC_RELEASING(lck);
+ ANNOTATE_TAS_RELEASED(lck);
KMP_ST_REL32( &(lck->lk.poll), KMP_LOCK_FREE(tas) );
KMP_MB(); /* Flush all pending memory write invalidates. */
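
On the release side the annotation precedes the store that publishes the lock as free. Assuming the usual TSan semantics, the intent of that ordering, spelled out on the lines above:

    // Release ordering (sketch of the lines above, intent as comments).
    KMP_FSYNC_RELEASING(lck);
    ANNOTATE_TAS_RELEASED(lck);  // record the release edge while the lock is
                                 // still held, so a thread that acquires the
                                 // lock afterwards cannot observe an
                                 // un-annotated release
    KMP_ST_REL32( &(lck->lk.poll), KMP_LOCK_FREE(tas) ); // then free the lock
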
@@ -254,6 +259,7 @@ __kmp_acquire_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
}
else {
__kmp_acquire_tas_lock_timed_template( lck, gtid );
+ ANNOTATE_TAS_ACQUIRED(lck);
lck->lk.depth_locked = 1;
return KMP_LOCK_ACQUIRED_FIRST;
}
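
The nested variants annotate only the transition from unlocked to depth 1; a re-entry by the owning thread creates no cross-thread synchronization and is left unannotated. A simplified sketch of the nested TAS acquire (not verbatim):

    // Nested-acquire pattern (sketch).
    if ( __kmp_get_tas_lock_owner( lck ) == gtid ) {
        lck->lk.depth_locked += 1;       // owner re-enters: no new edge
        return KMP_LOCK_ACQUIRED_NEXT;
    }
    __kmp_acquire_tas_lock_timed_template( lck, gtid );
    ANNOTATE_TAS_ACQUIRED(lck);          // only the first real acquisition
    lck->lk.depth_locked = 1;
    return KMP_LOCK_ACQUIRED_FIRST;
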
@@ -467,7 +473,9 @@ __kmp_acquire_futex_lock_timed_template( kmp_futex_lock_t *lck, kmp_int32 gtid )
int
__kmp_acquire_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
{
- return __kmp_acquire_futex_lock_timed_template( lck, gtid );
+ int retval = __kmp_acquire_futex_lock_timed_template( lck, gtid );
+ ANNOTATE_FUTEX_ACQUIRED(lck);
+ return retval;
}
static int
@@ -514,6 +522,7 @@ __kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
lck, lck->lk.poll, gtid ) );
KMP_FSYNC_RELEASING(lck);
+ ANNOTATE_FUTEX_RELEASED(lck);
kmp_int32 poll_val = KMP_XCHG_FIXED32( & ( lck->lk.poll ), KMP_LOCK_FREE(futex) );
@@ -603,6 +612,7 @@ __kmp_acquire_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
}
else {
__kmp_acquire_futex_lock_timed_template( lck, gtid );
+ ANNOTATE_FUTEX_ACQUIRED(lck);
lck->lk.depth_locked = 1;
return KMP_LOCK_ACQUIRED_FIRST;
}
@@ -756,7 +766,9 @@ __kmp_acquire_ticket_lock_timed_template( kmp_ticket_lock_t *lck, kmp_int32 gtid
int
__kmp_acquire_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
- return __kmp_acquire_ticket_lock_timed_template( lck, gtid );
+ int retval = __kmp_acquire_ticket_lock_timed_template( lck, gtid );
+ ANNOTATE_TICKET_ACQUIRED(lck);
+ return retval;
}
static int
@@ -826,6 +838,7 @@ __kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
kmp_uint32 distance = std::atomic_load_explicit( &lck->lk.next_ticket, std::memory_order_relaxed ) - std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_relaxed );
+ ANNOTATE_TICKET_RELEASED(lck);
std::atomic_fetch_add_explicit( &lck->lk.now_serving, 1U, std::memory_order_release );
KMP_YIELD( distance
@@ -924,6 +937,7 @@ __kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
}
else {
__kmp_acquire_ticket_lock_timed_template( lck, gtid );
+ ANNOTATE_TICKET_ACQUIRED(lck);
std::atomic_store_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed );
std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed );
return KMP_LOCK_ACQUIRED_FIRST;
@@ -1418,7 +1432,9 @@ __kmp_acquire_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
{
KMP_DEBUG_ASSERT( gtid >= 0 );
- return __kmp_acquire_queuing_lock_timed_template<false>( lck, gtid );
+ int retval = __kmp_acquire_queuing_lock_timed_template<false>( lck, gtid );
+ ANNOTATE_QUEUING_ACQUIRED(lck);
+ return retval;
}
static int
@@ -1468,6 +1484,7 @@ __kmp_test_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
if ( KMP_COMPARE_AND_STORE_ACQ32( head_id_p, 0, -1 ) ) {
KA_TRACE( 1000, ("__kmp_test_queuing_lock: T#%d exiting: holding lock\n", gtid ));
KMP_FSYNC_ACQUIRED(lck);
+ ANNOTATE_QUEUING_ACQUIRED(lck);
return TRUE;
}
}
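
In the trylock paths only a successful compare-and-swap acquires the lock, so only that branch is annotated; a failed attempt establishes no synchronization and must not be reported as an acquisition. A generic sketch of the rule (hypothetical helper, C++11 atomics for brevity):

    // Trylock annotation rule (sketch).
    bool try_acquire( std::atomic<kmp_int32> *head_id_p, kmp_queuing_lock_t *lck )
    {
        kmp_int32 expected = 0;
        if ( head_id_p->compare_exchange_strong( expected, -1 ) ) {
            ANNOTATE_QUEUING_ACQUIRED(lck); // success: real acquisition
            return true;
        }
        return false;                       // failure: no edge, no annotation
    }
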
@@ -1518,6 +1535,7 @@ __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
KMP_DEBUG_ASSERT( this_thr->th.th_next_waiting == 0 );
KMP_FSYNC_RELEASING(lck);
+ ANNOTATE_QUEUING_RELEASED(lck);
while( 1 ) {
kmp_int32 dequeued;
@@ -1722,6 +1740,7 @@ __kmp_acquire_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
}
else {
__kmp_acquire_queuing_lock_timed_template<false>( lck, gtid );
+ ANNOTATE_QUEUING_ACQUIRED(lck);
KMP_MB();
lck->lk.depth_locked = 1;
KMP_MB();
@@ -2370,6 +2389,7 @@ __kmp_acquire_adaptive_lock( kmp_adaptive_lock_t * lck, kmp_int32 gtid )
__kmp_acquire_queuing_lock_timed_template<FALSE>( GET_QLK_PTR(lck), gtid );
// We have acquired the base lock, so count that.
KMP_INC_STAT(lck,nonSpeculativeAcquires );
+ ANNOTATE_QUEUING_ACQUIRED(lck);
}
static void
@@ -2657,7 +2677,9 @@ __kmp_acquire_drdpa_lock_timed_template( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
int
__kmp_acquire_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
{
- return __kmp_acquire_drdpa_lock_timed_template( lck, gtid );
+ int retval = __kmp_acquire_drdpa_lock_timed_template( lck, gtid );
+ ANNOTATE_DRDPA_ACQUIRED(lck);
+ return retval;
}
static int
@@ -2751,6 +2773,7 @@ __kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
KA_TRACE(1000, ("__kmp_release_drdpa_lock: ticket #%lld released lock %p\n",
ticket - 1, lck));
KMP_FSYNC_RELEASING(lck);
+ ANNOTATE_DRDPA_RELEASED(lck);
KMP_ST_REL64(&(polls[ticket & mask].poll), ticket); // volatile store
return KMP_LOCK_RELEASED;
}
@@ -2856,6 +2879,7 @@ __kmp_acquire_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
}
else {
__kmp_acquire_drdpa_lock_timed_template( lck, gtid );
+ ANNOTATE_DRDPA_ACQUIRED(lck);
KMP_MB();
lck->lk.depth_locked = 1;
KMP_MB();
@@ -4034,12 +4058,17 @@ __kmp_user_lock_allocate( void **user_lock, kmp_int32 gtid,
if ( __kmp_lock_pool == NULL ) {
// Lock pool is empty. Allocate new memory.
+
+ // ANNOTATION: Found no good way to express the synchronization
+ // between allocation and usage, so ignore the allocation.
+ ANNOTATE_IGNORE_WRITES_BEGIN();
if ( __kmp_num_locks_in_block <= 1 ) { // Tune this cutoff point.
lck = (kmp_user_lock_p) __kmp_allocate( __kmp_user_lock_size );
}
else {
lck = __kmp_lock_block_allocate();
}
+ ANNOTATE_IGNORE_WRITES_END();
// Insert lock in the table so that it can be freed in __kmp_cleanup,
// and debugger has info on all allocated locks.
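
ANNOTATE_IGNORE_WRITES_BEGIN/END bracket a region in which the current thread's writes are hidden from the race detector, suppressing reports on the freshly allocated lock memory whose handoff to later users the annotation vocabulary cannot express. A sketch of the assumed underlying interface (the real header may differ):

    // Sketch only: assumed mapping onto TSan's ignore-writes interface.
    extern "C" void AnnotateIgnoreWritesBegin(const char *file, int line);
    extern "C" void AnnotateIgnoreWritesEnd(const char *file, int line);

    #define ANNOTATE_IGNORE_WRITES_BEGIN() \
        AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
    #define ANNOTATE_IGNORE_WRITES_END() \
        AnnotateIgnoreWritesEnd(__FILE__, __LINE__)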