path: root/openmp/runtime/src/kmp_lock.cpp
author    Andrey Churbanov <Andrey.Churbanov@intel.com>    2015-04-29 15:52:19 +0000
committer Andrey Churbanov <Andrey.Churbanov@intel.com>    2015-04-29 15:52:19 +0000
commit    8d09facaf283caa8676939d428ab4575a063e9e5 (patch)
tree      36f7ecfc049aead6041cb591837e41b32c1e3c93 /openmp/runtime/src/kmp_lock.cpp
parent    ba742f58af493f22709310388dd72951137fb82e (diff)
This is a preparation patch that reports the actual release status of nested locks, for use by OMPT.
llvm-svn: 236114
Diffstat (limited to 'openmp/runtime/src/kmp_lock.cpp')
-rw-r--r--    openmp/runtime/src/kmp_lock.cpp    89
1 file changed, 53 insertions(+), 36 deletions(-)
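
The change is uniform across the TAS, futex, ticket, queuing, adaptive, and DRDPA lock variants: each release routine now returns an int so a caller (ultimately the OMPT support this patch prepares for) can tell an actual release apart from a mere decrement of the nesting depth. Below is a minimal standalone sketch of that pattern, not the runtime code itself; the demo_* names are hypothetical, and only KMP_LOCK_RELEASED / KMP_LOCK_STILL_HELD mirror the constants used in the diff.

/* Sketch of the return-status pattern for nested lock release. */
#include <stdio.h>

#define KMP_LOCK_RELEASED   1
#define KMP_LOCK_STILL_HELD 0

typedef struct { int depth_locked; } demo_nest_lock_t;   /* hypothetical type */

static int demo_release_nested(demo_nest_lock_t *lck)
{
    if (--(lck->depth_locked) == 0) {
        /* the real routines release the underlying lock here */
        return KMP_LOCK_RELEASED;
    }
    return KMP_LOCK_STILL_HELD;
}

int main(void)
{
    demo_nest_lock_t lck = { .depth_locked = 2 };        /* acquired twice */

    /* An OMPT-style caller would emit a "lock released" event only when
       the routine reports an actual release. */
    for (int i = 0; i < 2; ++i) {
        if (demo_release_nested(&lck) == KMP_LOCK_RELEASED)
            printf("unset %d: lock actually released\n", i + 1);
        else
            printf("unset %d: lock still held (nested)\n", i + 1);
    }
    return 0;
}
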
diff --git a/openmp/runtime/src/kmp_lock.cpp b/openmp/runtime/src/kmp_lock.cpp
index 541b7ebcfb3..6e7c59cff96 100644
--- a/openmp/runtime/src/kmp_lock.cpp
+++ b/openmp/runtime/src/kmp_lock.cpp
@@ -171,7 +171,7 @@ __kmp_test_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
return __kmp_test_tas_lock( lck, gtid );
}
-void
+int
__kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
{
KMP_MB(); /* Flush all pending memory write invalidates. */
@@ -182,9 +182,10 @@ __kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
KMP_YIELD( TCR_4( __kmp_nth ) > ( __kmp_avail_proc ? __kmp_avail_proc :
__kmp_xproc ) );
+ return KMP_LOCK_RELEASED;
}
-static void
+static int
__kmp_release_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_lock";
@@ -200,7 +201,7 @@ __kmp_release_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
&& ( __kmp_get_tas_lock_owner( lck ) != gtid ) ) {
KMP_FATAL( LockUnsettingSetByAnother, func );
}
- __kmp_release_tas_lock( lck, gtid );
+ return __kmp_release_tas_lock( lck, gtid );
}
void
@@ -294,7 +295,7 @@ __kmp_test_nested_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
return __kmp_test_nested_tas_lock( lck, gtid );
}
-void
+int
__kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
{
KMP_DEBUG_ASSERT( gtid >= 0 );
@@ -302,10 +303,12 @@ __kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
KMP_MB();
if ( --(lck->lk.depth_locked) == 0 ) {
__kmp_release_tas_lock( lck, gtid );
+ return KMP_LOCK_RELEASED;
}
+ return KMP_LOCK_STILL_HELD;
}
-static void
+static int
__kmp_release_nested_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_nest_lock";
@@ -319,7 +322,7 @@ __kmp_release_nested_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
if ( __kmp_get_tas_lock_owner( lck ) != gtid ) {
KMP_FATAL( LockUnsettingSetByAnother, func );
}
- __kmp_release_nested_tas_lock( lck, gtid );
+ return __kmp_release_nested_tas_lock( lck, gtid );
}
void
@@ -497,7 +500,7 @@ __kmp_test_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
return __kmp_test_futex_lock( lck, gtid );
}
-void
+int
__kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
{
KMP_MB(); /* Flush all pending memory write invalidates. */
@@ -525,9 +528,10 @@ __kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
KMP_YIELD( TCR_4( __kmp_nth ) > ( __kmp_avail_proc ? __kmp_avail_proc :
__kmp_xproc ) );
+ return KMP_LOCK_RELEASED;
}
-static void
+static int
__kmp_release_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_lock";
@@ -543,7 +547,7 @@ __kmp_release_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
&& ( __kmp_get_futex_lock_owner( lck ) != gtid ) ) {
KMP_FATAL( LockUnsettingSetByAnother, func );
}
- __kmp_release_futex_lock( lck, gtid );
+ return __kmp_release_futex_lock( lck, gtid );
}
void
@@ -637,7 +641,7 @@ __kmp_test_nested_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid
return __kmp_test_nested_futex_lock( lck, gtid );
}
-void
+int
__kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
{
KMP_DEBUG_ASSERT( gtid >= 0 );
@@ -645,10 +649,12 @@ __kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
KMP_MB();
if ( --(lck->lk.depth_locked) == 0 ) {
__kmp_release_futex_lock( lck, gtid );
+ return KMP_LOCK_RELEASED;
}
+ return KMP_LOCK_STILL_HELD;
}
-static void
+static int
__kmp_release_nested_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_nest_lock";
@@ -662,7 +668,7 @@ __kmp_release_nested_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gt
if ( __kmp_get_futex_lock_owner( lck ) != gtid ) {
KMP_FATAL( LockUnsettingSetByAnother, func );
}
- __kmp_release_nested_futex_lock( lck, gtid );
+ return __kmp_release_nested_futex_lock( lck, gtid );
}
void
@@ -809,7 +815,7 @@ __kmp_test_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
return retval;
}
-void
+int
__kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
kmp_uint32 distance;
@@ -825,9 +831,10 @@ __kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
KMP_YIELD( distance
> (kmp_uint32) (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) );
+ return KMP_LOCK_RELEASED;
}
-static void
+static int
__kmp_release_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_lock";
@@ -846,7 +853,7 @@ __kmp_release_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
KMP_FATAL( LockUnsettingSetByAnother, func );
}
lck->lk.owner_id = 0;
- __kmp_release_ticket_lock( lck, gtid );
+ return __kmp_release_ticket_lock( lck, gtid );
}
void
@@ -964,7 +971,7 @@ __kmp_test_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck,
return __kmp_test_nested_ticket_lock( lck, gtid );
}
-void
+int
__kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
KMP_DEBUG_ASSERT( gtid >= 0 );
@@ -974,10 +981,12 @@ __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
KMP_MB();
lck->lk.owner_id = 0;
__kmp_release_ticket_lock( lck, gtid );
+ return KMP_LOCK_RELEASED;
}
+ return KMP_LOCK_STILL_HELD;
}
-static void
+static int
__kmp_release_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_nest_lock";
@@ -994,7 +1003,7 @@ __kmp_release_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32
if ( __kmp_get_ticket_lock_owner( lck ) != gtid ) {
KMP_FATAL( LockUnsettingSetByAnother, func );
}
- __kmp_release_nested_ticket_lock( lck, gtid );
+ return __kmp_release_nested_ticket_lock( lck, gtid );
}
void
@@ -1435,7 +1444,7 @@ __kmp_test_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int32 gtid )
return retval;
}
-void
+int
__kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
{
register kmp_info_t *this_thr;
@@ -1482,7 +1491,7 @@ __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
#ifdef DEBUG_QUEUING_LOCKS
TRACE_LOCK_HT( gtid+1, "rel exit: ", 0, 0 );
#endif
- return;
+ return KMP_LOCK_RELEASED;
}
dequeued = FALSE;
@@ -1556,7 +1565,7 @@ __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
#ifdef DEBUG_QUEUING_LOCKS
TRACE_LOCK( gtid+1, "rel exit 2" );
#endif
- return;
+ return KMP_LOCK_RELEASED;
}
/* KMP_CPU_PAUSE( ); don't want to make releasing thread hold up acquiring threads */
@@ -1566,9 +1575,10 @@ __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
} /* while */
KMP_ASSERT2( 0, "should not get here" );
+ return KMP_LOCK_RELEASED;
}
-static void
+static int
__kmp_release_queuing_lock_with_checks( kmp_queuing_lock_t *lck,
kmp_int32 gtid )
{
@@ -1587,7 +1597,7 @@ __kmp_release_queuing_lock_with_checks( kmp_queuing_lock_t *lck,
KMP_FATAL( LockUnsettingSetByAnother, func );
}
lck->lk.owner_id = 0;
- __kmp_release_queuing_lock( lck, gtid );
+ return __kmp_release_queuing_lock( lck, gtid );
}
void
@@ -1711,7 +1721,7 @@ __kmp_test_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck,
return __kmp_test_nested_queuing_lock( lck, gtid );
}
-void
+int
__kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
{
KMP_DEBUG_ASSERT( gtid >= 0 );
@@ -1721,10 +1731,12 @@ __kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
KMP_MB();
lck->lk.owner_id = 0;
__kmp_release_queuing_lock( lck, gtid );
+ return KMP_LOCK_RELEASED;
}
+ return KMP_LOCK_STILL_HELD;
}
-static void
+static int
__kmp_release_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_nest_lock";
@@ -1741,7 +1753,7 @@ __kmp_release_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int3
if ( __kmp_get_queuing_lock_owner( lck ) != gtid ) {
KMP_FATAL( LockUnsettingSetByAnother, func );
}
- __kmp_release_nested_queuing_lock( lck, gtid );
+ return __kmp_release_nested_queuing_lock( lck, gtid );
}
void
@@ -2312,7 +2324,7 @@ __kmp_acquire_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck, kmp_int32 gti
lck->lk.qlk.owner_id = gtid + 1;
}
-static void
+static int
__kmp_release_adaptive_lock( kmp_adaptive_lock_t *lck, kmp_int32 gtid )
{
if ( __kmp_is_unlocked_queuing_lock( GET_QLK_PTR(lck) ) )
@@ -2327,9 +2339,10 @@ __kmp_release_adaptive_lock( kmp_adaptive_lock_t *lck, kmp_int32 gtid )
// so should use the underlying lock's release scheme.
__kmp_release_queuing_lock( GET_QLK_PTR(lck), gtid );
}
+ return KMP_LOCK_RELEASED;
}
-static void
+static int
__kmp_release_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_lock";
@@ -2345,6 +2358,7 @@ __kmp_release_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck, kmp_int32 gti
}
lck->lk.qlk.owner_id = 0;
__kmp_release_adaptive_lock( lck, gtid );
+ return KMP_LOCK_RELEASED;
}
static void
@@ -2655,7 +2669,7 @@ __kmp_test_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
return retval;
}
-void
+int
__kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
{
//
@@ -2672,9 +2686,10 @@ __kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
ticket - 1, lck));
KMP_FSYNC_RELEASING(lck);
KMP_ST_REL64(&(polls[ticket & mask].poll), ticket); // volatile store
+ return KMP_LOCK_RELEASED;
}
-static void
+static int
__kmp_release_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_lock";
@@ -2693,7 +2708,7 @@ __kmp_release_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
KMP_FATAL( LockUnsettingSetByAnother, func );
}
lck->lk.owner_id = 0;
- __kmp_release_drdpa_lock( lck, gtid );
+ return __kmp_release_drdpa_lock( lck, gtid );
}
void
@@ -2829,7 +2844,7 @@ __kmp_test_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid
return __kmp_test_nested_drdpa_lock( lck, gtid );
}
-void
+int
__kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
{
KMP_DEBUG_ASSERT( gtid >= 0 );
@@ -2839,10 +2854,12 @@ __kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
KMP_MB();
lck->lk.owner_id = 0;
__kmp_release_drdpa_lock( lck, gtid );
+ return KMP_LOCK_RELEASED;
}
+ return KMP_LOCK_STILL_HELD;
}
-static void
+static int
__kmp_release_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_nest_lock";
@@ -2859,7 +2876,7 @@ __kmp_release_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gt
if ( __kmp_get_drdpa_lock_owner( lck ) != gtid ) {
KMP_FATAL( LockUnsettingSetByAnother, func );
}
- __kmp_release_nested_drdpa_lock( lck, gtid );
+ return __kmp_release_nested_drdpa_lock( lck, gtid );
}
void
@@ -3519,14 +3536,14 @@ kmp_int32 ( *__kmp_get_user_lock_owner_ )( kmp_user_lock_p lck ) = NULL;
void ( *__kmp_acquire_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
int ( *__kmp_test_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
-void ( *__kmp_release_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
+int ( *__kmp_release_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
void ( *__kmp_init_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;
void ( *__kmp_destroy_user_lock_ )( kmp_user_lock_p lck ) = NULL;
void ( *__kmp_destroy_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;
void ( *__kmp_acquire_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
int ( *__kmp_test_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
-void ( *__kmp_release_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
+int ( *__kmp_release_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
void ( *__kmp_init_nested_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;
void ( *__kmp_destroy_nested_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;