| field | value | date |
|---|---|---|
| author | Jonathan Peyton <jonathan.l.peyton@intel.com> | 2018-08-09 22:04:30 +0000 |
| committer | Jonathan Peyton <jonathan.l.peyton@intel.com> | 2018-08-09 22:04:30 +0000 |
| commit | baad3f6016b87cbd03578a1df6c6ea69280c4608 | |
| tree | 3e56c3fb3ee58e87ca95e731d4ed4ec9eec793c0 /openmp/runtime/src/kmp_lock.cpp | |
| parent | 7da7b56830d94447e75479cbac71e5813496da38 | |
| download | bcm5719-llvm-baad3f6016b87cbd03578a1df6c6ea69280c4608.tar.gz, bcm5719-llvm-baad3f6016b87cbd03578a1df6c6ea69280c4608.zip | |
[OpenMP] Cleanup code
This patch removes unused functions and variables, fixes sign-compare issues,
and addresses several warning flags that are now enabled, including
-Wcast-qual. Not all of the warning flags in LibompHandleFlags.cmake are
enabled yet, but this patch enables some of them.
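For readers unfamiliar with these flags, here is a hypothetical illustration (these snippets are not from the patch) of the two named warning categories and their usual fixes:

```cpp
#include <cstddef>

// -Wsign-compare: comparing integers of different signedness.
bool fits(int n, unsigned int limit) {
  // return n < limit;            // warns: signed/unsigned comparison
  return n >= 0 && static_cast<unsigned int>(n) < limit; // fixed
}

// -Wcast-qual: a cast that silently drops a const qualifier.
std::size_t first_byte(const unsigned char *p) {
  // unsigned char *q = (unsigned char *)p; // warns: casts away const
  const unsigned char *q = p; // fixed: preserve the qualifier
  return static_cast<std::size_t>(*q);
}
```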
Some __kmp_gtid_from_* macros in kmp.h are converted to static inline
functions, which allows the awkward comma-operator definitions of the
KMP_DEBUG_ASSERT() and KMP_ASSERT() macros to be removed. This change was
needed to eliminate the innumerable -Wunused-value warnings that
KMP_DEBUG_ASSERT() generated.
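To make the macro problem concrete, here is a minimal sketch (hypothetical names; the real definitions in kmp.h differ in detail) of why a comma-operator assert macro trips -Wunused-value, and why converting the expression macros to static inline functions removes the need for it:

```cpp
#include <cassert>

// Old style (hypothetical reconstruction): an assert that must be usable
// inside expression-like macros is forced into comma-operator form.
#define OLD_DEBUG_ASSERT(cond) (assert(cond), 0)

// Used as a plain statement, the trailing 0 is discarded, and compilers
// warn "right operand of comma operator has no effect" (-Wunused-value):
//   OLD_DEBUG_ASSERT(ptr != nullptr);

// Once the expression macros that required this become static inline
// functions, the assert can be an ordinary statement macro with no value:
#define NEW_DEBUG_ASSERT(cond)                                                 \
  do {                                                                         \
    assert(cond);                                                              \
  } while (0)

struct kmp_team_sketch {
  int gtid;
};

// Sketch of the macro-to-function conversion: inside a function body the
// assert is a statement, so no comma operator is needed.
static inline int gtid_from_team(const kmp_team_sketch *team) {
  NEW_DEBUG_ASSERT(team != nullptr);
  return team->gtid;
}
```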
Differential Revision: https://reviews.llvm.org/D49105
llvm-svn: 339393
Diffstat (limited to 'openmp/runtime/src/kmp_lock.cpp')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | openmp/runtime/src/kmp_lock.cpp | 151 |

1 file changed, 90 insertions, 61 deletions
```diff
diff --git a/openmp/runtime/src/kmp_lock.cpp b/openmp/runtime/src/kmp_lock.cpp
index 8deb1fb6da9..16834c65861 100644
--- a/openmp/runtime/src/kmp_lock.cpp
+++ b/openmp/runtime/src/kmp_lock.cpp
@@ -197,10 +197,6 @@ void __kmp_init_tas_lock(kmp_tas_lock_t *lck) {
   lck->lk.poll = KMP_LOCK_FREE(tas);
 }
 
-static void __kmp_init_tas_lock_with_checks(kmp_tas_lock_t *lck) {
-  __kmp_init_tas_lock(lck);
-}
-
 void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck) { lck->lk.poll = 0; }
 
 static void __kmp_destroy_tas_lock_with_checks(kmp_tas_lock_t *lck) {
@@ -297,10 +293,6 @@ void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck) {
   lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
 }
 
-static void __kmp_init_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
-  __kmp_init_nested_tas_lock(lck);
-}
-
 void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck) {
   __kmp_destroy_tas_lock(lck);
   lck->lk.depth_locked = 0;
@@ -510,10 +502,6 @@ void __kmp_init_futex_lock(kmp_futex_lock_t *lck) {
   TCW_4(lck->lk.poll, KMP_LOCK_FREE(futex));
 }
 
-static void __kmp_init_futex_lock_with_checks(kmp_futex_lock_t *lck) {
-  __kmp_init_futex_lock(lck);
-}
-
 void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { lck->lk.poll = 0; }
 
 static void __kmp_destroy_futex_lock_with_checks(kmp_futex_lock_t *lck) {
@@ -610,10 +598,6 @@ void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck) {
   lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
 }
 
-static void __kmp_init_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
-  __kmp_init_nested_futex_lock(lck);
-}
-
 void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck) {
   __kmp_destroy_futex_lock(lck);
   lck->lk.depth_locked = 0;
@@ -800,10 +784,6 @@ void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck) {
                              std::memory_order_release);
 }
 
-static void __kmp_init_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
-  __kmp_init_ticket_lock(lck);
-}
-
 void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck) {
   std::atomic_store_explicit(&lck->lk.initialized, false,
                              std::memory_order_release);
@@ -955,10 +935,6 @@ void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck) {
   // >= 0 for nestable locks, -1 for simple locks
 }
 
-static void __kmp_init_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
-  __kmp_init_nested_ticket_lock(lck);
-}
-
 void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck) {
   __kmp_destroy_ticket_lock(lck);
   std::atomic_store_explicit(&lck->lk.depth_locked, 0,
@@ -987,12 +963,6 @@ __kmp_destroy_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
 
 // access functions to fields which don't exist for all lock kinds.
 
-static int __kmp_is_ticket_lock_initialized(kmp_ticket_lock_t *lck) {
-  return std::atomic_load_explicit(&lck->lk.initialized,
-                                   std::memory_order_relaxed) &&
-         (lck->lk.self == lck);
-}
-
 static const ident_t *__kmp_get_ticket_lock_location(kmp_ticket_lock_t *lck) {
   return lck->lk.location;
 }
@@ -1577,10 +1547,6 @@ void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck) {
   KA_TRACE(1000, ("__kmp_init_queuing_lock: lock %p initialized\n", lck));
 }
 
-static void __kmp_init_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
-  __kmp_init_queuing_lock(lck);
-}
-
 void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck) {
   lck->lk.initialized = NULL;
   lck->lk.location = NULL;
@@ -1706,11 +1672,6 @@ void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck) {
   lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
 }
 
-static void
-__kmp_init_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
-  __kmp_init_nested_queuing_lock(lck);
-}
-
 void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck) {
   __kmp_destroy_queuing_lock(lck);
   lck->lk.depth_locked = 0;
@@ -1733,10 +1694,6 @@ __kmp_destroy_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
 
 // access functions to fields which don't exist for all lock kinds.
 
-static int __kmp_is_queuing_lock_initialized(kmp_queuing_lock_t *lck) {
-  return lck == lck->lk.initialized;
-}
-
 static const ident_t *__kmp_get_queuing_lock_location(kmp_queuing_lock_t *lck) {
   return lck->lk.location;
 }
@@ -2252,10 +2209,6 @@ static void __kmp_init_adaptive_lock(kmp_adaptive_lock_t *lck) {
   KA_TRACE(1000, ("__kmp_init_adaptive_lock: lock %p initialized\n", lck));
 }
 
-static void __kmp_init_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
-  __kmp_init_adaptive_lock(lck);
-}
-
 static void __kmp_destroy_adaptive_lock(kmp_adaptive_lock_t *lck) {
 #if KMP_DEBUG_ADAPTIVE_LOCKS
   __kmp_accumulate_speculative_stats(&lck->lk.adaptive);
@@ -2553,10 +2506,6 @@ void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck) {
   KA_TRACE(1000, ("__kmp_init_drdpa_lock: lock %p initialized\n", lck));
 }
 
-static void __kmp_init_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
-  __kmp_init_drdpa_lock(lck);
-}
-
 void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck) {
   lck->lk.initialized = NULL;
   lck->lk.location = NULL;
@@ -2689,10 +2638,6 @@ void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
   lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
 }
 
-static void __kmp_init_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
-  __kmp_init_nested_drdpa_lock(lck);
-}
-
 void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
   __kmp_destroy_drdpa_lock(lck);
   lck->lk.depth_locked = 0;
@@ -2714,10 +2659,6 @@ static void __kmp_destroy_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
 
 // access functions to fields which don't exist for all lock kinds.
 
-static int __kmp_is_drdpa_lock_initialized(kmp_drdpa_lock_t *lck) {
-  return lck == lck->lk.initialized;
-}
-
 static const ident_t *__kmp_get_drdpa_lock_location(kmp_drdpa_lock_t *lck) {
   return lck->lk.location;
 }
@@ -2797,6 +2738,10 @@ static inline kmp_uint32 swap4(kmp_uint32 volatile *p, kmp_uint32 v) {
 
 static void __kmp_destroy_hle_lock(kmp_dyna_lock_t *lck) { TCW_4(*lck, 0); }
 
+static void __kmp_destroy_hle_lock_with_checks(kmp_dyna_lock_t *lck) {
+  TCW_4(*lck, 0);
+}
+
 static void __kmp_acquire_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
   // Use gtid for KMP_LOCK_BUSY if necessary
   if (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle)) {
@@ -2846,6 +2791,10 @@ static void __kmp_destroy_rtm_lock(kmp_queuing_lock_t *lck) {
   __kmp_destroy_queuing_lock(lck);
 }
 
+static void __kmp_destroy_rtm_lock_with_checks(kmp_queuing_lock_t *lck) {
+  __kmp_destroy_queuing_lock_with_checks(lck);
+}
+
 static void __kmp_acquire_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
   unsigned retries = 3, status;
   do {
@@ -2934,7 +2883,12 @@ void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t) = {
 
 // destroy functions
 #define expand(l, op) 0, (void (*)(kmp_dyna_lock_t *))__kmp_##op##_##l##_lock,
-void (*__kmp_direct_destroy[])(kmp_dyna_lock_t *) = {
+static void (*direct_destroy[])(kmp_dyna_lock_t *) = {
+    __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
+#undef expand
+#define expand(l, op)                                                         \
+  0, (void (*)(kmp_dyna_lock_t *))__kmp_destroy_##l##_lock_with_checks,
+static void (*direct_destroy_check[])(kmp_dyna_lock_t *) = {
     __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
 #undef expand
 
@@ -2969,6 +2923,7 @@ static int (*direct_test_check[])(kmp_dyna_lock_t *, kmp_int32) = {
 #undef expand
 
 // Exposes only one set of jump tables (*lock or *lock_with_checks).
+void (*(*__kmp_direct_destroy))(kmp_dyna_lock_t *) = 0;
 int (*(*__kmp_direct_set))(kmp_dyna_lock_t *, kmp_int32) = 0;
 int (*(*__kmp_direct_unset))(kmp_dyna_lock_t *, kmp_int32) = 0;
 int (*(*__kmp_direct_test))(kmp_dyna_lock_t *, kmp_int32) = 0;
@@ -2977,7 +2932,15 @@ int (*(*__kmp_direct_test))(kmp_dyna_lock_t *, kmp_int32) = 0;
 #define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
 void (*__kmp_indirect_init[])(kmp_user_lock_p) = {
     KMP_FOREACH_I_LOCK(expand, init)};
-void (*__kmp_indirect_destroy[])(kmp_user_lock_p) = {
+#undef expand
+
+#define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
+static void (*indirect_destroy[])(kmp_user_lock_p) = {
+    KMP_FOREACH_I_LOCK(expand, destroy)};
+#undef expand
+#define expand(l, op)                                                         \
+  (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock_with_checks,
+static void (*indirect_destroy_check[])(kmp_user_lock_p) = {
     KMP_FOREACH_I_LOCK(expand, destroy)};
 #undef expand
 
@@ -3010,6 +2973,7 @@ static int (*indirect_test_check[])(kmp_user_lock_p, kmp_int32) = {
 #undef expand
 
 // Exposes only one jump tables (*lock or *lock_with_checks).
+void (*(*__kmp_indirect_destroy))(kmp_user_lock_p) = 0;
 int (*(*__kmp_indirect_set))(kmp_user_lock_p, kmp_int32) = 0;
 int (*(*__kmp_indirect_unset))(kmp_user_lock_p, kmp_int32) = 0;
 int (*(*__kmp_indirect_test))(kmp_user_lock_p, kmp_int32) = 0;
@@ -3239,16 +3203,20 @@ void __kmp_init_dynamic_user_locks() {
     __kmp_direct_set = direct_set_check;
     __kmp_direct_unset = direct_unset_check;
     __kmp_direct_test = direct_test_check;
+    __kmp_direct_destroy = direct_destroy_check;
     __kmp_indirect_set = indirect_set_check;
     __kmp_indirect_unset = indirect_unset_check;
     __kmp_indirect_test = indirect_test_check;
+    __kmp_indirect_destroy = indirect_destroy_check;
   } else {
     __kmp_direct_set = direct_set;
     __kmp_direct_unset = direct_unset;
     __kmp_direct_test = direct_test;
+    __kmp_direct_destroy = direct_destroy;
     __kmp_indirect_set = indirect_set;
     __kmp_indirect_unset = indirect_unset;
     __kmp_indirect_test = indirect_test;
+    __kmp_indirect_destroy = indirect_destroy;
   }
 
   // If the user locks have already been initialized, then return. Allow the
   // switch between different KMP_CONSISTENCY_CHECK values, but do not allocate
@@ -3370,6 +3338,67 @@ int __kmp_num_locks_in_block = 1; // FIXME - tune this value
 
 #else // KMP_USE_DYNAMIC_LOCK
 
+static void __kmp_init_tas_lock_with_checks(kmp_tas_lock_t *lck) {
+  __kmp_init_tas_lock(lck);
+}
+
+static void __kmp_init_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
+  __kmp_init_nested_tas_lock(lck);
+}
+
+#if KMP_USE_FUTEX
+static void __kmp_init_futex_lock_with_checks(kmp_futex_lock_t *lck) {
+  __kmp_init_futex_lock(lck);
+}
+
+static void __kmp_init_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
+  __kmp_init_nested_futex_lock(lck);
+}
+#endif
+
+static int __kmp_is_ticket_lock_initialized(kmp_ticket_lock_t *lck) {
+  return lck == lck->lk.initialized;
+}
+
+static void __kmp_init_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
+  __kmp_init_ticket_lock(lck);
+}
+
+static void __kmp_init_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
+  __kmp_init_nested_ticket_lock(lck);
+}
+
+static int __kmp_is_queuing_lock_initialized(kmp_queuing_lock_t *lck) {
+  return lck == lck->lk.initialized;
+}
+
+static void __kmp_init_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
+  __kmp_init_queuing_lock(lck);
+}
+
+static void
+__kmp_init_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
+  __kmp_init_nested_queuing_lock(lck);
+}
+
+#if KMP_USE_ADAPTIVE_LOCKS
+static void __kmp_init_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
+  __kmp_init_adaptive_lock(lck);
+}
+#endif
+
+static int __kmp_is_drdpa_lock_initialized(kmp_drdpa_lock_t *lck) {
+  return lck == lck->lk.initialized;
+}
+
+static void __kmp_init_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
+  __kmp_init_drdpa_lock(lck);
+}
+
+static void __kmp_init_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
+  __kmp_init_nested_drdpa_lock(lck);
+}
+
 /* user locks
  * They are implemented as a table of function pointers which are set to the
  * lock functions of the appropriate kind, once that has been determined. */
```
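The mechanism this patch extends to the destroy operation is the same one already used for set/unset/test: each operation has two parallel jump tables, one plain and one _with_checks, and a single exported pointer-to-table is aimed at the right one during initialization. A minimal self-contained sketch of that dispatch scheme (hypothetical names and types, not the actual libomp declarations):

```cpp
#include <cstdio>

typedef int lock_t; // stand-in for kmp_dyna_lock_t

static void destroy_fast(lock_t *l) { *l = 0; }

static void destroy_checked(lock_t *l) {
  // The _with_checks variant validates state before acting.
  if (*l == 0)
    std::fprintf(stderr, "warning: destroying an uninitialized lock\n");
  *l = 0;
}

// Two parallel jump tables, indexed by lock kind (one kind shown here).
static void (*destroy_table[])(lock_t *) = {destroy_fast};
static void (*destroy_table_check[])(lock_t *) = {destroy_checked};

// The single exported pointer selects an entire table at once, mirroring
// the shape of __kmp_direct_destroy in the patch.
static void (**destroy_dispatch)(lock_t *) = nullptr;

static void init_lock_tables(bool consistency_checks) {
  destroy_dispatch = consistency_checks ? destroy_table_check : destroy_table;
}

int main() {
  init_lock_tables(true);
  lock_t l = 0;
  destroy_dispatch[0](&l); // dispatch through the selected table
  return 0;
}
```

Flipping one pointer per operation at startup keeps the per-call cost to a single indexed indirect call, which is presumably why the patch adds __kmp_direct_destroy and __kmp_indirect_destroy rather than branching on the consistency-check mode inside every destroy function.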

