| field     | value                                            | date                      |
|-----------|--------------------------------------------------|---------------------------|
| author    | Andrey Churbanov <Andrey.Churbanov@intel.com>    | 2015-03-10 09:03:42 +0000 |
| committer | Andrey Churbanov <Andrey.Churbanov@intel.com>    | 2015-03-10 09:03:42 +0000 |
| commit    | 7b2ab719fe1babf22e2a4168b7bf346b7ec750af (patch) |                           |
| tree      | 4ff7d9641b6e8e97cc53381589a6d06f9a890f5a /openmp |                           |
| parent    | 128755741f8d223777edfb77b70204314158d9b4 (diff)  |                           |
Adding some 8-bit atomic operations for future use
llvm-svn: 231774
Diffstat (limited to 'openmp')

| mode       | file                                       | lines changed |
|------------|--------------------------------------------|---------------|
| -rw-r--r-- | openmp/runtime/src/kmp_os.h                | 17            |
| -rw-r--r-- | openmp/runtime/src/z_Linux_util.c          | 51            |
| -rw-r--r-- | openmp/runtime/src/z_Windows_NT-586_util.c | 52            |

3 files changed, 118 insertions(+), 2 deletions(-)
```diff
diff --git a/openmp/runtime/src/kmp_os.h b/openmp/runtime/src/kmp_os.h
index 1112096d9ee..20868cd5b4e 100644
--- a/openmp/runtime/src/kmp_os.h
+++ b/openmp/runtime/src/kmp_os.h
@@ -458,6 +458,9 @@ enum kmp_mem_fence_type {
 # define KMP_TEST_THEN_DEC_ACQ32(p)     InterlockedExchangeAdd( (volatile long *)(p), -1 )
 # define KMP_TEST_THEN_ADD32(p, v)      InterlockedExchangeAdd( (volatile long *)(p), (v) )
 
+extern kmp_int8 __kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 v );
+extern kmp_int8 __kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 v );
+extern kmp_int8 __kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 v );
 # define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) InterlockedCompareExchange( (volatile long *)(p),(long)(sv),(long)(cv) )
 # define KMP_XCHG_FIXED32(p, v)         InterlockedExchange( (volatile long *)(p), (long)(v) )
 
@@ -494,8 +497,11 @@ extern kmp_int32 __kmp_xchg_fixed32( volatile kmp_int32 *p, kmp_int32 v );
 extern kmp_int64 __kmp_xchg_fixed64( volatile kmp_int64 *p, kmp_int64 v );
 extern kmp_real32 __kmp_xchg_real32( volatile kmp_real32 *p, kmp_real32 v );
 extern kmp_real64 __kmp_xchg_real64( volatile kmp_real64 *p, kmp_real64 v );
+# define KMP_TEST_THEN_ADD8(p, v)       __kmp_test_then_add8( (p), (v) )
 
 //# define KMP_TEST_THEN_INC32(p)       __kmp_test_then_add32( (p), 1 )
+# define KMP_TEST_THEN_OR8(p, v)        __kmp_test_then_or8( (p), (v) )
+# define KMP_TEST_THEN_AND8(p, v)       __kmp_test_then_and8( (p), (v) )
 //# define KMP_TEST_THEN_INC_ACQ32(p)   __kmp_test_then_add32( (p), 1 )
 # define KMP_TEST_THEN_INC64(p)         __kmp_test_then_add64( (p), 1LL )
 # define KMP_TEST_THEN_INC_ACQ64(p)     __kmp_test_then_add64( (p), 1LL )
@@ -544,9 +550,12 @@ extern kmp_real64 __kmp_xchg_real64( volatile kmp_real64 *p, kmp_real64 v );
 
 #elif (KMP_ASM_INTRINS && KMP_OS_UNIX) || !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
 
+# define KMP_TEST_THEN_ADD8(p, v)       __sync_fetch_and_add( (kmp_int8 *)(p), (v) )
 /* cast p to correct type so that proper intrinsic will be used */
 # define KMP_TEST_THEN_INC32(p)         __sync_fetch_and_add( (kmp_int32 *)(p), 1 )
+# define KMP_TEST_THEN_OR8(p, v)        __sync_fetch_and_or( (kmp_int8 *)(p), (v) )
+# define KMP_TEST_THEN_AND8(p, v)       __sync_fetch_and_and( (kmp_int8 *)(p), (v) )
 # define KMP_TEST_THEN_INC_ACQ32(p)     __sync_fetch_and_add( (kmp_int32 *)(p), 1 )
 # define KMP_TEST_THEN_INC64(p)         __sync_fetch_and_add( (kmp_int64 *)(p), 1LL )
 # define KMP_TEST_THEN_INC_ACQ64(p)     __sync_fetch_and_add( (kmp_int64 *)(p), 1LL )
@@ -586,6 +595,9 @@ extern kmp_real64 __kmp_xchg_real64( volatile kmp_real64 *p, kmp_real64 v );
 #define KMP_XCHG_FIXED32(p, v)  __sync_lock_test_and_set( (volatile kmp_uint32 *)(p), (kmp_uint32)(v) )
 #define KMP_XCHG_FIXED64(p, v)  __sync_lock_test_and_set( (volatile kmp_uint64 *)(p), (kmp_uint64)(v) )
 
+extern kmp_int8 __kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 v );
+extern kmp_int8 __kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 v );
+extern kmp_int8 __kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 v );
 inline kmp_real32 KMP_XCHG_REAL32( volatile kmp_real32 *p, kmp_real32 v)
 {
     kmp_int32 tmp = __sync_lock_test_and_set( (kmp_int32*)p, *(kmp_int32*)&v);
@@ -621,9 +633,12 @@ extern kmp_int16 __kmp_xchg_fixed16( volatile kmp_int16 *p, kmp_int16 v );
 extern kmp_int32 __kmp_xchg_fixed32( volatile kmp_int32 *p, kmp_int32 v );
 extern kmp_int64 __kmp_xchg_fixed64( volatile kmp_int64 *p, kmp_int64 v );
 extern kmp_real32 __kmp_xchg_real32( volatile kmp_real32 *p, kmp_real32 v );
+# define KMP_TEST_THEN_ADD8(p, v)       __kmp_test_then_add8( (p), (v) )
 extern kmp_real64 __kmp_xchg_real64( volatile kmp_real64 *p, kmp_real64 v );
 
 # define KMP_TEST_THEN_INC32(p)         __kmp_test_then_add32( (p), 1 )
+# define KMP_TEST_THEN_OR8(p, v)        __kmp_test_then_or8( (p), (v) )
+# define KMP_TEST_THEN_AND8(p, v)       __kmp_test_then_and8( (p), (v) )
 # define KMP_TEST_THEN_INC_ACQ32(p)     __kmp_test_then_add32( (p), 1 )
 # define KMP_TEST_THEN_INC64(p)         __kmp_test_then_add64( (p), 1LL )
 # define KMP_TEST_THEN_INC_ACQ64(p)     __kmp_test_then_add64( (p), 1LL )
@@ -713,6 +728,8 @@ extern kmp_real64 __kmp_xchg_real64( volatile kmp_real64 *p, kmp_real64 v );
 # define KMP_LD_ACQ64(A)        ( *(A) )
 #endif
 
+#define TCR_1(a)            (a)
+#define TCW_1(a,b)          (a) = (b)
 /* ------------------------------------------------------------------------ */
 //
 // FIXME - maybe this should this be
diff --git a/openmp/runtime/src/z_Linux_util.c b/openmp/runtime/src/z_Linux_util.c
index a7b4eb12757..4e3236db564 100644
--- a/openmp/runtime/src/z_Linux_util.c
+++ b/openmp/runtime/src/z_Linux_util.c
@@ -437,6 +437,40 @@ __kmp_change_thread_affinity_mask( int gtid, kmp_affin_mask_t *new_mask,
     if ( old_mask != NULL ) {
         status = __kmp_get_system_affinity( old_mask, TRUE );
         int error = errno;
+kmp_int8
+__kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 d )
+{
+    kmp_int8 old_value, new_value;
+
+    old_value = TCR_1( *p );
+    new_value = old_value | d;
+
+    while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
+    {
+        KMP_CPU_PAUSE();
+        old_value = TCR_1( *p );
+        new_value = old_value | d;
+    }
+    return old_value;
+}
+
+kmp_int8
+__kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 d )
+{
+    kmp_int8 old_value, new_value;
+
+    old_value = TCR_1( *p );
+    new_value = old_value & d;
+
+    while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
+    {
+        KMP_CPU_PAUSE();
+        old_value = TCR_1( *p );
+        new_value = old_value & d;
+    }
+    return old_value;
+}
+
         if ( status != 0 ) {
             __kmp_msg(
                 kmp_ms_fatal,
@@ -472,6 +506,23 @@ __kmp_change_thread_affinity_mask( int gtid, kmp_affin_mask_t *new_mask,
 
 #if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && !KMP_OS_CNK
 
+kmp_int8
+__kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 d )
+{
+    kmp_int8 old_value, new_value;
+
+    old_value = TCR_1( *p );
+    new_value = old_value + d;
+
+    while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
+    {
+        KMP_CPU_PAUSE();
+        old_value = TCR_1( *p );
+        new_value = old_value + d;
+    }
+    return old_value;
+}
+
 int
 __kmp_futex_determine_capable()
 {
diff --git a/openmp/runtime/src/z_Windows_NT-586_util.c b/openmp/runtime/src/z_Windows_NT-586_util.c
index 7b99f75e0f7..3aeafae9102 100644
--- a/openmp/runtime/src/z_Windows_NT-586_util.c
+++ b/openmp/runtime/src/z_Windows_NT-586_util.c
@@ -20,6 +20,40 @@
  * use compare_and_store for these routines
  */
 
+kmp_int8
+__kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 d )
+{
+    kmp_int8 old_value, new_value;
+
+    old_value = TCR_1( *p );
+    new_value = old_value | d;
+
+    while ( ! __kmp_compare_and_store8 ( p, old_value, new_value ) )
+    {
+        KMP_CPU_PAUSE();
+        old_value = TCR_1( *p );
+        new_value = old_value | d;
+    }
+    return old_value;
+}
+
+kmp_int8
+__kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 d )
+{
+    kmp_int8 old_value, new_value;
+
+    old_value = TCR_1( *p );
+    new_value = old_value & d;
+
+    while ( ! __kmp_compare_and_store8 ( p, old_value, new_value ) )
+    {
+        KMP_CPU_PAUSE();
+        old_value = TCR_1( *p );
+        new_value = old_value & d;
+    }
+    return old_value;
+}
+
 kmp_int32
 __kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 d )
 {
@@ -34,7 +68,6 @@ __kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 d )
         old_value = TCR_4( *p );
         new_value = old_value | d;
     }
-
     return old_value;
 }
 
@@ -55,6 +88,22 @@ __kmp_test_then_and32( volatile kmp_int32 *p, kmp_int32 d )
     return old_value;
 }
 
+kmp_int8
+__kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 d )
+{
+    kmp_int64 old_value, new_value;
+
+    old_value = TCR_1( *p );
+    new_value = old_value + d;
+    while ( ! __kmp_compare_and_store8 ( p, old_value, new_value ) )
+    {
+        KMP_CPU_PAUSE();
+        old_value = TCR_1( *p );
+        new_value = old_value + d;
+    }
+    return old_value;
+}
+
 #if KMP_ARCH_X86
 kmp_int64
 __kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d )
@@ -69,7 +118,6 @@ __kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d )
         old_value = TCR_8( *p );
         new_value = old_value + d;
     }
-
    return old_value;
 }
 #endif /* KMP_ARCH_X86 */
```
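
All three new routines use the same lock-free read-modify-write pattern: read the byte, compute the new value, and retry with a compare-and-swap until no other thread raced in between. The sketch below models that loop as a stand-alone C program. It is an illustration only, not the runtime's code: GCC's `__sync_bool_compare_and_swap` stands in for `KMP_COMPARE_AND_STORE_REL8` / `__kmp_compare_and_store8`, a plain volatile read stands in for `TCR_1` (which the patch defines to expand to a plain access anyway), the `KMP_CPU_PAUSE()` backoff is omitted, and the names `fetch_then_or8` / `byte_t` are invented for the example.

```c
#include <stdio.h>

typedef signed char byte_t;   /* stand-in for kmp_int8 */

/* Model of the retry loop added by this patch. */
static byte_t fetch_then_or8( volatile byte_t *p, byte_t d )
{
    byte_t old_value = *p;            /* TCR_1( *p ) in the patch */
    byte_t new_value = old_value | d;

    /* Retry until the CAS succeeds, i.e. until *p still held
     * old_value at the moment of the swap. */
    while ( ! __sync_bool_compare_and_swap( p, old_value, new_value ) )
    {
        old_value = *p;               /* another thread won: re-read */
        new_value = old_value | d;    /* and recompute               */
    }
    return old_value;                 /* fetch-and-op returns the prior value */
}

int main( void )
{
    volatile byte_t flags = 0x01;
    byte_t prev = fetch_then_or8( &flags, 0x04 );
    printf( "prev=0x%02x now=0x%02x\n", prev & 0xff, flags & 0xff );
    return 0;    /* prints prev=0x01 now=0x05 */
}
```

Returning the value observed before the update is what makes the routine usable as a test-then-op primitive: the caller can inspect the old bits to learn whether it was the thread that set them.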
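On the `KMP_ASM_INTRINS && KMP_OS_UNIX` path no loop is needed at all: the new macros expand directly to GCC's byte-wide `__sync` builtins, e.g. `KMP_TEST_THEN_AND8(p, v)` becomes `__sync_fetch_and_and( (kmp_int8 *)(p), (v) )`. A minimal check of the semantics the macros rely on, using only the plain GCC builtin with no OpenMP runtime involved:

```c
#include <stdio.h>

int main( void )
{
    signed char mask = 0x7f;

    /* Fetch-and-and atomically clears bits and returns the prior
     * value, matching what KMP_TEST_THEN_AND8 expands to here. */
    signed char before = __sync_fetch_and_and( &mask, (signed char)~0x01 );

    printf( "before=0x%02x after=0x%02x\n", before & 0xff, mask & 0xff );
    return 0;    /* prints before=0x7f after=0x7e */
}
```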