| field | value | |
|---|---|---|
| author | Dmitry Vyukov <dvyukov@google.com> | 2012-05-14 15:33:00 +0000 |
| committer | Dmitry Vyukov <dvyukov@google.com> | 2012-05-14 15:33:00 +0000 |
| commit | 572c5b2a44f6d27f4311ac1ed9765a6a18d7b82d (patch) | |
| tree | ae14c03077836bfd0250ed385539a24ea8eb6e8c /compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc | |
| parent | 665ce2a2f4763050b51e83d71208a8ede6091c3c (diff) | |
| download | bcm5719-llvm-572c5b2a44f6d27f4311ac1ed9765a6a18d7b82d.tar.gz, bcm5719-llvm-572c5b2a44f6d27f4311ac1ed9765a6a18d7b82d.zip | |
tsan: add more atomics to public interface (fetch_or/and/xor + 1-,2-byte versions)
llvm-svn: 156766
Diffstat (limited to 'compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc | 127 |
1 file changed, 127 insertions(+), 0 deletions(-)
```diff
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
index 7e5f191a53e..7d42b2375b5 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -112,6 +112,39 @@ static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
 }
 
 template<typename T>
+static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
+    morder mo) {
+  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+    Release(thr, pc, (uptr)a);
+  v = __sync_fetch_and_and(a, v);
+  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+    Acquire(thr, pc, (uptr)a);
+  return v;
+}
+
+template<typename T>
+static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
+    morder mo) {
+  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+    Release(thr, pc, (uptr)a);
+  v = __sync_fetch_and_or(a, v);
+  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+    Acquire(thr, pc, (uptr)a);
+  return v;
+}
+
+template<typename T>
+static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
+    morder mo) {
+  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+    Release(thr, pc, (uptr)a);
+  v = __sync_fetch_and_xor(a, v);
+  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+    Acquire(thr, pc, (uptr)a);
+  return v;
+}
+
+template<typename T>
 static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
     morder mo) {
   if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
@@ -162,6 +195,14 @@ void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(Store, a, v, mo);
 }
 
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
+  SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
+  SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
 a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
@@ -170,6 +211,14 @@ a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
 
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
+  SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
+  SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
 a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
@@ -178,6 +227,64 @@ a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
 
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
+  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
+  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
+  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
+  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
+  SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
+  SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
+  SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
+  SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
+  SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
+  SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
+  SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
+  SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
+    morder mo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
+    morder mo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
 int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
     morder mo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo);
@@ -188,6 +295,26 @@ int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
   SCOPED_ATOMIC(CAS, a, c, v, mo);
 }
 
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
+    morder mo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
+    morder mo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
+    morder mo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
+    morder mo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
 void __tsan_atomic_thread_fence(morder mo) {
   char* a;
   SCOPED_ATOMIC(Fence, mo);
```
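For reference, here is a minimal sketch (not part of the commit) of user code that these entry points are meant to cover: when a program is built with `clang++ -fsanitize=thread`, 1- and 2-byte atomic read-modify-write operations like the ones below are expected to be routed to the corresponding `__tsan_atomic8_*`/`__tsan_atomic16_*` runtime functions added in this patch. The lowering noted in the comments is assumed from the interface names above, not taken from this commit.

```cpp
// Sketch only: C++11 atomics exercising the newly added entry points.
// The __tsan_* names in the comments are the assumed lowering targets.
#include <atomic>

std::atomic<unsigned char>  flags{0};  // 1-byte atomic -> __tsan_atomic8_*
std::atomic<unsigned short> state{0};  // 2-byte atomic -> __tsan_atomic16_*

void publish_bit() {
  // Assumed to lower to __tsan_atomic8_fetch_or(&flags, 0x1, release).
  flags.fetch_or(0x1, std::memory_order_release);
}

unsigned short keep_high_byte() {
  // Assumed to lower to __tsan_atomic16_fetch_and(&state, 0xFF00, acq_rel).
  return state.fetch_and(0xFF00, std::memory_order_acq_rel);
}

bool try_claim(unsigned char expected) {
  // Assumed to lower to __tsan_atomic8_compare_exchange_weak(&flags, &expected, 0x2, ...).
  return flags.compare_exchange_weak(expected, 0x2,
                                     std::memory_order_acq_rel,
                                     std::memory_order_acquire);
}
```

Note that, per the diff, both the strong and weak compare-exchange entry points dispatch to the same `AtomicCAS` helper, so at the runtime level the weak variants currently behave like strong CAS.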