| author | Dmitry Vyukov <dvyukov@google.com> | 2012-11-27 07:41:27 +0000 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2012-11-27 07:41:27 +0000 |
| commit | 59d58665ee262c0b732bd6e4e69b3a53cb6a2f24 | |
| tree | 5762ad519f6442746e7fd08dda21fec9edc2ab1d | |
| parent | 10362c46f133366e7bfb3e70d93b31fb72c44026 | |
tsan: add 128-bit atomic operations
llvm-svn: 168683
| -rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc | 55 |
|---|---|---|
| -rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h | 36 |
| -rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_stat.cc | 1 |
| -rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_stat.h | 1 |
4 files changed, 88 insertions(+), 5 deletions(-)
```diff
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
index c6cd63fbbc2..29f95ef59fe 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -47,6 +47,7 @@ typedef __tsan_atomic8 a8;
 typedef __tsan_atomic16 a16;
 typedef __tsan_atomic32 a32;
 typedef __tsan_atomic64 a64;
+typedef __tsan_atomic128 a128;
 const morder mo_relaxed = __tsan_memory_order_relaxed;
 const morder mo_consume = __tsan_memory_order_consume;
 const morder mo_acquire = __tsan_memory_order_acquire;
@@ -60,7 +61,8 @@ static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
   StatInc(thr, size == 1 ? StatAtomic1
              : size == 2 ? StatAtomic2
              : size == 4 ? StatAtomic4
-             :             StatAtomic8);
+             : size == 8 ? StatAtomic8
+             :             StatAtomic16);
   StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
              : mo == mo_consume ? StatAtomicConsume
              : mo == mo_acquire ? StatAtomicAcquire
@@ -296,6 +298,10 @@ a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
   SCOPED_ATOMIC(Load, a, mo);
 }
 
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
+  SCOPED_ATOMIC(Load, a, mo);
+}
+
 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(Store, a, v, mo);
 }
@@ -312,6 +318,10 @@ void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(Store, a, v, mo);
 }
 
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(Store, a, v, mo);
+}
+
 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
@@ -328,6 +338,10 @@ a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
 
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
@@ -344,6 +358,10 @@ a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
@@ -360,6 +378,10 @@ a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
@@ -376,6 +398,10 @@ a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
@@ -392,6 +418,10 @@ a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
@@ -408,6 +438,10 @@ a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchNand, a, v, mo);
 }
@@ -424,6 +458,10 @@ a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(FetchNand, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
 int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
@@ -444,6 +482,11 @@ int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
 int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
@@ -464,6 +507,11 @@ int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
 a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
@@ -483,6 +531,11 @@ a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
 void __tsan_atomic_thread_fence(morder mo) {
   char* a;
   SCOPED_ATOMIC(Fence, mo);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h
index 996b0f708b5..37786d55a16 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h
@@ -17,10 +17,11 @@
 extern "C" {
 #endif
 
-typedef char __tsan_atomic8;
-typedef short __tsan_atomic16;  // NOLINT
-typedef int __tsan_atomic32;
-typedef long __tsan_atomic64;  // NOLINT
+typedef char     __tsan_atomic8;
+typedef short    __tsan_atomic16;  // NOLINT
+typedef int      __tsan_atomic32;
+typedef long     __tsan_atomic64;  // NOLINT
+typedef __int128 __tsan_atomic128;
 
 // Part of ABI, do not change.
 // http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
@@ -41,6 +42,8 @@ __tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
     __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
     __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+    __tsan_memory_order mo);
 
 void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
     __tsan_memory_order mo);
@@ -50,6 +53,8 @@ void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
     __tsan_memory_order mo);
 void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
     __tsan_memory_order mo);
+void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+    __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -59,6 +64,8 @@ __tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -68,6 +75,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -77,6 +86,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -86,6 +97,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -95,6 +108,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -104,6 +119,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -113,6 +130,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
     __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
@@ -126,6 +145,9 @@ int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
 int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
     __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 
 int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
     __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
@@ -139,6 +161,9 @@ int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
 int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
     __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 
 __tsan_atomic8 __tsan_atomic8_compare_exchange_val(
     volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
@@ -152,6 +177,9 @@ __tsan_atomic32 __tsan_atomic32_compare_exchange_val(
 __tsan_atomic64 __tsan_atomic64_compare_exchange_val(
     volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
     __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+    volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
 
 void __tsan_atomic_thread_fence(__tsan_memory_order mo);
 void __tsan_atomic_signal_fence(__tsan_memory_order mo);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_stat.cc b/compiler-rt/lib/tsan/rtl/tsan_stat.cc
index c155ae61131..bef45255df6 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_stat.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_stat.cc
@@ -94,6 +94,7 @@ void StatOutput(u64 *stat) {
   name[StatAtomic2] = " size 2 ";
   name[StatAtomic4] = " size 4 ";
   name[StatAtomic8] = " size 8 ";
+  name[StatAtomic16] = " size 16 ";
 
   name[StatInterceptor] = "Interceptors ";
   name[StatInt_longjmp] = " longjmp ";
diff --git a/compiler-rt/lib/tsan/rtl/tsan_stat.h b/compiler-rt/lib/tsan/rtl/tsan_stat.h
index 58bfd3ee128..94a80516e0b 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_stat.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_stat.h
@@ -90,6 +90,7 @@ enum StatType {
   StatAtomic2,
   StatAtomic4,
   StatAtomic8,
+  StatAtomic16,
 
   // Interceptors.
   StatInterceptor,
```
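For orientation, here is a minimal sketch (not part of the commit) of the kind of user code these entry points exist to serve. Under `-fsanitize=thread`, the compiler's TSan instrumentation pass rewrites atomic operations into calls to the `__tsan_atomic*` runtime functions, so an `__atomic_*` builtin on a 16-byte object is what ends up in `__tsan_atomic128_load` and friends. The build flags in the comment (`-mcx16` for a lock-free `cmpxchg16b` path on x86_64) are toolchain assumptions, not something this patch controls.

```cpp
// Hypothetical usage sketch, e.g. built with: clang++ -fsanitize=thread -mcx16
// (flags are an assumption about the toolchain, not part of this patch).
#include <cstdio>

// __int128 is the GCC/Clang extension behind the new __tsan_atomic128
// typedef; 16-byte alignment is needed for lock-free 128-bit operations.
alignas(16) static __int128 counter = 0;

int main() {
  // Under TSan instrumentation, these builtins are expected to be routed to
  // __tsan_atomic128_fetch_add / __tsan_atomic128_load, which now also bump
  // the new StatAtomic16 counter in the runtime's statistics.
  __atomic_fetch_add(&counter, (__int128)1, __ATOMIC_SEQ_CST);
  __int128 v = __atomic_load_n(&counter, __ATOMIC_ACQUIRE);
  std::printf("counter (low 64 bits) = %llu\n", (unsigned long long)v);
  return 0;
}
```

Before this change the runtime had no 128-bit path at all; the `StatAtomic16` additions in tsan_stat.cc and tsan_stat.h make the new size visible in TSan's statistics output alongside sizes 1 through 8.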

