author    Dmitry Vyukov <dvyukov@google.com>  2012-10-03 13:00:13 +0000
committer Dmitry Vyukov <dvyukov@google.com>  2012-10-03 13:00:13 +0000
commit    be6878365df8204261786a001fe160ccf4a4b76b (patch)
tree      06e8cd2205a9b202b746e47545ff37b4adb83d8a
parent    b914d14e67977206ead545cf92796254881d7a54 (diff)
download  bcm5719-llvm-be6878365df8204261786a001fe160ccf4a4b76b.tar.gz
          bcm5719-llvm-be6878365df8204261786a001fe160ccf4a4b76b.zip
tsan: prepare for migration to new memory_order enum values (ABI compatible)
llvm-svn: 165106
-rw-r--r--  compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc  51
-rw-r--r--  compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h    2
2 files changed, 37 insertions(+), 16 deletions(-)
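
Note on the change (illustration, not part of the patch): in this revision the runtime still uses the old bitmask-style memory_order values, while the new SCOPED_ATOMIC prologue strips a fixed 100500 bias from any incoming value so callers built against transitional headers keep working, and the bitwise order checks are replaced by explicit predicates so the code no longer relies on the values being one-hot bits. A minimal standalone sketch of both ideas follows; constants beyond mo_consume are assumed to continue the 1 << n pattern shown in tsan_interface_atomic.h.

    // sketch.cc -- illustration only; names mirror the patch, values beyond
    // mo_consume are assumptions.
    #include <cassert>

    typedef enum {
      mo_relaxed = 1 << 0,
      mo_consume = 1 << 1,
      mo_acquire = 1 << 2,   // assumed
      mo_release = 1 << 3,   // assumed
      mo_acq_rel = 1 << 4,   // assumed
      mo_seq_cst = 1 << 5,   // assumed
    } morder;

    typedef unsigned u32;

    static bool IsAcquireOrder(morder mo) {
      return mo == mo_consume || mo == mo_acquire
          || mo == mo_acq_rel || mo == mo_seq_cst;
    }

    static bool IsReleaseOrder(morder mo) {
      return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
    }

    // Mirrors the new SCOPED_ATOMIC prologue: values biased by 100500
    // (the transitional ABI trick) are mapped back before use.
    static morder Normalize(u32 raw) {
      if (raw > 100500)
        raw -= 100500;
      return (morder)raw;
    }

    int main() {
      const morder all[] = {mo_relaxed, mo_consume, mo_acquire,
                            mo_release, mo_acq_rel, mo_seq_cst};
      for (unsigned i = 0; i < sizeof(all) / sizeof(all[0]); i++) {
        morder mo = all[i];
        // The predicates accept exactly the orders the old bitwise tests did.
        bool old_acq = mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst);
        bool old_rel = mo & (mo_release | mo_acq_rel | mo_seq_cst);
        assert(IsAcquireOrder(mo) == old_acq);
        assert(IsReleaseOrder(mo) == old_rel);
        // A biased value (e.g. from a newer header during the migration)
        // normalizes back to the same order before the checks run.
        assert(Normalize((u32)mo + 100500) == mo);
      }
      return 0;
    }
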
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
index ce807f053cd..5f79ae09fa2 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -61,7 +61,26 @@ static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
: StatAtomicSeq_Cst);
}
+static bool IsLoadOrder(morder mo) {
+ return mo == mo_relaxed || mo == mo_consume
+ || mo == mo_acquire || mo == mo_seq_cst;
+}
+
+static bool IsStoreOrder(morder mo) {
+ return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
+}
+
+static bool IsReleaseOrder(morder mo) {
+ return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
+static bool IsAcquireOrder(morder mo) {
+ return mo == mo_consume || mo == mo_acquire
+ || mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
#define SCOPED_ATOMIC(func, ...) \
+ if ((u32)mo > 100500) mo = (morder)((u32)mo - 100500); \
mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
ThreadState *const thr = cur_thread(); \
const uptr pc = (uptr)__builtin_return_address(0); \
@@ -73,9 +92,9 @@ static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
morder mo) {
- CHECK(mo & (mo_relaxed | mo_consume | mo_acquire | mo_seq_cst));
+ CHECK(IsLoadOrder(mo));
T v = *a;
- if (mo & (mo_consume | mo_acquire | mo_seq_cst))
+ if (IsAcquireOrder(mo))
Acquire(thr, pc, (uptr)a);
return v;
}
@@ -83,8 +102,8 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
- CHECK(mo & (mo_relaxed | mo_release | mo_seq_cst));
- if (mo & (mo_release | mo_seq_cst))
+ CHECK(IsStoreOrder(mo));
+ if (IsReleaseOrder(mo))
ReleaseStore(thr, pc, (uptr)a);
*a = v;
}
@@ -92,10 +111,10 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
- if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+ if (IsReleaseOrder(mo))
Release(thr, pc, (uptr)a);
v = __sync_lock_test_and_set(a, v);
- if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+ if (IsAcquireOrder(mo))
Acquire(thr, pc, (uptr)a);
return v;
}
@@ -103,10 +122,10 @@ static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
- if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+ if (IsReleaseOrder(mo))
Release(thr, pc, (uptr)a);
v = __sync_fetch_and_add(a, v);
- if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+ if (IsAcquireOrder(mo))
Acquire(thr, pc, (uptr)a);
return v;
}
@@ -114,10 +133,10 @@ static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
- if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+ if (IsReleaseOrder(mo))
Release(thr, pc, (uptr)a);
v = __sync_fetch_and_and(a, v);
- if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+ if (IsAcquireOrder(mo))
Acquire(thr, pc, (uptr)a);
return v;
}
@@ -125,10 +144,10 @@ static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
- if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+ if (IsReleaseOrder(mo))
Release(thr, pc, (uptr)a);
v = __sync_fetch_and_or(a, v);
- if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+ if (IsAcquireOrder(mo))
Acquire(thr, pc, (uptr)a);
return v;
}
@@ -136,10 +155,10 @@ static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
- if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+ if (IsReleaseOrder(mo))
Release(thr, pc, (uptr)a);
v = __sync_fetch_and_xor(a, v);
- if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+ if (IsAcquireOrder(mo))
Acquire(thr, pc, (uptr)a);
return v;
}
@@ -147,11 +166,11 @@ static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
volatile T *a, T *c, T v, morder mo) {
- if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+ if (IsReleaseOrder(mo))
Release(thr, pc, (uptr)a);
T cc = *c;
T pr = __sync_val_compare_and_swap(a, cc, v);
- if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+ if (IsAcquireOrder(mo))
Acquire(thr, pc, (uptr)a);
if (pr == cc)
return true;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h
index faefd9a8d8e..9b9df5143ce 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h
@@ -22,6 +22,8 @@ typedef short __tsan_atomic16; // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64; // NOLINT
+// Part of ABI, do not change.
+// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
typedef enum {
__tsan_memory_order_relaxed = 1 << 0,
__tsan_memory_order_consume = 1 << 1,