author     Saleem Abdulrasool <compnerd@compnerd.org>  2015-10-10 21:21:28 +0000
committer  Saleem Abdulrasool <compnerd@compnerd.org>  2015-10-10 21:21:28 +0000
commit     911cfc11c449f04ca280491239fcea38f6759cfe (patch)
tree       b55fb3bd2c76d39994c47dc52576e4bae4352339 /compiler-rt/lib/builtins/atomic.c
parent     55b1f2920337ba89db9a5a3116fa323ac2286de0 (diff)
builtins: spell inline as __inline
__inline is a vendor-specific spelling of inline. clang and gcc treat it identically to inline, and it is also accepted by MSVC 2013, which does not implement C99 (VS2015 does support the inline keyword). This will allow us to build the builtins with MSVC. llvm-svn: 249953
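For illustration, a minimal sketch of the portability point above (a hypothetical example, not part of the patch; the function name add_one is invented):

/* __inline is accepted as a synonym for inline by clang and gcc, and is
 * available in MSVC 2013's C compiler, which rejects the C99 spelling. */
__inline static int add_one(int x) { return x + 1; }

int main(void) { return add_one(41) == 42 ? 0 : 1; }

Compiled as C, this builds with clang, gcc, and the VS2013 compiler, whereas the same function spelled with plain inline fails under VS2013's C mode.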
Diffstat (limited to 'compiler-rt/lib/builtins/atomic.c')
-rw-r--r--  compiler-rt/lib/builtins/atomic.c | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/compiler-rt/lib/builtins/atomic.c b/compiler-rt/lib/builtins/atomic.c
index 35c8837dcec..f1ddc3e0c52 100644
--- a/compiler-rt/lib/builtins/atomic.c
+++ b/compiler-rt/lib/builtins/atomic.c
@@ -56,13 +56,13 @@ static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
 #include <machine/atomic.h>
 #include <sys/umtx.h>
 typedef struct _usem Lock;
-inline static void unlock(Lock *l) {
+__inline static void unlock(Lock *l) {
   __c11_atomic_store((_Atomic(uint32_t)*)&l->_count, 1, __ATOMIC_RELEASE);
   __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
   if (l->_has_waiters)
     _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
 }
-inline static void lock(Lock *l) {
+__inline static void lock(Lock *l) {
   uint32_t old = 1;
   while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t)*)&l->_count, &old,
          0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
@@ -76,12 +76,12 @@ static Lock locks[SPINLOCK_COUNT] = { [0 ... SPINLOCK_COUNT-1] = {0,1,0} };
 #elif defined(__APPLE__)
 #include <libkern/OSAtomic.h>
 typedef OSSpinLock Lock;
-inline static void unlock(Lock *l) {
+__inline static void unlock(Lock *l) {
   OSSpinLockUnlock(l);
 }
 /// Locks a lock. In the current implementation, this is potentially
 /// unbounded in the contended case.
-inline static void lock(Lock *l) {
+__inline static void lock(Lock *l) {
   OSSpinLockLock(l);
 }
 static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0
@@ -89,12 +89,12 @@ static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0
 #else
 typedef _Atomic(uintptr_t) Lock;
 /// Unlock a lock. This is a release operation.
-inline static void unlock(Lock *l) {
+__inline static void unlock(Lock *l) {
   __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
 }
 /// Locks a lock. In the current implementation, this is potentially
 /// unbounded in the contended case.
-inline static void lock(Lock *l) {
+__inline static void lock(Lock *l) {
   uintptr_t old = 0;
   while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
          __ATOMIC_RELAXED))
@@ -106,7 +106,7 @@ static Lock locks[SPINLOCK_COUNT];
 /// Returns a lock to use for a given pointer.
-static inline Lock *lock_for_pointer(void *ptr) {
+static __inline Lock *lock_for_pointer(void *ptr) {
   intptr_t hash = (intptr_t)ptr;
   // Disregard the lowest 4 bits. We want all values that may be part of the
   // same memory operation to hash to the same value and therefore use the same
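The hunk above is cut off by the page. For context, a self-contained sketch of the hashing scheme that lock_for_pointer's comment describes (the SPINLOCK_COUNT value and the exact shift amounts are assumptions for illustration, not quoted from atomic.c, and lock_index_for_pointer is an invented name):

#include <stdint.h>

#define SPINLOCK_COUNT (1 << 10)            /* assumed lock-table size */
#define SPINLOCK_MASK  (SPINLOCK_COUNT - 1)

/* Map a pointer to a slot in the lock table: discard the low 4 bits so
 * accesses within one 16-byte granule share a lock, then fold in higher
 * bits so atomic fields of different objects spread across the table. */
static __inline long lock_index_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  hash >>= 4;                      /* ignore the lowest 4 bits */
  intptr_t low = hash & SPINLOCK_MASK;
  hash >>= 16;                     /* perturb with higher-order bits */
  hash ^= low;
  return (long)(hash & SPINLOCK_MASK);
}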