author:    Thomas Gleixner <tglx@linutronix.de>  2009-11-06 22:16:32 -0800
committer: David S. Miller <davem@davemloft.net>  2009-11-06 22:16:32 -0800
commit:    4df286e52917c95c415400367cfd523dfbb0f93a (patch)
tree:      70a3339e7e1d615331e5ab0a845cb24303209f68 /arch/sparc
parent:    ddaf1b27edf72372242d752730d526b79312a44e (diff)
download:  blackbird-op-linux-4df286e52917c95c415400367cfd523dfbb0f93a.tar.gz
           blackbird-op-linux-4df286e52917c95c415400367cfd523dfbb0f93a.zip
sparc: Make atomic locks raw
SPIN_LOCK_UNLOCKED is deprecated. The locks which protect the atomic
operations have no dependency on other locks, and the code is well
tested, so the conversion to a raw lock is safe.
Make the lock array static while at it.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc')
-rw-r--r-- | arch/sparc/lib/atomic32.c | 36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index cbddeb38ffda..080b7c26e0fd 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -15,8 +15,8 @@
 #define ATOMIC_HASH_SIZE	4
 #define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
 
-spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
-	[0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
+static raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
+	[0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
 };
 
 #else /* SMP */
@@ -31,11 +31,11 @@ int __atomic_add_return(int i, atomic_t *v)
 {
 	int ret;
 	unsigned long flags;
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 
 	ret = (v->counter += i);
 
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret;
 }
 EXPORT_SYMBOL(__atomic_add_return);
@@ -45,12 +45,12 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
 	int ret;
 	unsigned long flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 	ret = v->counter;
 	if (likely(ret == old))
 		v->counter = new;
 
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret;
 }
 EXPORT_SYMBOL(atomic_cmpxchg);
@@ -60,11 +60,11 @@ int atomic_add_unless(atomic_t *v, int a, int u)
 	int ret;
 	unsigned long flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 	ret = v->counter;
 	if (ret != u)
 		v->counter += a;
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret != u;
 }
 EXPORT_SYMBOL(atomic_add_unless);
@@ -74,9 +74,9 @@ void atomic_set(atomic_t *v, int i)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 	v->counter = i;
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 }
 EXPORT_SYMBOL(atomic_set);
 
@@ -84,10 +84,10 @@ unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
 {
 	unsigned long old, flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
 	old = *addr;
 	*addr = old | mask;
-	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
 
 	return old & mask;
 }
@@ -97,10 +97,10 @@ unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
 {
 	unsigned long old, flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
 	old = *addr;
 	*addr = old & ~mask;
-	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
 
 	return old & mask;
 }
@@ -110,10 +110,10 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
 {
 	unsigned long old, flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
 	old = *addr;
 	*addr = old ^ mask;
-	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
 
 	return old & mask;
 }
@@ -124,10 +124,10 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
 	unsigned long flags;
 	u32 prev;
 
-	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
 	if ((prev = *ptr) == old)
 		*ptr = new;
-	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
 
 	return (unsigned long)prev;
 }
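The file touched by this patch implements sparc32's atomic and bitop emulation: every atomic_t is serialized through one of a small array of locks selected by hashing the variable's address. The sketch below shows that hash-locked emulation technique in plain userspace C, using POSIX spinlocks where the kernel now uses raw_spinlock_t. The names emu_atomic_t, atomic_hash and emu_atomic_add_return are invented for this illustration and do not appear in the patch, and the kernel's _irqsave/_irqrestore handling has no direct userspace equivalent here.

```c
/*
 * Minimal userspace sketch of the hash-locked atomic emulation used by
 * arch/sparc/lib/atomic32.c.  POSIX spinlocks stand in for the kernel's
 * raw_spinlock_t; all identifiers below are invented for the example.
 */
#include <pthread.h>
#include <stdio.h>

#define ATOMIC_HASH_SIZE 4
/* Pick a lock bucket from the variable's address, like ATOMIC_HASH(a). */
#define ATOMIC_HASH(a) \
	(&atomic_hash[(((unsigned long)(a)) >> 8) & (ATOMIC_HASH_SIZE - 1)])

typedef struct { int counter; } emu_atomic_t;

static pthread_spinlock_t atomic_hash[ATOMIC_HASH_SIZE];

static void atomic_hash_init(void)
{
	for (int i = 0; i < ATOMIC_HASH_SIZE; i++)
		pthread_spin_init(&atomic_hash[i], PTHREAD_PROCESS_PRIVATE);
}

/*
 * Emulated atomic add: the hashed lock serializes every access that maps
 * to the same bucket, so the read-modify-write appears atomic even though
 * the CPU provides no native atomic add.
 */
static int emu_atomic_add_return(int i, emu_atomic_t *v)
{
	int ret;

	pthread_spin_lock(ATOMIC_HASH(v));
	ret = (v->counter += i);
	pthread_spin_unlock(ATOMIC_HASH(v));
	return ret;
}

int main(void)
{
	emu_atomic_t v = { .counter = 0 };

	atomic_hash_init();
	printf("%d\n", emu_atomic_add_return(5, &v));	/* prints 5 */
	return 0;
}
```

Build with something like `gcc -pthread example.c`. The kernel version takes the locks with the _irqsave/_irqrestore forms so the same buckets stay safe against interrupt context; since, as the commit message notes, these locks nest under nothing else, switching them to the raw variant is a mechanical change.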