path: root/include/asm-x86_64/spinlock.h
author	Andi Kleen <ak@suse.de>	2005-11-05 17:25:54 +0100
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-14 19:55:15 -0800
commit	485832a5d928facd82f1525270d9f048da2063a1 (patch)
tree	4266c612f32c2ccaca080af00cfee3a9f420331c /include/asm-x86_64/spinlock.h
parent	8315eca25583c369e28f48909d3341dc21d6214d (diff)
[PATCH] x86_64: Use int operations in spinlocks to support more than 128 CPUs spinning.
Pointed out by Eric Dumazet

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
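A minimal user-space sketch (not kernel code) of why the byte-wide lock word runs out of headroom: the lock word starts at 1, every acquirer atomically decrements it and owns the lock only if the result is still non-negative, so a signed char can only represent about 128 concurrent waiters before it wraps and a spinner wrongly sees a positive value.

/*
 * Minimal sketch, not kernel code: models the sign-based byte spinlock
 * that this patch widens to 32 bits.  slock > 0 means unlocked; each
 * acquirer does an atomic decrement ("lock ; decb") and holds the lock
 * only if the result is still >= 0.
 */
#include <stdio.h>

int main(void)
{
	signed char slock = 1;			/* 1 == unlocked */

	for (int cpu = 0; cpu < 130; cpu++) {
		signed char result = --slock;	/* stands in for "lock ; decb %0" */
		if (result >= 0)
			printf("CPU %3d believes it holds the lock (slock=%d)\n",
			       cpu, result);
	}
	/*
	 * On a typical two's-complement target the 129th waiter drives the
	 * byte from -128 past its minimum and it wraps to +127, so a second
	 * CPU "acquires" a lock that is already held.  With a 32-bit lock
	 * word ("decl") the same wrap would need ~2^31 simultaneous waiters.
	 */
	return 0;
}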
Diffstat (limited to 'include/asm-x86_64/spinlock.h')
-rw-r--r--	include/asm-x86_64/spinlock.h	12
1 file changed, 6 insertions, 6 deletions
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 69636831ad2f..fe484a699cc3 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -18,22 +18,22 @@
*/
#define __raw_spin_is_locked(x) \
- (*(volatile signed char *)(&(x)->slock) <= 0)
+ (*(volatile signed int *)(&(x)->slock) <= 0)
#define __raw_spin_lock_string \
"\n1:\t" \
- "lock ; decb %0\n\t" \
+ "lock ; decl %0\n\t" \
"js 2f\n" \
LOCK_SECTION_START("") \
"2:\t" \
"rep;nop\n\t" \
- "cmpb $0,%0\n\t" \
+ "cmpl $0,%0\n\t" \
"jle 2b\n\t" \
"jmp 1b\n" \
LOCK_SECTION_END
#define __raw_spin_unlock_string \
- "movb $1,%0" \
+ "movl $1,%0" \
:"=m" (lock->slock) : : "memory"
static inline void __raw_spin_lock(raw_spinlock_t *lock)
@@ -47,10 +47,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
- char oldval;
+ int oldval;
__asm__ __volatile__(
- "xchgb %b0,%1"
+ "xchgl %0,%1"
:"=q" (oldval), "=m" (lock->slock)
:"0" (0) : "memory");