author    Sam Ravnborg <sam@ravnborg.org>    2008-07-27 23:00:59 +0200
committer Sam Ravnborg <sam@ravnborg.org>    2008-07-27 23:00:59 +0200
commit    a439fe51a1f8eb087c22dd24d69cebae4a3addac (patch)
tree      e32d1fa97a220ab598d8ab364205817c5bf2bd6f /arch/sparc/include/asm/spinlock_32.h
parent    837b41b5de356aa67abb2cadb5eef3efc7776f91 (diff)
sparc, sparc64: use arch/sparc/include
The majority of this patch was created by the following script:

***
ASM=arch/sparc/include/asm
mkdir -p $ASM
git mv include/asm-sparc64/ftrace.h $ASM
git rm include/asm-sparc64/*
git mv include/asm-sparc/* $ASM
sed -ie 's/asm-sparc64/asm/g' $ASM/*
sed -ie 's/asm-sparc/asm/g' $ASM/*
***

The rest was an update of the top-level Makefile to use sparc
for header files when sparc64 is being built.

And a small fixlet to pick up the correct unistd.h from
sparc64 code.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Diffstat (limited to 'arch/sparc/include/asm/spinlock_32.h')
-rw-r--r--  arch/sparc/include/asm/spinlock_32.h | 192
1 file changed, 192 insertions(+), 0 deletions(-)
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
new file mode 100644
index 000000000000..de2249b267c6
--- /dev/null
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -0,0 +1,192 @@
+/* spinlock.h: 32-bit Sparc spinlock support.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#ifndef __SPARC_SPINLOCK_H
+#define __SPARC_SPINLOCK_H
+
+#include <linux/threads.h> /* For NR_CPUS */
+
+#ifndef __ASSEMBLY__
+
+#include <asm/psr.h>
+
+#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
+
+#define __raw_spin_unlock_wait(lock) \
+ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+ __asm__ __volatile__(
+ "\n1:\n\t"
+ "ldstub [%0], %%g2\n\t"
+ "orcc %%g2, 0x0, %%g0\n\t"
+ "bne,a 2f\n\t"
+ " ldub [%0], %%g2\n\t"
+ ".subsection 2\n"
+ "2:\n\t"
+ "orcc %%g2, 0x0, %%g0\n\t"
+ "bne,a 2b\n\t"
+ " ldub [%0], %%g2\n\t"
+ "b,a 1b\n\t"
+ ".previous\n"
+ : /* no outputs */
+ : "r" (lock)
+ : "g2", "memory", "cc");
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+ unsigned int result;
+ __asm__ __volatile__("ldstub [%1], %0"
+ : "=r" (result)
+ : "r" (lock)
+ : "memory");
+ return (result == 0);
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+ __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
+}
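
The ldstub instruction atomically loads a byte and stores 0xff back to it, so a
zero result means the caller took the lock; the .subsection 2 path spins on
plain loads (ldub) until the byte reads zero before retrying the atomic
operation. A minimal user-space sketch of the same test-and-set protocol,
using GCC's __atomic builtins in place of ldstub (illustration only, not the
kernel's actual types):

static void demo_spin_lock(volatile unsigned char *lock)
{
	/* ldstub equivalent: atomically read the byte, store non-zero. */
	while (__atomic_test_and_set((void *)lock, __ATOMIC_ACQUIRE)) {
		/* Backoff loop: spin on plain loads (the "ldub" loop)
		 * until the lock reads free, then retry the atomic op. */
		while (*lock)
			;
	}
}

static void demo_spin_unlock(volatile unsigned char *lock)
{
	/* stb %%g0, [lock]: a plain store of zero releases the lock. */
	__atomic_clear((void *)lock, __ATOMIC_RELEASE);
}
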
+
+/* Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ *
+ * XXX This might create some problems with my dual spinlock
+ * XXX scheme, deadlocks etc. -DaveM
+ *
+ * Sort of like atomic_t's on Sparc, but even more clever.
+ *
+ * ------------------------------------
+ * | 24-bit counter | wlock | raw_rwlock_t
+ * ------------------------------------
+ * 31 8 7 0
+ *
+ * wlock signifies that a writer holds the lock or that somebody is
+ * updating the counter. If a writer successfully acquires the wlock
+ * but the counter is non-zero, it has to release the lock and wait
+ * until both counter and wlock are zero.
+ *
+ * Unfortunately this scheme limits us to ~16,000,000 cpus.
+ */
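
A worked example of why this layout is convenient (a sketch, assuming the
out-of-line ___rw_* helpers in arch/sparc/lib/locks.S): once a reader has
grabbed the wlock byte with ldstub, the low byte of the word is all ones, so
adding 1 to the whole word increments the 24-bit counter and clears the wlock
byte in a single store:

#include <assert.h>

int main(void)
{
	unsigned int count = 5;
	unsigned int word  = (count << 8) | 0xff; /* wlock grabbed via ldstub */

	word += 1;  /* one word store: counter++ and wlock := 0 together */

	assert((word >> 8)   == count + 1);  /* 24-bit counter bumped */
	assert((word & 0xff) == 0);          /* wlock byte released   */
	return 0;
}
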
+static inline void __read_lock(raw_rwlock_t *rw)
+{
+ register raw_rwlock_t *lp asm("g1");
+ lp = rw;
+ __asm__ __volatile__(
+ "mov %%o7, %%g4\n\t"
+ "call ___rw_read_enter\n\t"
+ " ldstub [%%g1 + 3], %%g2\n"
+ : /* no outputs */
+ : "r" (lp)
+ : "g2", "g4", "memory", "cc");
+}
+
+#define __raw_read_lock(lock) \
+do { unsigned long flags; \
+ local_irq_save(flags); \
+ __read_lock(lock); \
+ local_irq_restore(flags); \
+} while(0)
+
+static inline void __read_unlock(raw_rwlock_t *rw)
+{
+ register raw_rwlock_t *lp asm("g1");
+ lp = rw;
+ __asm__ __volatile__(
+ "mov %%o7, %%g4\n\t"
+ "call ___rw_read_exit\n\t"
+ " ldstub [%%g1 + 3], %%g2\n"
+ : /* no outputs */
+ : "r" (lp)
+ : "g2", "g4", "memory", "cc");
+}
+
+#define __raw_read_unlock(lock) \
+do { unsigned long flags; \
+ local_irq_save(flags); \
+ __read_unlock(lock); \
+ local_irq_restore(flags); \
+} while(0)
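
The wrappers disable interrupts across the helper call because the helper
briefly holds the wlock byte while updating the counter; an interrupt handler
taking a read lock on the same CPU during that window would spin on wlock
forever. At the generic rwlock API level the mixing described in the comment
above looks like this (a hypothetical driver sketch, names invented for
illustration):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_lock);        /* hypothetical */
static int demo_state;                  /* hypothetical shared data */

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	read_lock(&demo_lock);          /* reader in interrupt context */
	/* ... inspect demo_state ... */
	read_unlock(&demo_lock);
	return IRQ_HANDLED;
}

static void demo_update(int val)
{
	unsigned long flags;

	/* Writers must use the irq-safe variant, per the comment above. */
	write_lock_irqsave(&demo_lock, flags);
	demo_state = val;
	write_unlock_irqrestore(&demo_lock, flags);
}
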
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+ register raw_rwlock_t *lp asm("g1");
+ lp = rw;
+ __asm__ __volatile__(
+ "mov %%o7, %%g4\n\t"
+ "call ___rw_write_enter\n\t"
+ " ldstub [%%g1 + 3], %%g2\n"
+ : /* no outputs */
+ : "r" (lp)
+ : "g2", "g4", "memory", "cc");
+ *(volatile __u32 *)&lp->lock = ~0U;
+}
+
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
+{
+ unsigned int val;
+
+ __asm__ __volatile__("ldstub [%1 + 3], %0"
+ : "=r" (val)
+ : "r" (&rw->lock)
+ : "memory");
+
+ if (val == 0) {
+ val = rw->lock & ~0xff;
+ if (val)
+ ((volatile u8*)&rw->lock)[3] = 0;
+ else
+ *(volatile u32*)&rw->lock = ~0U;
+ }
+
+ return (val == 0);
+}
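
__raw_write_lock() calls the ___rw_write_enter helper and then marks the whole
word ~0 so that both can-lock tests below fail; __raw_write_trylock() is the
same protocol open-coded: grab the wlock byte, and if any readers are still
counted in bits 31:8, roll back by clearing just that byte. A sketch of the
trylock logic with GCC builtins (big-endian byte order assumed, as on sparc32;
illustration only):

static int demo_write_trylock(volatile unsigned int *word)
{
	/* Big-endian: byte 3 is the least-significant byte, i.e. wlock. */
	volatile unsigned char *wlock = (volatile unsigned char *)word + 3;

	/* ldstub [rw + 3]: atomically set the wlock byte. */
	if (__atomic_test_and_set((void *)wlock, __ATOMIC_ACQUIRE))
		return 0;               /* someone else holds wlock */

	if (*word & ~0xffU) {           /* readers still counted?   */
		*wlock = 0;             /* roll back, release byte  */
		return 0;
	}
	*word = ~0U;                    /* whole word marks writer  */
	return 1;
}
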
+
+static inline int __read_trylock(raw_rwlock_t *rw)
+{
+ register raw_rwlock_t *lp asm("g1");
+ register int res asm("o0");
+ lp = rw;
+ __asm__ __volatile__(
+ "mov %%o7, %%g4\n\t"
+ "call ___rw_read_try\n\t"
+ " ldstub [%%g1 + 3], %%g2\n"
+ : "=r" (res)
+ : "r" (lp)
+ : "g2", "g4", "memory", "cc");
+ return res;
+}
+
+#define __raw_read_trylock(lock) \
+({ unsigned long flags; \
+ int res; \
+ local_irq_save(flags); \
+ res = __read_trylock(lock); \
+ local_irq_restore(flags); \
+ res; \
+})
+
+#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
+
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+#define _raw_spin_relax(lock) cpu_relax()
+#define _raw_read_relax(lock) cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
+#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define __raw_write_can_lock(rw) (!(rw)->lock)
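
These two predicates fall straight out of the layout: a reader only needs the
wlock byte clear (a non-zero counter is fine), while a writer needs the entire
word to be zero. A small self-check of the three possible states (a standalone
sketch, not kernel code):

#include <assert.h>

int main(void)
{
	unsigned int unlocked    = 0x00000000;  /* free                    */
	unsigned int two_readers = 0x00000200;  /* count=2, wlock clear    */
	unsigned int writer_held = 0xffffffff;  /* set by __raw_write_lock */

	assert(!(unlocked & 0xff)    && !unlocked);        /* read + write ok */
	assert(!(two_readers & 0xff) && two_readers != 0); /* read ok only    */
	assert((writer_held & 0xff)  && writer_held != 0); /* neither         */
	return 0;
}
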
+
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* __SPARC_SPINLOCK_H */