author     Thomas Gleixner <tglx@linutronix.de>  2010-02-18 02:22:35 +0000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-02-19 14:52:30 +1100
commit     6b9c9b8a66f7696ce392d34f90d4d6aa7045f51d (patch)
tree       33baf0862f45c3fbea4b683e671a1f8041a6ba10 /arch/powerpc
parent     5181e7909b28abe5ce6674412a74167fd074824a (diff)
download   blackbird-op-linux-6b9c9b8a66f7696ce392d34f90d4d6aa7045f51d.tar.gz
           blackbird-op-linux-6b9c9b8a66f7696ce392d34f90d4d6aa7045f51d.zip
powerpc: Convert native_tlbie_lock to raw_spinlock
native_tlbie_lock needs to be a real spinlock in RT. Convert it to raw_spinlock.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
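For context: under PREEMPT_RT an ordinary spinlock_t is substituted with a sleeping, rt_mutex-based lock, while raw_spinlock_t always remains a true spinning lock. The tlbie serialization below is taken on paths that run in atomic context (some with interrupts disabled), so it must never sleep; hence the conversion. A minimal sketch of the pattern, using a hypothetical lock and function name that are not part of this patch:

	#include <linux/spinlock.h>

	/* Hypothetical lock; native_tlbie_lock plays this role in the patch. */
	static DEFINE_RAW_SPINLOCK(example_tlb_lock);

	static void example_serialized_flush(void)
	{
		unsigned long flags;

		/*
		 * raw_spin_lock_irqsave() disables local interrupts and
		 * busy-waits; unlike spinlock_t under PREEMPT_RT it is never
		 * converted to a sleeping lock, so it is safe in contexts
		 * that must not schedule.
		 */
		raw_spin_lock_irqsave(&example_tlb_lock, flags);

		/* ... issue the hardware operation that must be serialized ... */

		raw_spin_unlock_irqrestore(&example_tlb_lock, flags);
	}

In the patch itself the callers already manage interrupt state (note the local_irq_restore() calls in the hunks below), so the plain raw_spin_lock()/raw_spin_unlock() variants are sufficient.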
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/mm/hash_native_64.c  14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 9e1aa4f99fac..784a400e0781 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -37,7 +37,7 @@
#define HPTE_LOCK_BIT 3
-static DEFINE_SPINLOCK(native_tlbie_lock);
+static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void __tlbie(unsigned long va, int psize, int ssize)
{
@@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
if (use_local)
use_local = mmu_psize_defs[psize].tlbiel;
if (lock_tlbie && !use_local)
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync": : :"memory");
if (use_local) {
__tlbiel(va, psize, ssize);
@@ -114,7 +114,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
if (lock_tlbie && !use_local)
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
}
static inline void native_lock_hpte(struct hash_pte *hptep)
@@ -433,7 +433,7 @@ static void native_hpte_clear(void)
/* we take the tlbie lock and hold it. Some hardware will
* deadlock if we try to tlbie from two processors at once.
*/
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
slots = pteg_count * HPTES_PER_GROUP;
@@ -457,7 +457,7 @@ static void native_hpte_clear(void)
}
asm volatile("eieio; tlbsync; ptesync":::"memory");
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
local_irq_restore(flags);
}
@@ -520,7 +520,7 @@ static void native_flush_hash_range(unsigned long number, int local)
int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
@@ -535,7 +535,7 @@ static void native_flush_hash_range(unsigned long number, int local)
asm volatile("eieio; tlbsync; ptesync":::"memory");
if (lock_tlbie)
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
}
local_irq_restore(flags);