Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/40x_mmu.c              |  4
-rw-r--r--   arch/powerpc/mm/hash_native_64.c       | 19
-rw-r--r--   arch/powerpc/mm/mmu_context_hash64.c   | 10
-rw-r--r--   arch/powerpc/mm/mmu_context_nohash.c   | 14
-rw-r--r--   arch/powerpc/mm/tlb_low_64e.S          |  2
-rw-r--r--   arch/powerpc/mm/tlb_nohash.c           |  6
6 files changed, 27 insertions, 28 deletions
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 08dfa8e6d86f..65abfcfaaa9e 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -84,8 +84,8 @@ void __init MMU_init_hw(void)
* vectors and the kernel live in real-mode.
*/
- mtspr(SPRN_DCCR, 0xF0000000); /* 512 MB of data space at 0x0. */
- mtspr(SPRN_ICCR, 0xF0000000); /* 512 MB of instr. space at 0x0. */
+ mtspr(SPRN_DCCR, 0xFFFF0000); /* 2GByte of data space at 0x0. */
+ mtspr(SPRN_ICCR, 0xFFFF0000); /* 2GByte of instr. space at 0x0. */
}
#define LARGE_PAGE_SIZE_16M (1<<24)
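On the 40x, each bit in DCCR/ICCR marks one 128 MB real-mode region as cacheable, with the most-significant bit covering the region at address 0x0; 0xF0000000 therefore caches the first 512 MB, and 0xFFFF0000 extends that to the first 2 GB, matching the updated comments. A minimal sketch of how such a mask relates to the cached size (hypothetical helper, not part of the patch):

    /* Hypothetical sketch: each DCCR/ICCR bit covers 128 MB, MSB = region at 0x0 */
    static inline unsigned long cacheable_mask(unsigned long size_mb)
    {
            unsigned long regions = size_mb / 128;          /* number of 128 MB regions */

            if (!regions)
                    return 0;
            if (regions >= 32)
                    return 0xFFFFFFFFUL;                    /* whole 4 GB cacheable */
            return (~0UL << (32 - regions)) & 0xFFFFFFFFUL; /* top 'regions' bits set */
    }
    /* cacheable_mask(512) == 0xF0000000, cacheable_mask(2048) == 0xFFFF0000 */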
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 056d23a1b105..784a400e0781 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -37,7 +37,7 @@
#define HPTE_LOCK_BIT 3
-static DEFINE_SPINLOCK(native_tlbie_lock);
+static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void __tlbie(unsigned long va, int psize, int ssize)
{
@@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
if (use_local)
use_local = mmu_psize_defs[psize].tlbiel;
if (lock_tlbie && !use_local)
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync": : :"memory");
if (use_local) {
__tlbiel(va, psize, ssize);
@@ -114,7 +114,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
if (lock_tlbie && !use_local)
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
}
static inline void native_lock_hpte(struct hash_pte *hptep)
@@ -122,7 +122,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
unsigned long *word = &hptep->v;
while (1) {
- if (!test_and_set_bit(HPTE_LOCK_BIT, word))
+ if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
break;
while(test_bit(HPTE_LOCK_BIT, word))
cpu_relax();
@@ -133,8 +133,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
{
unsigned long *word = &hptep->v;
- asm volatile("lwsync":::"memory");
- clear_bit(HPTE_LOCK_BIT, word);
+ clear_bit_unlock(HPTE_LOCK_BIT, word);
}
static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
@@ -434,7 +433,7 @@ static void native_hpte_clear(void)
/* we take the tlbie lock and hold it. Some hardware will
* deadlock if we try to tlbie from two processors at once.
*/
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
slots = pteg_count * HPTES_PER_GROUP;
@@ -458,7 +457,7 @@ static void native_hpte_clear(void)
}
asm volatile("eieio; tlbsync; ptesync":::"memory");
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
local_irq_restore(flags);
}
@@ -521,7 +520,7 @@ static void native_flush_hash_range(unsigned long number, int local)
int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
@@ -536,7 +535,7 @@ static void native_flush_hash_range(unsigned long number, int local)
asm volatile("eieio; tlbsync; ptesync":::"memory");
if (lock_tlbie)
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
}
local_irq_restore(flags);
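Two separate changes in this file: native_tlbie_lock becomes a raw spinlock so it keeps spinning even where PREEMPT_RT would turn an ordinary spinlock into a sleeping lock (these paths run with interrupts disabled), and the per-HPTE lock bit moves to test_and_set_bit_lock()/clear_bit_unlock(), whose acquire/release ordering makes the explicit lwsync in native_unlock_hpte() unnecessary. A minimal sketch of the resulting bit-lock pattern (illustrative names, not from the patch):

    #include <linux/bitops.h>
    #include <asm/processor.h>          /* cpu_relax() */

    #define MY_LOCK_BIT 3               /* hypothetical lock bit inside a word */

    static inline void word_lock(unsigned long *word)
    {
            /* acquire ordering is implied when the bit is won */
            while (test_and_set_bit_lock(MY_LOCK_BIT, word))
                    while (test_bit(MY_LOCK_BIT, word))
                            cpu_relax();
    }

    static inline void word_unlock(unsigned long *word)
    {
            /* release ordering replaces the old lwsync + clear_bit() pair */
            clear_bit_unlock(MY_LOCK_BIT, word);
    }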
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index b910d37aea1a..51622daae09d 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -23,7 +23,7 @@
#include <asm/mmu_context.h>
static DEFINE_SPINLOCK(mmu_context_lock);
-static DEFINE_IDR(mmu_context_idr);
+static DEFINE_IDA(mmu_context_ida);
/*
* The proto-VSID space has 2^35 - 1 segments available for user mappings.
@@ -39,11 +39,11 @@ int __init_new_context(void)
int err;
again:
- if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
+ if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
return -ENOMEM;
spin_lock(&mmu_context_lock);
- err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
+ err = ida_get_new_above(&mmu_context_ida, 1, &index);
spin_unlock(&mmu_context_lock);
if (err == -EAGAIN)
@@ -53,7 +53,7 @@ again:
if (index > MAX_CONTEXT) {
spin_lock(&mmu_context_lock);
- idr_remove(&mmu_context_idr, index);
+ ida_remove(&mmu_context_ida, index);
spin_unlock(&mmu_context_lock);
return -ENOMEM;
}
@@ -85,7 +85,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
void __destroy_context(int context_id)
{
spin_lock(&mmu_context_lock);
- idr_remove(&mmu_context_idr, context_id);
+ ida_remove(&mmu_context_ida, context_id);
spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);
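The context-id allocator only hands out unique integers and never looks an id back up to a pointer, so the IDR can be replaced by the lighter IDA; the calling convention of this era's IDA API is the same pre-get/allocate/retry dance, just without the pointer argument. A rough sketch of that allocation pattern (my_ida, my_lock and alloc_id are placeholders, not names from the patch):

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);
    static DEFINE_IDA(my_ida);

    static int alloc_id(void)
    {
            int id, err;

    again:
            if (!ida_pre_get(&my_ida, GFP_KERNEL))     /* preload outside the lock */
                    return -ENOMEM;

            spin_lock(&my_lock);
            err = ida_get_new_above(&my_ida, 1, &id);  /* first id >= 1 */
            spin_unlock(&my_lock);

            if (err == -EAGAIN)                        /* preloaded node consumed, retry */
                    goto again;

            return err ? err : id;
    }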
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 1044a634b6d0..dbc692145ecb 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -56,7 +56,7 @@ static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
-static DEFINE_SPINLOCK(context_lock);
+static DEFINE_RAW_SPINLOCK(context_lock);
#define CTX_MAP_SIZE \
(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -121,9 +121,9 @@ static unsigned int steal_context_smp(unsigned int id)
/* This will happen if you have more CPUs than available contexts,
* all we can do here is wait a bit and try again
*/
- spin_unlock(&context_lock);
+ raw_spin_unlock(&context_lock);
cpu_relax();
- spin_lock(&context_lock);
+ raw_spin_lock(&context_lock);
/* This will cause the caller to try again */
return MMU_NO_CONTEXT;
@@ -194,7 +194,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
unsigned long *map;
/* No lockless fast path .. yet */
- spin_lock(&context_lock);
+ raw_spin_lock(&context_lock);
pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
cpu, next, next->context.active, next->context.id);
@@ -278,7 +278,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
/* Flick the MMU and release lock */
pr_hardcont(" -> %d\n", id);
set_context(id, next->pgd);
- spin_unlock(&context_lock);
+ raw_spin_unlock(&context_lock);
}
/*
@@ -307,7 +307,7 @@ void destroy_context(struct mm_struct *mm)
WARN_ON(mm->context.active != 0);
- spin_lock_irqsave(&context_lock, flags);
+ raw_spin_lock_irqsave(&context_lock, flags);
id = mm->context.id;
if (id != MMU_NO_CONTEXT) {
__clear_bit(id, context_map);
@@ -318,7 +318,7 @@ void destroy_context(struct mm_struct *mm)
context_mm[id] = NULL;
nr_free_contexts++;
}
- spin_unlock_irqrestore(&context_lock, flags);
+ raw_spin_unlock_irqrestore(&context_lock, flags);
}
#ifdef CONFIG_SMP
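context_lock is taken on the context-switch path in switch_mmu_context(), which cannot sleep, so it gets the same raw_spinlock_t treatment; only the lock type and the lock/unlock call sites change, not the locking rules. A minimal sketch of the raw irqsave variant used in destroy_context() (placeholder lock name):

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(example_lock);   /* stays a spinning lock on PREEMPT_RT */

    static void update_under_lock(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&example_lock, flags);
            /* short, non-sleeping critical section */
            raw_spin_unlock_irqrestore(&example_lock, flags);
    }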
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index f288279e679d..8b04c54e596f 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -1,5 +1,5 @@
/*
- * Low leve TLB miss handlers for Book3E
+ * Low level TLB miss handlers for Book3E
*
* Copyright (C) 2008-2009
* Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 2fbc680c2c71..e81d5d67f834 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -150,7 +150,7 @@ EXPORT_SYMBOL(local_flush_tlb_page);
*/
#ifdef CONFIG_SMP
-static DEFINE_SPINLOCK(tlbivax_lock);
+static DEFINE_RAW_SPINLOCK(tlbivax_lock);
static int mm_is_core_local(struct mm_struct *mm)
{
@@ -232,10 +232,10 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
if (lock)
- spin_lock(&tlbivax_lock);
+ raw_spin_lock(&tlbivax_lock);
_tlbivax_bcast(vmaddr, pid, tsize, ind);
if (lock)
- spin_unlock(&tlbivax_lock);
+ raw_spin_unlock(&tlbivax_lock);
goto bail;
} else {
struct tlb_flush_param p = {
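tlbivax_lock gets the same conversion: broadcast tlbivax must be serialized on parts that set MMU_FTR_LOCK_BCAST_INVAL, and the lock is taken only when that feature bit says it is needed. A minimal sketch of that conditional-serialization shape (names are placeholders standing in for the real feature test and invalidate):

    static DEFINE_RAW_SPINLOCK(bcast_lock);

    /* Illustrative only: serialize a broadcast invalidate when hardware needs it */
    static void bcast_invalidate(unsigned long vaddr, int pid, bool needs_lock)
    {
            if (needs_lock)
                    raw_spin_lock(&bcast_lock);
            /* _tlbivax_bcast(vaddr, pid, tsize, ind) would go here */
            if (needs_lock)
                    raw_spin_unlock(&bcast_lock);
    }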