Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/44x_mmu.c           5
-rw-r--r--  arch/powerpc/mm/4xx_mmu.c           1
-rw-r--r--  arch/powerpc/mm/fault.c            41
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c     1
-rw-r--r--  arch/powerpc/mm/hash_low_32.S       1
-rw-r--r--  arch/powerpc/mm/hash_low_64.S       1
-rw-r--r--  arch/powerpc/mm/hash_native_64.c    3
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c   107
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c       2
-rw-r--r--  arch/powerpc/mm/init_32.c           1
-rw-r--r--  arch/powerpc/mm/init_64.c           4
-rw-r--r--  arch/powerpc/mm/lmb.c               4
-rw-r--r--  arch/powerpc/mm/mem.c              63
-rw-r--r--  arch/powerpc/mm/mmu_context_32.c    1
-rw-r--r--  arch/powerpc/mm/mmu_context_64.c    3
-rw-r--r--  arch/powerpc/mm/numa.c            201
-rw-r--r--  arch/powerpc/mm/pgtable_32.c        1
-rw-r--r--  arch/powerpc/mm/pgtable_64.c        1
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c        1
-rw-r--r--  arch/powerpc/mm/slb.c              38
-rw-r--r--  arch/powerpc/mm/slb_low.S           1
-rw-r--r--  arch/powerpc/mm/stab.c              1
-rw-r--r--  arch/powerpc/mm/tlb_32.c            1
-rw-r--r--  arch/powerpc/mm/tlb_64.c            2
24 files changed, 187 insertions, 298 deletions
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 3d79ce281b67..0a0a0487b334 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -24,7 +24,6 @@
*
*/
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -104,7 +103,7 @@ unsigned long __init mmu_mapin_ram(void)
/* Determine number of entries necessary to cover lowmem */
pinned_tlbs = (unsigned int)
- (_ALIGN(total_lowmem, PPC44x_PIN_SIZE) >> PPC44x_PIN_SHIFT);
+ (_ALIGN(total_lowmem, PPC_PIN_SIZE) >> PPC44x_PIN_SHIFT);
/* Write upper watermark to save location */
tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs;
@@ -112,7 +111,7 @@ unsigned long __init mmu_mapin_ram(void)
/* If necessary, set additional pinned TLBs */
if (pinned_tlbs > 1)
for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) {
- unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC44x_PIN_SIZE;
+ unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC_PIN_SIZE;
ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
}
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/4xx_mmu.c
index 4d006aa1a0d1..838e09db71d9 100644
--- a/arch/powerpc/mm/4xx_mmu.c
+++ b/arch/powerpc/mm/4xx_mmu.c
@@ -21,7 +21,6 @@
*
*/
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index fdbba4206d59..e8fa50624b70 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -15,7 +15,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -40,6 +39,40 @@
#include <asm/kdebug.h>
#include <asm/siginfo.h>
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+/* Hook to register for page fault notifications */
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig
+ };
+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ return NOTIFY_DONE;
+}
+#endif
+
/*
* Check whether the instruction at regs->nip is a store using
* an update addressing form which will update r1.
@@ -142,7 +175,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */
- if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code,
+ if (notify_page_fault(DIE_PAGE_FAULT, "page_fault", regs, error_code,
11, SIGSEGV) == NOTIFY_STOP)
return 0;
@@ -300,7 +333,7 @@ good_area:
/* protection fault */
if (error_code & 0x08000000)
goto bad_area;
- if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area;
}
@@ -353,7 +386,7 @@ bad_area_nosemaphore:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (current->pid == 1) {
+ if (is_init(current)) {
yield();
down_read(&mm->mmap_sem);
goto survive;
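
The fault.c change above trades the unconditional notify_die() call for a private notify_page_fault_chain, so the notifier walk on every page fault exists only when CONFIG_KPROBES is set and a consumer has actually registered. A minimal sketch of a consumer of the new hooks follows; the counting module itself is hypothetical, and only register_page_fault_notifier(), unregister_page_fault_notifier(), DIE_PAGE_FAULT and struct die_args come from the patch:

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <asm/atomic.h>
    #include <asm/ptrace.h>
    #include <asm/kdebug.h>

    /* Hypothetical consumer: count user-mode page faults seen on the chain. */
    static atomic_t pf_seen = ATOMIC_INIT(0);

    static int pf_event(struct notifier_block *self, unsigned long val,
                        void *data)
    {
            struct die_args *args = data;

            if (val == DIE_PAGE_FAULT && user_mode(args->regs))
                    atomic_inc(&pf_seen);
            /* NOTIFY_DONE lets do_page_fault() continue; returning
             * NOTIFY_STOP would make it bail out, as kprobes does when
             * the fault was triggered by one of its own probes. */
            return NOTIFY_DONE;
    }

    static struct notifier_block pf_nb = {
            .notifier_call = pf_event,
    };

    static int __init pf_init(void)
    {
            return register_page_fault_notifier(&pf_nb);
    }

    static void __exit pf_exit(void)
    {
            unregister_page_fault_notifier(&pf_nb);
    }

    module_init(pf_init);
    module_exit(pf_exit);
    MODULE_LICENSE("GPL");
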
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 5d581bb3aa12..123da03ab118 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -26,7 +26,6 @@
*
*/
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 94255beeecd3..bd68df5fa78a 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -21,7 +21,6 @@
*
*/
-#include <linux/config.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 52e914238959..9bc0a9c2b9bc 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -10,7 +10,6 @@
* described in the kernel's COPYING file.
*/
-#include <linux/config.h>
#include <asm/reg.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index a0f3cbd00d39..c90f124f3c71 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -520,7 +520,7 @@ static inline int tlb_batching_enabled(void)
}
#endif
-void hpte_init_native(void)
+void __init hpte_init_native(void)
{
ppc_md.hpte_invalidate = native_hpte_invalidate;
ppc_md.hpte_updatepp = native_hpte_updatepp;
@@ -530,5 +530,4 @@ void hpte_init_native(void)
ppc_md.hpte_clear_all = native_hpte_clear;
if (tlb_batching_enabled())
ppc_md.flush_hash_range = native_flush_hash_range;
- htab_finish_init();
}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d03fd2b4445e..1915661c2c81 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -21,7 +21,6 @@
#undef DEBUG
#undef DEBUG_LOW
-#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
@@ -167,34 +166,12 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
hash = hpt_hash(va, shift);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
- /* The crap below can be cleaned once ppd_md.probe() can
- * set up the hash callbacks, thus we can just used the
- * normal insert callback here.
- */
-#ifdef CONFIG_PPC_ISERIES
- if (machine_is(iseries))
- ret = iSeries_hpte_insert(hpteg, va,
- paddr,
- tmp_mode,
- HPTE_V_BOLTED,
- psize);
- else
-#endif
-#ifdef CONFIG_PPC_PSERIES
- if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR))
- ret = pSeries_lpar_hpte_insert(hpteg, va,
- paddr,
- tmp_mode,
- HPTE_V_BOLTED,
- psize);
- else
-#endif
-#ifdef CONFIG_PPC_MULTIPLATFORM
- ret = native_hpte_insert(hpteg, va,
- paddr,
- tmp_mode, HPTE_V_BOLTED,
- psize);
-#endif
+ DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);
+
+ BUG_ON(!ppc_md.hpte_insert);
+ ret = ppc_md.hpte_insert(hpteg, va, paddr,
+ tmp_mode, HPTE_V_BOLTED, psize);
+
if (ret < 0)
break;
}
@@ -413,6 +390,41 @@ void create_section_mapping(unsigned long start, unsigned long end)
}
#endif /* CONFIG_MEMORY_HOTPLUG */
+static inline void make_bl(unsigned int *insn_addr, void *func)
+{
+ unsigned long funcp = *((unsigned long *)func);
+ int offset = funcp - (unsigned long)insn_addr;
+
+ *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
+ flush_icache_range((unsigned long)insn_addr, 4+
+ (unsigned long)insn_addr);
+}
+
+static void __init htab_finish_init(void)
+{
+ extern unsigned int *htab_call_hpte_insert1;
+ extern unsigned int *htab_call_hpte_insert2;
+ extern unsigned int *htab_call_hpte_remove;
+ extern unsigned int *htab_call_hpte_updatepp;
+
+#ifdef CONFIG_PPC_64K_PAGES
+ extern unsigned int *ht64_call_hpte_insert1;
+ extern unsigned int *ht64_call_hpte_insert2;
+ extern unsigned int *ht64_call_hpte_remove;
+ extern unsigned int *ht64_call_hpte_updatepp;
+
+ make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
+ make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
+ make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
+ make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
+#endif /* CONFIG_PPC_64K_PAGES */
+
+ make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
+ make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
+ make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
+ make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
+}
+
void __init htab_initialize(void)
{
unsigned long table;
@@ -525,6 +537,8 @@ void __init htab_initialize(void)
mmu_linear_psize));
}
+ htab_finish_init();
+
DBG(" <- htab_initialize()\n");
}
#undef KB
@@ -787,16 +801,6 @@ void flush_hash_range(unsigned long number, int local)
}
}
-static inline void make_bl(unsigned int *insn_addr, void *func)
-{
- unsigned long funcp = *((unsigned long *)func);
- int offset = funcp - (unsigned long)insn_addr;
-
- *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
- flush_icache_range((unsigned long)insn_addr, 4+
- (unsigned long)insn_addr);
-}
-
/*
* low_hash_fault is called when the low-level hash code fails
* to insert a PTE due to a hypervisor error
@@ -815,28 +819,3 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address)
}
bad_page_fault(regs, address, SIGBUS);
}
-
-void __init htab_finish_init(void)
-{
- extern unsigned int *htab_call_hpte_insert1;
- extern unsigned int *htab_call_hpte_insert2;
- extern unsigned int *htab_call_hpte_remove;
- extern unsigned int *htab_call_hpte_updatepp;
-
-#ifdef CONFIG_PPC_64K_PAGES
- extern unsigned int *ht64_call_hpte_insert1;
- extern unsigned int *ht64_call_hpte_insert2;
- extern unsigned int *ht64_call_hpte_remove;
- extern unsigned int *ht64_call_hpte_updatepp;
-
- make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
- make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
- make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
- make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
-#endif /* CONFIG_PPC_64K_PAGES */
-
- make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
- make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
- make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
- make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
-}
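
Two details of the make_bl()/htab_finish_init() code being moved above are worth spelling out. First, 0x48000001 is the PowerPC I-form branch with the LK bit set ("bl"): the LI field in bits 6-29 carries a signed, word-aligned byte displacement, which is why the computed offset is masked with 0x03fffffc and why the reach is limited to +/-32MB (fine here, since the patched call sites and the hash routines both live in kernel text). Second, the double dereference that produces funcp reflects the 64-bit PowerPC ABI: a C function pointer such as ppc_md.hpte_insert addresses a function descriptor whose first doubleword is the real entry point. A standalone restatement of the encoding step, as a sketch (encode_bl is an illustrative name, not from the patch):

    #include <stdint.h>

    /* Build a PowerPC "bl target" to be stored at address insn.
     * 0x48000001 = primary opcode 18, AA=0, LK=1; the low two bits of
     * the displacement must be zero, hence the 0x03fffffc mask. */
    static uint32_t encode_bl(uint32_t *insn, unsigned long target)
    {
            long displacement = (long)target - (long)insn;

            return (uint32_t)(0x48000001 | (displacement & 0x03fffffc));
    }

After storing the patched instruction, the stale copy must be flushed from the caches with flush_icache_range(), exactly as make_bl() does.
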
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 266b8b2ceac9..5615acc29527 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -153,7 +153,7 @@ static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
hpdp->pd = 0;
tlb->need_flush = 1;
pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
- HUGEPTE_TABLE_SIZE-1));
+ PGF_CACHENUM_MASK));
}
#ifdef CONFIG_PPC_64K_PAGES
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index b57fb3a2b7bb..0e53ca8f02fb 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -18,7 +18,6 @@
*
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9e30f968c184..3ff374697e34 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -22,7 +22,6 @@
#undef DEBUG
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -41,6 +40,7 @@
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
+#include <linux/poison.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
@@ -90,7 +90,7 @@ void free_initmem(void)
addr = (unsigned long)__init_begin;
for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
- memset((void *)addr, 0xcc, PAGE_SIZE);
+ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
free_page(addr);
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index 8b6f522655a6..716a2906a24d 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -10,7 +10,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
@@ -321,7 +320,8 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit)
break;
}
- lmb.rmo_size = lmb.memory.region[0].size;
+ if (lmb.memory.region[0].size < lmb.rmo_size)
+ lmb.rmo_size = lmb.memory.region[0].size;
/* And truncate any reserves above the limit also. */
for (i = 0; i < lmb.reserved.cnt; i++) {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 69f3b9a20beb..16fe027bbc12 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -18,7 +18,6 @@
*
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -114,15 +113,20 @@ void online_page(struct page *page)
num_physpages++;
}
-int __devinit add_memory(u64 start, u64 size)
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
+{
+ return hot_add_scn_to_nid(start);
+}
+#endif
+
+int __devinit arch_add_memory(int nid, u64 start, u64 size)
{
struct pglist_data *pgdata;
struct zone *zone;
- int nid;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- nid = hot_add_scn_to_nid(start);
pgdata = NODE_DATA(nid);
start = (unsigned long)__va(start);
@@ -252,20 +256,22 @@ void __init do_init_bootmem(void)
boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
+ /* Add active regions with valid PFNs */
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ unsigned long start_pfn, end_pfn;
+ start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+ end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+ add_active_range(0, start_pfn, end_pfn);
+ }
+
/* Add all physical memory to the bootmem map, mark each area
* present.
*/
- for (i = 0; i < lmb.memory.cnt; i++) {
- unsigned long base = lmb.memory.region[i].base;
- unsigned long size = lmb_size_bytes(&lmb.memory, i);
#ifdef CONFIG_HIGHMEM
- if (base >= total_lowmem)
- continue;
- if (base + size > total_lowmem)
- size = total_lowmem - base;
+ free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);
+#else
+ free_bootmem_with_active_regions(0, max_pfn);
#endif
- free_bootmem(base, size);
- }
/* reserve the sections we're already using */
for (i = 0; i < lmb.reserved.cnt; i++)
@@ -273,9 +279,8 @@ void __init do_init_bootmem(void)
lmb_size_bytes(&lmb.reserved, i));
/* XXX need to clip this if using highmem? */
- for (i = 0; i < lmb.memory.cnt; i++)
- memory_present(0, lmb_start_pfn(&lmb.memory, i),
- lmb_end_pfn(&lmb.memory, i));
+ sparse_memory_present_with_active_regions(0);
+
init_bootmem_done = 1;
}
@@ -284,10 +289,9 @@ void __init do_init_bootmem(void)
*/
void __init paging_init(void)
{
- unsigned long zones_size[MAX_NR_ZONES];
- unsigned long zholes_size[MAX_NR_ZONES];
unsigned long total_ram = lmb_phys_mem_size();
unsigned long top_of_ram = lmb_end_of_DRAM();
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
#ifdef CONFIG_HIGHMEM
map_page(PKMAP_BASE, 0, 0); /* XXX gross */
@@ -303,26 +307,13 @@ void __init paging_init(void)
top_of_ram, total_ram);
printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20);
- /*
- * All pages are DMA-able so we put them all in the DMA zone.
- */
- memset(zones_size, 0, sizeof(zones_size));
- memset(zholes_size, 0, sizeof(zholes_size));
-
- zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
- zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
-
#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
- zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
- zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
+ max_zone_pfns[0] = total_lowmem >> PAGE_SHIFT;
+ max_zone_pfns[1] = top_of_ram >> PAGE_SHIFT;
#else
- zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
- zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
-#endif /* CONFIG_HIGHMEM */
-
- free_area_init_node(0, NODE_DATA(0), zones_size,
- __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
+ max_zone_pfns[0] = top_of_ram >> PAGE_SHIFT;
+#endif
+ free_area_init_nodes(max_zone_pfns);
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
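
The mem.c conversion above removes the hand-computed zones_size[]/zholes_size[] arrays: memory is described once via add_active_range(), and free_area_init_nodes() derives zone extents and holes from the registered ranges, needing only one maximum PFN per zone. Restating the resulting non-NUMA flow in one place (condensed from the hunks above, not additional code):

    unsigned long max_zone_pfns[MAX_NR_ZONES];
    int i;

    /* 1. Register every LMB region as an active PFN range on node 0. */
    for (i = 0; i < lmb.memory.cnt; i++) {
            unsigned long start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
            unsigned long end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);

            add_active_range(0, start_pfn, end_pfn);
    }

    /* 2. Free bootmem only for registered ranges; the HIGHMEM case
     *    simply clips the limit to total_lowmem >> PAGE_SHIFT. */
    free_bootmem_with_active_regions(0, max_pfn);

    /* 3. One upper boundary per zone; holes are derived automatically. */
    max_zone_pfns[0] = lmb_end_of_DRAM() >> PAGE_SHIFT;   /* ZONE_DMA */
    free_area_init_nodes(max_zone_pfns);
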
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
index e326e4249e1a..792086b01000 100644
--- a/arch/powerpc/mm/mmu_context_32.c
+++ b/arch/powerpc/mm/mmu_context_32.c
@@ -23,7 +23,6 @@
*
*/
-#include <linux/config.h>
#include <linux/mm.h>
#include <linux/init.h>
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
index 65d18dca266f..90a06ac02d5e 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -10,7 +10,6 @@
*
*/
-#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -44,7 +43,9 @@ again:
return err;
if (index > MAX_CONTEXT) {
+ spin_lock(&mmu_context_lock);
idr_remove(&mmu_context_idr, index);
+ spin_unlock(&mmu_context_lock);
return -ENOMEM;
}
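
The mmu_context_64.c fix above closes a race on the error path: idr_remove() mutates the idr just as idr_get_new_above() does, so it must run under the same mmu_context_lock that already guards the allocation. For context, the full allocation pattern of the idr API of this era looks like the following sketch (alloc_context_id is an illustrative name; in the kernel this logic lives in init_new_context()):

    static int alloc_context_id(void)
    {
            int index, err;

    again:
            /* May sleep, so runs unlocked; preloads free idr layers. */
            if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
                    return -ENOMEM;

            spin_lock(&mmu_context_lock);
            err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
            spin_unlock(&mmu_context_lock);

            if (err == -EAGAIN)
                    goto again;             /* lost a race, preload again */
            else if (err)
                    return err;

            if (index > MAX_CONTEXT) {
                    /* Too big for the hardware: undo, under the lock. */
                    spin_lock(&mmu_context_lock);
                    idr_remove(&mmu_context_idr, index);
                    spin_unlock(&mmu_context_lock);
                    return -ENOMEM;
            }
            return index;
    }
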
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index aa98cb3b59d8..43c272075e1a 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -39,96 +39,6 @@ static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
-/*
- * We need somewhere to store start/end/node for each region until we have
- * allocated the real node_data structures.
- */
-#define MAX_REGIONS (MAX_LMB_REGIONS*2)
-static struct {
- unsigned long start_pfn;
- unsigned long end_pfn;
- int nid;
-} init_node_data[MAX_REGIONS] __initdata;
-
-int __init early_pfn_to_nid(unsigned long pfn)
-{
- unsigned int i;
-
- for (i = 0; init_node_data[i].end_pfn; i++) {
- unsigned long start_pfn = init_node_data[i].start_pfn;
- unsigned long end_pfn = init_node_data[i].end_pfn;
-
- if ((start_pfn <= pfn) && (pfn < end_pfn))
- return init_node_data[i].nid;
- }
-
- return -1;
-}
-
-void __init add_region(unsigned int nid, unsigned long start_pfn,
- unsigned long pages)
-{
- unsigned int i;
-
- dbg("add_region nid %d start_pfn 0x%lx pages 0x%lx\n",
- nid, start_pfn, pages);
-
- for (i = 0; init_node_data[i].end_pfn; i++) {
- if (init_node_data[i].nid != nid)
- continue;
- if (init_node_data[i].end_pfn == start_pfn) {
- init_node_data[i].end_pfn += pages;
- return;
- }
- if (init_node_data[i].start_pfn == (start_pfn + pages)) {
- init_node_data[i].start_pfn -= pages;
- return;
- }
- }
-
- /*
- * Leave last entry NULL so we dont iterate off the end (we use
- * entry.end_pfn to terminate the walk).
- */
- if (i >= (MAX_REGIONS - 1)) {
- printk(KERN_ERR "WARNING: too many memory regions in "
- "numa code, truncating\n");
- return;
- }
-
- init_node_data[i].start_pfn = start_pfn;
- init_node_data[i].end_pfn = start_pfn + pages;
- init_node_data[i].nid = nid;
-}
-
-/* We assume init_node_data has no overlapping regions */
-void __init get_region(unsigned int nid, unsigned long *start_pfn,
- unsigned long *end_pfn, unsigned long *pages_present)
-{
- unsigned int i;
-
- *start_pfn = -1UL;
- *end_pfn = *pages_present = 0;
-
- for (i = 0; init_node_data[i].end_pfn; i++) {
- if (init_node_data[i].nid != nid)
- continue;
-
- *pages_present += init_node_data[i].end_pfn -
- init_node_data[i].start_pfn;
-
- if (init_node_data[i].start_pfn < *start_pfn)
- *start_pfn = init_node_data[i].start_pfn;
-
- if (init_node_data[i].end_pfn > *end_pfn)
- *end_pfn = init_node_data[i].end_pfn;
- }
-
- /* We didnt find a matching region, return start/end as 0 */
- if (*start_pfn == -1UL)
- *start_pfn = 0;
-}
-
static void __cpuinit map_cpu_to_node(int cpu, int node)
{
numa_cpu_lookup_table[cpu] = node;
@@ -159,12 +69,12 @@ static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
{
unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
struct device_node *cpu_node = NULL;
- unsigned int *interrupt_server, *reg;
+ const unsigned int *interrupt_server, *reg;
int len;
while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
/* Try interrupt server first */
- interrupt_server = (unsigned int *)get_property(cpu_node,
+ interrupt_server = get_property(cpu_node,
"ibm,ppc-interrupt-server#s", &len);
len = len / sizeof(u32);
@@ -175,8 +85,7 @@ static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
return cpu_node;
}
} else {
- reg = (unsigned int *)get_property(cpu_node,
- "reg", &len);
+ reg = get_property(cpu_node, "reg", &len);
if (reg && (len > 0) && (reg[0] == hw_cpuid))
return cpu_node;
}
@@ -186,9 +95,9 @@ static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
}
/* must hold reference to node during call */
-static int *of_get_associativity(struct device_node *dev)
+static const int *of_get_associativity(struct device_node *dev)
{
- return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
+ return get_property(dev, "ibm,associativity", NULL);
}
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
@@ -197,7 +106,7 @@ static int *of_get_associativity(struct device_node *dev)
static int of_node_to_nid_single(struct device_node *device)
{
int nid = -1;
- unsigned int *tmp;
+ const unsigned int *tmp;
if (min_common_depth == -1)
goto out;
@@ -255,7 +164,7 @@ EXPORT_SYMBOL_GPL(of_node_to_nid);
static int __init find_min_common_depth(void)
{
int depth;
- unsigned int *ref_points;
+ const unsigned int *ref_points;
struct device_node *rtas_root;
unsigned int len;
@@ -270,7 +179,7 @@ static int __init find_min_common_depth(void)
* configuration (should be all 0's) and the second is for a normal
* NUMA configuration.
*/
- ref_points = (unsigned int *)get_property(rtas_root,
+ ref_points = get_property(rtas_root,
"ibm,associativity-reference-points", &len);
if ((len >= 1) && ref_points) {
@@ -297,7 +206,7 @@ static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
of_node_put(memory);
}
-static unsigned long __devinit read_n_cells(int n, unsigned int **buf)
+static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
unsigned long result = 0;
@@ -334,7 +243,7 @@ out:
return nid;
}
-static int cpu_numa_callback(struct notifier_block *nfb,
+static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -435,15 +344,13 @@ static int __init parse_numa_properties(void)
unsigned long size;
int nid;
int ranges;
- unsigned int *memcell_buf;
+ const unsigned int *memcell_buf;
unsigned int len;
- memcell_buf = (unsigned int *)get_property(memory,
+ memcell_buf = get_property(memory,
"linux,usable-memory", &len);
if (!memcell_buf || len <= 0)
- memcell_buf =
- (unsigned int *)get_property(memory, "reg",
- &len);
+ memcell_buf = get_property(memory, "reg", &len);
if (!memcell_buf || len <= 0)
continue;
@@ -471,8 +378,8 @@ new_range:
continue;
}
- add_region(nid, start >> PAGE_SHIFT,
- size >> PAGE_SHIFT);
+ add_active_range(nid, start >> PAGE_SHIFT,
+ (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
if (--ranges)
goto new_range;
@@ -485,6 +392,7 @@ static void __init setup_nonnuma(void)
{
unsigned long top_of_ram = lmb_end_of_DRAM();
unsigned long total_ram = lmb_phys_mem_size();
+ unsigned long start_pfn, end_pfn;
unsigned int i;
printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
@@ -492,9 +400,11 @@ static void __init setup_nonnuma(void)
printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20);
- for (i = 0; i < lmb.memory.cnt; ++i)
- add_region(0, lmb.memory.region[i].base >> PAGE_SHIFT,
- lmb_size_pages(&lmb.memory, i));
+ for (i = 0; i < lmb.memory.cnt; ++i) {
+ start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+ end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+ add_active_range(0, start_pfn, end_pfn);
+ }
node_set_online(0);
}
@@ -609,14 +519,15 @@ static void __init *careful_allocation(int nid, unsigned long size,
return (void *)ret;
}
+static struct notifier_block __cpuinitdata ppc64_numa_nb = {
+ .notifier_call = cpu_numa_callback,
+ .priority = 1 /* Must run before sched domains notifier. */
+};
+
void __init do_init_bootmem(void)
{
int nid;
unsigned int i;
- static struct notifier_block ppc64_numa_nb = {
- .notifier_call = cpu_numa_callback,
- .priority = 1 /* Must run before sched domains notifier. */
- };
min_low_pfn = 0;
max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
@@ -632,11 +543,11 @@ void __init do_init_bootmem(void)
(void *)(unsigned long)boot_cpuid);
for_each_online_node(nid) {
- unsigned long start_pfn, end_pfn, pages_present;
+ unsigned long start_pfn, end_pfn;
unsigned long bootmem_paddr;
unsigned long bootmap_pages;
- get_region(nid, &start_pfn, &end_pfn, &pages_present);
+ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
/* Allocate the node structure node local if possible */
NODE_DATA(nid) = careful_allocation(nid,
@@ -669,19 +580,7 @@ void __init do_init_bootmem(void)
init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
start_pfn, end_pfn);
- /* Add free regions on this node */
- for (i = 0; init_node_data[i].end_pfn; i++) {
- unsigned long start, end;
-
- if (init_node_data[i].nid != nid)
- continue;
-
- start = init_node_data[i].start_pfn << PAGE_SHIFT;
- end = init_node_data[i].end_pfn << PAGE_SHIFT;
-
- dbg("free_bootmem %lx %lx\n", start, end - start);
- free_bootmem_node(NODE_DATA(nid), start, end - start);
- }
+ free_bootmem_with_active_regions(nid, end_pfn);
/* Mark reserved regions on this node */
for (i = 0; i < lmb.reserved.cnt; i++) {
@@ -712,44 +611,16 @@ void __init do_init_bootmem(void)
}
}
- /* Add regions into sparsemem */
- for (i = 0; init_node_data[i].end_pfn; i++) {
- unsigned long start, end;
-
- if (init_node_data[i].nid != nid)
- continue;
-
- start = init_node_data[i].start_pfn;
- end = init_node_data[i].end_pfn;
-
- memory_present(nid, start, end);
- }
+ sparse_memory_present_with_active_regions(nid);
}
}
void __init paging_init(void)
{
- unsigned long zones_size[MAX_NR_ZONES];
- unsigned long zholes_size[MAX_NR_ZONES];
- int nid;
-
- memset(zones_size, 0, sizeof(zones_size));
- memset(zholes_size, 0, sizeof(zholes_size));
-
- for_each_online_node(nid) {
- unsigned long start_pfn, end_pfn, pages_present;
-
- get_region(nid, &start_pfn, &end_pfn, &pages_present);
-
- zones_size[ZONE_DMA] = end_pfn - start_pfn;
- zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] - pages_present;
-
- dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
- zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);
-
- free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn,
- zholes_size);
- }
+ unsigned long max_zone_pfns[MAX_NR_ZONES] = {
+ lmb_end_of_DRAM() >> PAGE_SHIFT
+ };
+ free_area_init_nodes(max_zone_pfns);
}
static int __init early_numa(char *p)
@@ -786,10 +657,10 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
unsigned long start, size;
int ranges;
- unsigned int *memcell_buf;
+ const unsigned int *memcell_buf;
unsigned int len;
- memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
+ memcell_buf = get_property(memory, "reg", &len);
if (!memcell_buf || len <= 0)
continue;
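
The numa.c side of the same conversion deletes the whole init_node_data[] scheme (add_region(), get_region(), early_pfn_to_nid()) in favour of the generic bookkeeping: ranges are registered per node with add_active_range(nid, ...), and everything else is queried back from the core. The per-node flow used by the rewritten do_init_bootmem(), condensed from the hunks above:

    unsigned long start_pfn, end_pfn;

    get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);   /* node extents  */
    free_bootmem_with_active_regions(nid, end_pfn);     /* free real RAM */
    sparse_memory_present_with_active_regions(nid);     /* sparsemem map */
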
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 90628601fac7..8fcacb0239da 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -20,7 +20,6 @@
*
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 7b278d83739e..b1da03165496 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -22,7 +22,6 @@
*
*/
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 2ed43a493b31..7cceb2c44cb9 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -23,7 +23,6 @@
*
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 6a8bf6c6000e..d3733912adb4 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -16,13 +16,14 @@
#undef DEBUG
-#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include <linux/compiler.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -51,9 +52,32 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}
-static inline void create_slbe(unsigned long ea, unsigned long flags,
- unsigned long entry)
+static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+ unsigned long entry)
{
+ /*
+ * Clear the ESID first so the entry is not valid while we are
+ * updating it.
+ */
+ get_slb_shadow()->save_area[entry].esid = 0;
+ barrier();
+ get_slb_shadow()->save_area[entry].vsid = vsid;
+ barrier();
+ get_slb_shadow()->save_area[entry].esid = esid;
+
+}
+
+static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
+ unsigned long entry)
+{
+ /*
+ * Updating the shadow buffer before writing the SLB ensures
+ * we don't get a stale entry here if we get preempted by PHYP
+ * between these two statements.
+ */
+ slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
+ entry);
+
asm volatile("slbmte %0,%1" :
: "r" (mk_vsid_data(ea, flags)),
"r" (mk_esid_data(ea, entry))
@@ -78,6 +102,10 @@ void slb_flush_and_rebolt(void)
if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
ksp_esid_data &= ~SLB_ESID_V;
+ /* Only third entry (stack) may change here so only resave that */
+ slb_shadow_update(ksp_esid_data,
+ mk_vsid_data(ksp_esid_data, lflags), 2);
+
/* We need to do this all in asm, so we're sure we don't touch
* the stack between the slbia and rebolting it. */
asm volatile("isync\n"
@@ -210,9 +238,9 @@ void slb_initialize(void)
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
- create_slbe(PAGE_OFFSET, lflags, 0);
+ create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
- create_slbe(VMALLOC_START, vflags, 1);
+ create_shadowed_slbe(VMALLOC_START, vflags, 1);
/* We don't bolt the stack for the time being - we're in boot,
* so the stack is in the bolted segment. By the time it goes
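
The ordering in slb_shadow_update() above is the whole point of the change: the hypervisor may consult the shadow buffer whenever it preempts this CPU, so an entry must never look valid while half-written. The valid bit lives in the ESID word, so it is cleared first, the VSID updated, and the ESID written last; barrier() is only a compiler barrier, but that suffices because the buffer is read in the context of this CPU being preempted, so only program order matters. A generic restatement of the pattern (struct and function names illustrative):

    struct shadow_entry {
            unsigned long esid;     /* validity (SLB_ESID_V) lives here */
            unsigned long vsid;
    };

    static inline void shadow_set(struct shadow_entry *e,
                                  unsigned long esid, unsigned long vsid)
    {
            e->esid = 0;            /* entry now invalid to any reader  */
            barrier();              /* don't let the compiler reorder   */
            e->vsid = vsid;         /* safe to update while invalid     */
            barrier();
            e->esid = esid;         /* publish: entry valid again       */
    }
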
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 8548dcf8ef8b..dbc1abbde038 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -14,7 +14,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 691320c90b78..eeeacab548e6 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -12,7 +12,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index 02eb23e036d5..925ff70be8ba 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -23,7 +23,6 @@
*
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index e7449b068c82..b58baa65c4a7 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -22,7 +22,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -147,6 +146,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
psize = mmu_huge_psize;
#else
BUG();
+ psize = pte_pagesize_index(pte); /* shutup gcc */
#endif
} else
psize = pte_pagesize_index(pte);