Diffstat (limited to 'arch/powerpc/mm')

 arch/powerpc/mm/init_32.c     | 36 ----
 arch/powerpc/mm/init_64.c     | 29 ----
 arch/powerpc/mm/mem.c         |  8 +-
 arch/powerpc/mm/pgtable.c     | 19 ++-
 arch/powerpc/mm/tlb_low_64e.S |  1 -
 5 files changed, 20 insertions(+), 73 deletions(-)
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 3ef5084b90ca..9ddcfb4dc139 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -242,39 +242,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-#ifdef CONFIG_PROC_KCORE
-static struct kcore_list kcore_vmem;
-
-static int __init setup_kcore(void)
-{
-	int i;
-
-	for (i = 0; i < lmb.memory.cnt; i++) {
-		unsigned long base;
-		unsigned long size;
-		struct kcore_list *kcore_mem;
-
-		base = lmb.memory.region[i].base;
-		size = lmb.memory.region[i].size;
-
-		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
-		if (!kcore_mem)
-			panic("%s: kmalloc failed\n", __func__);
-
-		/* must stay under 32 bits */
-		if ( 0xfffffffful - (unsigned long)__va(base) < size) {
-			size = 0xfffffffful - (unsigned long)(__va(base));
-			printk(KERN_DEBUG "setup_kcore: restrict size=%lx\n",
-				size);
-		}
-
-		kclist_add(kcore_mem, __va(base), size);
-	}
-
-	kclist_add(&kcore_vmem, (void *)VMALLOC_START,
-		VMALLOC_END-VMALLOC_START);
-
-	return 0;
-}
-module_init(setup_kcore);
-#endif
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 31582329cd67..335c578b9cc3 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -109,35 +109,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-#ifdef CONFIG_PROC_KCORE
-static struct kcore_list kcore_vmem;
-
-static int __init setup_kcore(void)
-{
-	int i;
-
-	for (i=0; i < lmb.memory.cnt; i++) {
-		unsigned long base, size;
-		struct kcore_list *kcore_mem;
-
-		base = lmb.memory.region[i].base;
-		size = lmb.memory.region[i].size;
-
-		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
-		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
-		if (!kcore_mem)
-			panic("%s: kmalloc failed\n", __func__);
-
-		kclist_add(kcore_mem, __va(base), size);
-	}
-
-	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
-
-	return 0;
-}
-module_init(setup_kcore);
-#endif
-
 static void pgd_ctor(void *addr)
 {
 	memset(addr, 0, PGD_TABLE_SIZE);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 579382c163a9..59736317bf0e 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -143,8 +143,8 @@ int arch_add_memory(int nid, u64 start, u64 size)
  * memory regions, find holes and callback for contiguous regions.
  */
 int
-walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
-			int (*func)(unsigned long, unsigned long, void *))
+walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
+		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
 	struct lmb_property res;
 	unsigned long pfn, len;
@@ -166,7 +166,7 @@ walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
 	}
 	return ret;
 }
-EXPORT_SYMBOL_GPL(walk_memory_resource);
+EXPORT_SYMBOL_GPL(walk_system_ram_range);
 
 /*
  * Initialize the bootmem system and give it all the memory we
@@ -372,7 +372,7 @@ void __init mem_init(void)
 
 	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
 	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
-		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+		nr_free_pages() << (PAGE_SHIFT-10),
 		num_physpages << (PAGE_SHIFT-10),
 		codesize >> 10,
 		reservedpages << (PAGE_SHIFT-10),
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 83f1551ec2c9..53040931de32 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -30,6 +30,8 @@
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 
+#include "mmu_decl.h"
+
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 #ifdef CONFIG_SMP
@@ -166,7 +168,7 @@ struct page * maybe_pte_to_page(pte_t pte)
  * support falls into the same category.
  */
 
-static pte_t set_pte_filter(pte_t pte)
+static pte_t set_pte_filter(pte_t pte, unsigned long addr)
 {
 	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
@@ -175,6 +177,17 @@
 		if (!pg)
 			return pte;
 		if (!test_bit(PG_arch_1, &pg->flags)) {
+#ifdef CONFIG_8xx
+			/* On 8xx, cache control instructions (particularly
+			 * "dcbst" from flush_dcache_icache) fault as write
+			 * operation if there is an unpopulated TLB entry
+			 * for the address in question. To workaround that,
+			 * we invalidate the TLB here, thus avoiding dcbst
+			 * misbehaviour.
+			 */
+			/* 8xx doesn't care about PID, size or ind args */
+			_tlbil_va(addr, 0, 0, 0);
+#endif /* CONFIG_8xx */
 			flush_dcache_icache_page(pg);
 			set_bit(PG_arch_1, &pg->flags);
 		}
@@ -194,7 +207,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
 * instead we "filter out" the exec permission for non clean pages.
 */
-static pte_t set_pte_filter(pte_t pte)
+static pte_t set_pte_filter(pte_t pte, unsigned long addr)
 {
 	struct page *pg;
 
@@ -276,7 +289,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 	 * this context might not have been activated yet when this
 	 * is called.
 	 */
-	pte = set_pte_filter(pte);
+	pte = set_pte_filter(pte, addr);
 
 	/* Perform the setting of the PTE */
 	__set_pte_at(mm, addr, ptep, pte, 0);
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index ef1cccf71173..f288279e679d 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -18,7 +18,6 @@
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
 #include <asm/pgtable.h>
-#include <asm/reg.h>
 #include <asm/exception-64e.h>
 #include <asm/ppc-opcode.h>
 
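
A note on the mem.c change above: walk_memory_resource() is renamed to walk_system_ram_range() with an identical callback contract, so existing callers only need the new name. As a minimal illustrative sketch (not part of this patch; count_ram_pages() and count_ram_pages_cb() are hypothetical names), a caller tallying System RAM pages in a pfn range might look like this, assuming the usual convention that a non-zero return from the callback aborts the walk:

	/* Callback: invoked once per contiguous block of System RAM
	 * inside [start_pfn, start_pfn + nr_pages). */
	static int count_ram_pages_cb(unsigned long start_pfn,
				      unsigned long nr_pages, void *arg)
	{
		unsigned long *total = arg;

		*total += nr_pages;
		return 0;	/* keep walking */
	}

	static unsigned long count_ram_pages(unsigned long start_pfn,
					     unsigned long nr_pages)
	{
		unsigned long total = 0;

		walk_system_ram_range(start_pfn, nr_pages, &total,
				      count_ram_pages_cb);
		return total;
	}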