From 45cd5290bfd358e9885c0bf47a8c46671a92f716 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 12 Jan 2012 23:08:07 +0000
Subject: ARM: add dma coherent region reporting via procfs

Add a new seqfile for reporting coherent DMA allocations. This contains
the address range, size and the function which was used to allocate
each region, allowing these allocations to be viewed in much the same
way as /proc/vmallocinfo.

The DMA coherent region has limited space, so this allows allocation
failures to be viewed, as well as finding out how much space is being
used.

Make sure this file is only readable by root - same as vmallocinfo - to
prevent information leakage.

Acked-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/mm/dma-mapping.c | 20 ++++++++-----
 arch/arm/mm/vmregion.c    | 76 ++++++++++++++++++++++++++++++++++++++++++++++-
 arch/arm/mm/vmregion.h    |  5 +++-
 3 files changed, 92 insertions(+), 9 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1aa664a1999f..db23ae4aaaab 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -214,7 +214,8 @@ static int __init consistent_init(void)
 core_initcall(consistent_init);
 
 static void *
-__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+	const void *caller)
 {
 	struct arm_vmregion *c;
 	size_t align;
@@ -241,7 +242,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
 	 * Allocate a virtual address in the consistent mapping region.
 	 */
 	c = arm_vmregion_alloc(&consistent_head, align, size,
-			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
+			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
 	if (c) {
 		pte_t *pte;
 		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
@@ -320,14 +321,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 
 #else	/* !CONFIG_MMU */
 
-#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
+#define __dma_alloc_remap(page, size, gfp, prot, c)	page_address(page)
 #define __dma_free_remap(addr, size)			do { } while (0)
 
 #endif	/* CONFIG_MMU */
 
 static void *
 __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
-	    pgprot_t prot)
+	    pgprot_t prot, const void *caller)
 {
 	struct page *page;
 	void *addr;
@@ -349,7 +350,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 		return NULL;
 
 	if (!arch_is_coherent())
-		addr = __dma_alloc_remap(page, size, gfp, prot);
+		addr = __dma_alloc_remap(page, size, gfp, prot, caller);
 	else
 		addr = page_address(page);
 
@@ -374,7 +375,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
 		return memory;
 
 	return __dma_alloc(dev, size, handle, gfp,
-			   pgprot_dmacoherent(pgprot_kernel));
+			   pgprot_dmacoherent(pgprot_kernel),
+			   __builtin_return_address(0));
 }
 EXPORT_SYMBOL(dma_alloc_coherent);
 
@@ -386,7 +388,8 @@ void *
 dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 {
 	return __dma_alloc(dev, size, handle, gfp,
-			   pgprot_writecombine(pgprot_kernel));
+			   pgprot_writecombine(pgprot_kernel),
+			   __builtin_return_address(0));
 }
 EXPORT_SYMBOL(dma_alloc_writecombine);
 
@@ -723,6 +726,9 @@ EXPORT_SYMBOL(dma_set_mask);
 
 static int __init dma_debug_do_init(void)
 {
+#ifdef CONFIG_MMU
+	arm_vmregion_create_proc("dma-mappings", &consistent_head);
+#endif
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 	return 0;
 }
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 036fdbfdd62f..a631016e1f8f 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -1,5 +1,8 @@
+#include
 #include
 #include
+#include
+#include
 #include
 
 #include "vmregion.h"
@@ -36,7 +39,7 @@ struct arm_vmregion *
 arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
-		   size_t size, gfp_t gfp)
+		   size_t size, gfp_t gfp, const void *caller)
 {
 	unsigned long start = head->vm_start, addr = head->vm_end;
 	unsigned long flags;
@@ -52,6 +55,8 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
 	if (!new)
 		goto out;
 
+	new->caller = caller;
+
 	spin_lock_irqsave(&head->vm_lock, flags);
 
 	addr = rounddown(addr - size, align);
@@ -129,3 +134,72 @@ void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
 
 	kfree(c);
 }
+
+#ifdef CONFIG_PROC_FS
+static int arm_vmregion_show(struct seq_file *m, void *p)
+{
+	struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
+
+	seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
+		c->vm_end - c->vm_start);
+	if (c->caller)
+		seq_printf(m, " %pS", (void *)c->caller);
+	seq_putc(m, '\n');
+	return 0;
+}
+
+static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
+{
+	struct arm_vmregion_head *h = m->private;
+	spin_lock_irq(&h->vm_lock);
+	return seq_list_start(&h->vm_list, *pos);
+}
+
+static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	struct arm_vmregion_head *h = m->private;
+	return seq_list_next(p, &h->vm_list, pos);
+}
+
+static void arm_vmregion_stop(struct seq_file *m, void *p)
+{
+	struct arm_vmregion_head *h = m->private;
+	spin_unlock_irq(&h->vm_lock);
+}
+
+static const struct seq_operations arm_vmregion_ops = {
+	.start	= arm_vmregion_start,
+	.stop	= arm_vmregion_stop,
+	.next	= arm_vmregion_next,
+	.show	= arm_vmregion_show,
+};
+
+static int arm_vmregion_open(struct inode *inode, struct file *file)
+{
+	struct arm_vmregion_head *h = PDE(inode)->data;
+	int ret = seq_open(file, &arm_vmregion_ops);
+	if (!ret) {
+		struct seq_file *m = file->private_data;
+		m->private = h;
+	}
+	return ret;
+}
+
+static const struct file_operations arm_vmregion_fops = {
+	.open	= arm_vmregion_open,
+	.read	= seq_read,
+	.llseek	= seq_lseek,
+	.release = seq_release,
+};
+
+int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
+{
+	proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
+	return 0;
+}
+#else
+int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
+{
+	return 0;
+}
+#endif
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 15e9f044db9f..162be662c088 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -19,11 +19,14 @@ struct arm_vmregion {
 	unsigned long vm_end;
 	struct page *vm_pages;
 	int vm_active;
+	const void *caller;
 };
 
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t);
+struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
 struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
 struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
 void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
 
+int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
+
 #endif
-- cgit v1.2.1
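For context, a minimal sketch (not part of the patch) of how a coherent allocation
would surface in the new file. The driver, device pointer and addresses below are
hypothetical; only dma_alloc_coherent() and the /proc/dma-mappings path/format come
from the patch above.

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/mm.h>

	static void *example_buf;		/* hypothetical driver state */
	static dma_addr_t example_buf_dma;

	static int example_probe(struct device *dev)	/* hypothetical probe */
	{
		/* One page of coherent DMA memory; the vmregion backing this
		 * allocation records __builtin_return_address(0), i.e. this
		 * caller, so it can be attributed in /proc/dma-mappings.
		 */
		example_buf = dma_alloc_coherent(dev, PAGE_SIZE,
						 &example_buf_dma, GFP_KERNEL);
		if (!example_buf)
			return -ENOMEM;
		return 0;
	}

With the allocation in place, /proc/dma-mappings (readable by root only) would
contain a line in the arm_vmregion_show() format, for example:

	0xffc00000-0xffc01000    4096 example_probe+0x1c/0x40

where the address range and symbol are illustrative.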
From 94e5a85b3be0ce109d26aa6812b2a02c518a0e4b Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 18 Jan 2012 15:32:49 +0000
Subject: ARM: earlier initialization of vectors page

Initialize the contents of the vectors page immediately after we
allocate the page, but before we map it. This avoids any possible
aliases with other mappings which may need to be flushed after the
page has been mapped irrespective of the cache type.

We follow this later with a flush_cache_all() after all static memory
mappings have been initialized, which ensures that this is safe from
any cache effects.

Tested-by: Catalin Marinas
Reviewed-by: Catalin Marinas
Acked-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/mm/mmu.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 94c5a0c94f5e..c1263adc2a26 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -997,11 +997,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 {
 	struct map_desc map;
 	unsigned long addr;
+	void *vectors;
 
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors_page = early_alloc(PAGE_SIZE);
+	vectors = early_alloc(PAGE_SIZE);
+
+	early_trap_init(vectors);
 
 	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -1041,7 +1044,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 * location (0xffff0000). If we aren't using high-vectors, also
 	 * create a mapping at the low-vectors virtual address.
 	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
 	map.type = MT_HIGH_VECTORS;
-- cgit v1.2.1

From de27c308223dc9bd48de9742c7c2b53a15c1b012 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 2 Jul 2011 14:46:27 +0100
Subject: ARM: pgtable: move TOP_PTE address definitions to arch/arm/mm/mm.h

Move the TOP_PTE address definitions to one central place so that it's
easy to discover what they're being used for. This helps to ensure
that there are no overlaps.

Reviewed-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/mm/copypage-v4mc.c   | 10 +++-------
 arch/arm/mm/copypage-v6.c     | 16 ++++++----------
 arch/arm/mm/copypage-xscale.c |  6 ------
 arch/arm/mm/flush.c           |  8 +++-----
 arch/arm/mm/mm.h              | 13 +++++++++++++
 5 files changed, 25 insertions(+), 28 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7d0a8c230342..87a23ca1fc61 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -23,10 +23,6 @@
 
 #include "mm.h"
 
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
 #define minicache_pgprot	__pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
 				  L_PTE_MT_MINICACHE)
 
@@ -78,10 +74,10 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
-	flush_tlb_kernel_page(0xffff8000);
+	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
+	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
-	mc_copy_user_page((void *)0xffff8000, kto);
+	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
 	raw_spin_unlock(&minicache_lock);
 
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 3d9a1552cef6..c00a75014435 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -24,9 +24,6 @@
 #error FIX ME
 #endif
 
-#define from_address	(0xffff8000)
-#define to_address	(0xffffc000)
-
 static DEFINE_RAW_SPINLOCK(v6_lock);
 
 /*
@@ -90,11 +87,11 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	 */
 	raw_spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
+	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
+	kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
 
-	kfrom = from_address + (offset << PAGE_SHIFT);
-	kto = to_address + (offset << PAGE_SHIFT);
+	set_pte_ext(TOP_PTE(kfrom), pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(kto), pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
 
 	flush_tlb_kernel_page(kfrom);
 	flush_tlb_kernel_page(kto);
@@ -111,8 +108,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
  */
 static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 {
-	unsigned int offset = CACHE_COLOUR(vaddr);
-	unsigned long to = to_address + (offset << PAGE_SHIFT);
+	unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 
 	/* FIXME: not highmem safe */
 	discard_old_kernel_data(page_address(page));
@@ -123,7 +119,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
 	 */
 	raw_spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to), pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 
 	clear_page((void *)to);
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 610c24ced310..90f3bb58eafa 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -23,12 +23,6 @@
 
 #include "mm.h"
 
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
-#define COPYPAGE_MINICACHE	0xffff8000
-
 #define minicache_pgprot	__pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
 				  L_PTE_MT_MINICACHE)
 
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 1a8d4aa821be..f4d407af4690 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -23,11 +23,9 @@
 
 #ifdef CONFIG_CPU_CACHE_VIPT
 
-#define ALIAS_FLUSH_START	0xffff4000
-
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 {
-	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 	const int zero = 0;
 
 	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
@@ -46,8 +44,8 @@ static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned
 	unsigned long offset = vaddr & (PAGE_SIZE - 1);
 	unsigned long to;
 
-	set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
-	to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
+	set_pte_ext(TOP_PTE(FLUSH_ALIAS_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
+	to = FLUSH_ALIAS_START + (colour << PAGE_SHIFT) + offset;
 	flush_tlb_kernel_page(to);
 	flush_icache_range(to, to + len);
 }
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 70f6d3ea4834..6ee1ff2c1da6 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -5,6 +5,19 @@ extern pmd_t *top_pmd;
 
 #define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
 
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently, while 0xffff4000
+ * is reserved for VIPT aliasing flushing by generic code.
+ *
+ * Note that we don't allow VIPT aliasing caches with SMP.
+ */
+#define COPYPAGE_MINICACHE	0xffff8000
+#define COPYPAGE_V6_FROM	0xffff8000
+#define COPYPAGE_V6_TO		0xffffc000
+/* PFN alias flushing, for VIPT caches */
+#define FLUSH_ALIAS_START	0xffff4000
+
 static inline pmd_t *pmd_off_k(unsigned long virt)
 {
 	return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
-- cgit v1.2.1
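A quick worked illustration of how these fixed addresses get used by the v6
copypage code (a sketch only; the chosen colour value and the 4K page size are
assumptions, not part of the patch):

	/*
	 * With PAGE_SHIFT = 12 and a user page whose cache colour is
	 * CACHE_COLOUR(vaddr) = 2, the aliasing copy routine maps:
	 *
	 *   kfrom = COPYPAGE_V6_FROM + (2 << 12) = 0xffff8000 + 0x2000 = 0xffffa000
	 *   kto   = COPYPAGE_V6_TO   + (2 << 12) = 0xffffc000 + 0x2000 = 0xffffe000
	 *
	 * Both temporary kernel mappings therefore land on the same cache
	 * colour as the user's view of the page, which is what keeps the
	 * copy free of VIPT aliasing problems.
	 */

Keeping all of these reserved addresses in mm.h makes such overlaps visible at
a glance, which is the point of the consolidation.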
From 6e78df176141f2cb673bed7fa47825e3c6a8719f Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 2 Jul 2011 14:56:42 +0100
Subject: ARM: pgtable: use mk_pte rather than pfn_pte(page_to_pfn())

mk_pte is provided to do this translation for us, so use it rather
than open-coding it in the copypage code.

Reviewed-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/mm/copypage-v4mc.c   | 2 +-
 arch/arm/mm/copypage-v6.c     | 6 +++---
 arch/arm/mm/copypage-xscale.c | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 87a23ca1fc61..e4dc2f491123 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -74,7 +74,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
+	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), mk_pte(from, minicache_pgprot), 0);
 	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index c00a75014435..86524591b1b5 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -90,8 +90,8 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
 	kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
 
-	set_pte_ext(TOP_PTE(kfrom), pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(kto), pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(kfrom), mk_pte(from, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(kto), mk_pte(to, PAGE_KERNEL), 0);
 
 	flush_tlb_kernel_page(kfrom);
 	flush_tlb_kernel_page(kto);
@@ -119,7 +119,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
 	 */
 	raw_spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to), pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to), mk_pte(page, PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 
 	clear_page((void *)to);
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 90f3bb58eafa..2497dcf6d9ae 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -94,7 +94,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
+	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), mk_pte(from, minicache_pgprot), 0);
 	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
-- cgit v1.2.1
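The conversion relies on mk_pte() being the page-based spelling of the
open-coded pfn form. A minimal sketch of the identity being used (the wrapper
function is hypothetical, added only for illustration):

	#include <linux/mm.h>	/* pulls in asm/pgtable.h for mk_pte/pfn_pte */

	/* mk_pte(page, prot) builds the same PTE as the open-coded form
	 * pfn_pte(page_to_pfn(page), prot); the patch simply switches the
	 * copypage code to the shorter, clearer spelling.
	 */
	static pte_t example_make_pte(struct page *page, pgprot_t prot)
	{
		/* before: return pfn_pte(page_to_pfn(page), prot); */
		return mk_pte(page, prot);
	}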
From 67ece1443174d852e71c42facb3e2d7dd338c88a Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 2 Jul 2011 15:20:44 +0100
Subject: ARM: pgtable: consolidate set_pte_ext(TOP_PTE,...) + tlb flush

A number of places establish a PTE in our top page table and
immediately flush the TLB. Rather than having this at every callsite,
provide an inline function for this purpose.

This changes some global tlb flushes to be local; each time we setup
one of these mappings, we always do it with preemption disabled which
would prevent us migrating to another CPU.

Reviewed-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/mm/copypage-v4mc.c   |  3 +--
 arch/arm/mm/copypage-v6.c     | 10 +++-------
 arch/arm/mm/copypage-xscale.c |  3 +--
 arch/arm/mm/flush.c           | 10 ++++------
 arch/arm/mm/highmem.c         | 13 +++++--------
 arch/arm/mm/mm.h              |  6 ++++++
 6 files changed, 20 insertions(+), 25 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index e4dc2f491123..6e06180a8bc0 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -74,8 +74,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), mk_pte(from, minicache_pgprot), 0);
-	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 86524591b1b5..29c770463e41 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -90,11 +90,8 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
 	kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
 
-	set_pte_ext(TOP_PTE(kfrom), mk_pte(from, PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(kto), mk_pte(to, PAGE_KERNEL), 0);
-
-	flush_tlb_kernel_page(kfrom);
-	flush_tlb_kernel_page(kto);
+	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
+	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
 
 	copy_page((void *)kto, (void *)kfrom);
 
@@ -119,8 +116,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
 	 */
 	raw_spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to), mk_pte(page, PAGE_KERNEL), 0);
-	flush_tlb_kernel_page(to);
+	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
 
 	clear_page((void *)to);
 
 	raw_spin_unlock(&v6_lock);
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 2497dcf6d9ae..804eeddda97f 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -94,8 +94,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), mk_pte(from, minicache_pgprot), 0);
-	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index f4d407af4690..4d0b70f035eb 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -28,8 +28,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 	const int zero = 0;
 
-	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
-	flush_tlb_kernel_page(to);
+	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
 
 	asm(	"mcrr	p15, 0, %1, %0, c14\n"
 	"	mcr	p15, 0, %2, c7, c10, 4"
@@ -40,13 +39,12 @@ static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned
-	unsigned long colour = CACHE_COLOUR(vaddr);
+	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 	unsigned long offset = vaddr & (PAGE_SIZE - 1);
 	unsigned long to;
 
-	set_pte_ext(TOP_PTE(FLUSH_ALIAS_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
-	to = FLUSH_ALIAS_START + (colour << PAGE_SHIFT) + offset;
-	flush_tlb_kernel_page(to);
+	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
+	to = va + offset;
 	flush_icache_range(to, to + len);
 }
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 807c0573abbe..35352517a5d4 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -71,13 +71,12 @@ void *__kmap_atomic(struct page *page)
 	 */
 	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
 #endif
-	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
 	/*
 	 * When debugging is off, kunmap_atomic leaves the previous mapping
-	 * in place, so this TLB flush ensures the TLB is updated with the
-	 * new mapping.
+	 * in place, so the contained TLB flush ensures the TLB is updated
+	 * with the new mapping.
 	 */
-	local_flush_tlb_kernel_page(vaddr);
+	set_top_pte(vaddr, mk_pte(page, kmap_prot));
 
 	return (void *)vaddr;
 }
@@ -96,8 +95,7 @@ void __kunmap_atomic(void *kvaddr)
 		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
-		local_flush_tlb_kernel_page(vaddr);
+		set_top_pte(vaddr, __pte(0));
 #else
 		(void) idx;  /* to kill a warning */
 #endif
@@ -123,8 +121,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
 #endif
-	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
-	local_flush_tlb_kernel_page(vaddr);
+	set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
 
 	return (void *)vaddr;
 }
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6ee1ff2c1da6..a4e7febeb6a1 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -18,6 +18,12 @@ extern pmd_t *top_pmd;
 /* PFN alias flushing, for VIPT caches */
 #define FLUSH_ALIAS_START	0xffff4000
 
+static inline void set_top_pte(unsigned long va, pte_t pte)
+{
+	set_pte_ext(TOP_PTE(va), pte, 0);
+	local_flush_tlb_kernel_page(va);
+}
+
 static inline pmd_t *pmd_off_k(unsigned long virt)
 {
 	return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
-- cgit v1.2.1
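The pattern being factored out, shown as a hypothetical call-site fragment in
the style of the arch/arm/mm users above ('va' and 'pfn' are placeholders for
whatever a given caller computes; this is a sketch, not code from the patch):

	#include <linux/mm.h>
	#include "mm.h"		/* set_top_pte(), TOP_PTE address layout */

	static void example_map_alias(unsigned long va, unsigned long pfn)
	{
		/* Old pattern, open-coded at each call site:
		 *	set_pte_ext(TOP_PTE(va), pfn_pte(pfn, PAGE_KERNEL), 0);
		 *	flush_tlb_kernel_page(va);
		 * New pattern - one helper, and the flush becomes CPU-local,
		 * which is safe because these mappings are only ever set up
		 * with preemption disabled:
		 */
		set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	}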
From 0d31fe47b0f62e6546779eae2fc9b2e024aff4ce Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 4 Jul 2011 11:22:27 +0100
Subject: ARM: pgtable: provide get_top_pte() to complement set_top_pte()

Provide get_top_pte() to complement set_top_pte(), moving the only
users of TOP_PTE to arch/arm/mm/mm.h.

Reviewed-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/mm/highmem.c | 8 +++-----
 arch/arm/mm/mm.h      | 5 +++++
 2 files changed, 8 insertions(+), 5 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 35352517a5d4..3a9e8aa19759 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -69,7 +69,7 @@ void *__kmap_atomic(struct page *page)
 	 * With debugging enabled, kunmap_atomic forces that entry to 0.
 	 * Make sure it was indeed properly unmapped.
 	 */
-	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+	BUG_ON(!pte_none(get_top_pte(vaddr)));
 #endif
 	/*
 	 * When debugging is off, kunmap_atomic leaves the previous mapping
@@ -119,7 +119,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+	BUG_ON(!pte_none(get_top_pte(vaddr)));
 #endif
 	set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
 
@@ -129,11 +129,9 @@ struct page *kmap_atomic_to_page(const void *ptr)
 {
 	unsigned long vaddr = (unsigned long)ptr;
-	pte_t *pte;
 
 	if (vaddr < FIXADDR_START)
 		return virt_to_page(ptr);
 
-	pte = TOP_PTE(vaddr);
-	return pte_page(*pte);
+	return pte_page(get_top_pte(vaddr));
 }
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a4e7febeb6a1..1651d5aa04a6 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -24,6 +24,11 @@ static inline void set_top_pte(unsigned long va, pte_t pte)
 	local_flush_tlb_kernel_page(va);
 }
 
+static inline pte_t get_top_pte(unsigned long va)
+{
+	return *TOP_PTE(va);
+}
+
 static inline pmd_t *pmd_off_k(unsigned long virt)
 {
 	return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
-- cgit v1.2.1

From 60db4fcf14c6b562399579473a67e51eed694ff4 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 4 Jul 2011 11:25:53 +0100
Subject: ARM: pgtable: get rid of TOP_PTE()

Get rid of the TOP_PTE() macro as we now have proper accessor functions
instead. No one should be directly referencing the top pte table
anymore.

Reviewed-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/mm/mm.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 1651d5aa04a6..27f4a619b35d 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -3,8 +3,6 @@
 /* the upper-most page table pointer */
 extern pmd_t *top_pmd;
 
-#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
-
 /*
  * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
  * specific hacks for copying pages efficiently, while 0xffff4000
@@ -20,13 +18,15 @@ extern pmd_t *top_pmd;
 
 static inline void set_top_pte(unsigned long va, pte_t pte)
 {
-	set_pte_ext(TOP_PTE(va), pte, 0);
+	pte_t *ptep = pte_offset_kernel(top_pmd, va);
+	set_pte_ext(ptep, pte, 0);
 	local_flush_tlb_kernel_page(va);
 }
 
 static inline pte_t get_top_pte(unsigned long va)
 {
-	return *TOP_PTE(va);
+	pte_t *ptep = pte_offset_kernel(top_pmd, va);
+	return *ptep;
 }
 
 static inline pmd_t *pmd_off_k(unsigned long virt)
-- cgit v1.2.1

From 195864cf3d6f5b6b743793bda3aaa2ff65d322ae Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 19 Jan 2012 10:05:41 +0000
Subject: ARM: move CP15 definitions to separate header file

Avoid namespace conflicts with drivers over the CP15 definitions by
moving CP15 related prototypes and definitions to a private header
file.

Acked-by: Stephen Warren
Tested-by: Stephen Warren [Tegra]
Acked-by: H Hartley Sweeten
Tested-by: H Hartley Sweeten [EP93xx]
Acked-by: Nicolas Pitre
Acked-by: Kukjin Kim
Signed-off-by: Russell King
---
 arch/arm/mm/alignment.c         | 2 +-
 arch/arm/mm/cache-feroceon-l2.c | 1 +
 arch/arm/mm/cache-tauros2.c     | 1 +
 arch/arm/mm/cache-xsc3l2.c      | 2 +-
 arch/arm/mm/ioremap.c           | 1 +
 arch/arm/mm/mmu.c               | 1 +
 arch/arm/mm/pgd.c               | 1 +
 7 files changed, 7 insertions(+), 2 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index caf14dc059e5..78459b8a2a1d 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -22,7 +22,7 @@
 #include
 #include
-#include
+#include
 #include
 
 #include "fault.h"
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index e0b0e7a4ec68..dd3d59122cc3 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include
 
 /*
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
index 50868651890f..1fbca05fe906 100644
--- a/arch/arm/mm/cache-tauros2.c
+++ b/arch/arm/mm/cache-tauros2.c
@@ -16,6 +16,7 @@
 #include
 #include
+#include
 #include
 
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 5a32020471e3..6c3edeb66e74 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -18,7 +18,7 @@
  */
 #include
 #include
-#include
+#include
 #include
 #include
 
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 80632e8d7538..66daf17b5e33 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -26,6 +26,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c1263adc2a26..f77f1dbbdf59 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -17,6 +17,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
 
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3e78ccabd65..0acb089d0f70 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -12,6 +12,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
-- cgit v1.2.1

From d9277d51a8eeaa097d3c1385f458c99d65ffc4f4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?=
Date: Wed, 1 Feb 2012 11:16:51 +0100
Subject: ARM: 7312/1: only show modules in the memory layout for MODULES=y
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This line is irritating and wrong when modules are not supported, so
don't show it then.

Signed-off-by: Uwe Kleine-König
Acked-by: Nicolas Pitre
Acked-by: Linus Walleij
Signed-off-by: Russell King
---
 arch/arm/mm/init.c | 4 ++++
 1 file changed, 4 insertions(+)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 6ec1226fc62d..42d906f89964 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -659,7 +659,9 @@ void __init mem_init(void)
 #ifdef CONFIG_HIGHMEM
 			" pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
 #endif
+#ifdef CONFIG_MODULES
 			" modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
 			" .text : 0x%p" " - 0x%p" " (%4d kB)\n"
 			" .init : 0x%p" " - 0x%p" " (%4d kB)\n"
 			" .data : 0x%p" " - 0x%p" " (%4d kB)\n"
@@ -678,7 +680,9 @@ void __init mem_init(void)
 			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * (PAGE_SIZE)),
 #endif
+#ifdef CONFIG_MODULES
 			MLM(MODULES_VADDR, MODULES_END),
+#endif
 
 			MLK_ROUNDUP(_text, _etext),
 			MLK_ROUNDUP(__init_begin, __init_end),
-- cgit v1.2.1

From f5274c2d0d8d91076af8605187d762dfa0b92825 Mon Sep 17 00:00:00 2001
From: Javi Merino
Date: Mon, 6 Feb 2012 15:45:36 +0100
Subject: ARM: 7319/1: Print debug info for SIGBUS in user faults

Print debug information on user faults for SIGBUS if user_debug = 16
in the kernel command line.

Reference: <1327333344-26340-1-git-send-email-javi.merino@arm.com>

Signed-off-by: Javi Merino
Signed-off-by: Russell King
---
 arch/arm/mm/fault.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index bb7eac381a8e..40c43a94ba72 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -164,7 +164,8 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
 	struct siginfo si;
 
 #ifdef CONFIG_DEBUG_USER
-	if (user_debug & UDBG_SEGV) {
+	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
+	    ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
 		printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
 		       tsk->comm, sig, addr, fsr);
 		show_pte(tsk->mm, addr);
-- cgit v1.2.1
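For reference, a sketch of how the new SIGBUS reporting would be exercised.
The commit message itself gives 16 as the user_debug value for the UDBG_BUS
bit; the surrounding command-line options below are hypothetical, and the
kernel must be built with CONFIG_DEBUG_USER=y for user_debug to have any
effect.

	# Hypothetical kernel command line enabling SIGBUS fault reporting:
	#   console=ttyAMA0 root=/dev/mmcblk0p2 user_debug=16

An unhandled user fault that raises SIGBUS then logs the "unhandled page
fault" KERN_DEBUG message and the show_pte() page table dump shown in the
hunk above, just as UDBG_SEGV already did for SIGSEGV.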