From c768e67625688517c23f46b31a46e1f7d2de1c71 Mon Sep 17 00:00:00 2001
From: Hartley Sweeten
Date: Wed, 21 Oct 2009 02:27:01 +0100
Subject: ARM: 5769/1: CPU_ARM920T: remove dead Maverick EP9312 URL

Remove the URL listed for Maverick EP9312 since it is not available
and modify the help text appropriately.

Signed-off-by: H Hartley Sweeten
Acked-by: Ryan Mallon
Signed-off-by: Russell King
---
 arch/arm/mm/Kconfig | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e993140edd88..9264d814cd7a 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -122,10 +122,7 @@ config CPU_ARM920T
 	select CPU_TLB_V4WBI if MMU
 	help
 	  The ARM920T is licensed to be produced by numerous vendors,
-	  and is used in the Maverick EP9312 and the Samsung S3C2410.
-
-	  More information on the Maverick EP9312 at
-	  .
+	  and is used in the Cirrus EP93xx and the Samsung S3C2410.
 
 	  Say Y if you want support for the ARM920T processor.
 	  Otherwise, say N.
--
cgit v1.2.1

From c06e004c72aa0d1acbc239fb995aa3d823543a8a Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 25 Oct 2009 22:36:10 +0000
Subject: ARM: Use GFP_DMA only for masks _less_ than 32-bit

We were using GFP_DMA for masks other than 0xffffffff, which is wrong
when some masks are initialized to 0xffffffffffffffff.  This caused
such masks to obtain memory from the precious DMA pool.

Signed-off-by: Russell King
---
 arch/arm/mm/dma-mapping.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b30925fcbcdc..b9590a7085ca 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -205,7 +205,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 
 	order = get_order(size);
 
-	if (mask != 0xffffffff)
+	if (mask < 0xffffffffULL)
 		gfp |= GFP_DMA;
 
 	page = alloc_pages(gfp, order);
@@ -289,7 +289,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	if (!mask)
 		goto error;
 
-	if (mask != 0xffffffff)
+	if (mask < 0xffffffffULL)
 		gfp |= GFP_DMA;
 	virt = kmalloc(size, gfp);
 	if (!virt)
--
cgit v1.2.1

From 657e12fd388899502d47f9f6f9d276ec9ced8add Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 29 Oct 2009 17:06:17 +0000
Subject: ARM: Fix sparsemem with SPARSEMEM_EXTREME enabled

When SPARSEMEM_EXTREME is enabled, memory_present() wants to use
bootmem to allocate data structures.  However, we call memory_present()
after declaring memory to bootmem, but before we've reserved areas.

This leads to sparsemem data structures being overwritten later in the
kernel's initialization (when slab initializes.)
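Concretely, the per-node ordering this patch establishes looks roughly
like the following (a condensed sketch drawn from the hunks below;
surrounding code in bootmem_init() is elided):

	/*
	 * 1. declare usable memory to bootmem (free_bootmem_node)
	 * 2. make the fixed reservations, e.g. the initrd:
	 */
	if (node == initrd_node)
		bootmem_reserve_initrd(node);

	/*
	 * 3. only now may memory_present() run, since with
	 *    SPARSEMEM_EXTREME it allocates from bootmem:
	 */
	arm_memory_present(mi, node);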
Signed-off-by: Russell King
---
 arch/arm/mm/init.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 40940d7ce4ff..52c40d155672 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -273,7 +273,6 @@ static void __init bootmem_init_node(int node, struct meminfo *mi,
 		struct membank *bank = &mi->bank[i];
 		if (!bank->highmem)
 			free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
-		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
 	}
 
 	/*
@@ -370,6 +369,19 @@ int pfn_valid(unsigned long pfn)
 	return 0;
 }
 EXPORT_SYMBOL(pfn_valid);
+
+static void arm_memory_present(struct meminfo *mi, int node)
+{
+}
+#else
+static void arm_memory_present(struct meminfo *mi, int node)
+{
+	int i;
+	for_each_nodebank(i, mi, node) {
+		struct membank *bank = &mi->bank[i];
+		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
+	}
+}
 #endif
 
 static int __init meminfo_cmp(const void *_a, const void *_b)
@@ -427,6 +439,12 @@ void __init bootmem_init(void)
 		 */
 		if (node == initrd_node)
 			bootmem_reserve_initrd(node);
+
+		/*
+		 * Sparsemem tries to allocate bootmem in memory_present(),
+		 * so must be done after the fixed reservations
+		 */
+		arm_memory_present(mi, node);
 	}
 
 	/*
--
cgit v1.2.1

From df71dfd4ca01130f98d9dbfab76c440d72a177c6 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 24 Oct 2009 22:36:36 +0100
Subject: ARM: Fix errata 411920 workarounds

Errata 411920 indicates that any "invalidate entire instruction cache"
operation can fail if the right conditions are present.  This is not
limited just to those operations in flush.c, but elsewhere.  Place the
workaround in the already existing __flush_icache_all() function
instead.
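For reference, the header side of this change is not visible in this
log (which is limited to arch/arm/mm); it makes __flush_icache_all() in
asm/cacheflush.h look roughly like this:

	static inline void __flush_icache_all(void)
	{
	#ifdef CONFIG_ARM_ERRATA_411920
		extern void v6_icache_inval_all(void);
		v6_icache_inval_all();	/* erratum-safe full I-cache invalidate */
	#else
		asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
		    :
		    : "r" (0));
	#endif
	}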
Signed-off-by: Russell King
---
 arch/arm/mm/context.c |  5 +----
 arch/arm/mm/flush.c   | 31 ++++++-------------------------
 2 files changed, 7 insertions(+), 29 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 6bda76a43199..a9e22e31eaa1 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -50,10 +50,7 @@ void __new_context(struct mm_struct *mm)
 	isb();
 	flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
-		asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
-		    "mcr	p15, 0, %0, c7, c5, 6	@ flush BTAC/BTB\n"
-		    :
-		    : "r" (0));
+		__flush_icache_all();
 		dsb();
 	}
 }
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b27942909b23..7f294f307c83 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -18,10 +18,6 @@
 
 #include "mm.h"
 
-#ifdef CONFIG_ARM_ERRATA_411920
-extern void v6_icache_inval_all(void);
-#endif
-
 #ifdef CONFIG_CPU_CACHE_VIPT
 
 #define ALIAS_FLUSH_START	0xffff4000
@@ -35,16 +31,11 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 	flush_tlb_kernel_page(to);
 
 	asm(	"mcrr	p15, 0, %1, %0, c14\n"
-	"	mcr	p15, 0, %2, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
-	"	mcr	p15, 0, %2, c7, c5, 0\n"
-#endif
+	"	mcr	p15, 0, %2, c7, c10, 4"
 	    :
 	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
 	    : "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
-	v6_icache_inval_all();
-#endif
+	__flush_icache_all();
 }
 
 void flush_cache_mm(struct mm_struct *mm)
@@ -57,16 +48,11 @@ void flush_cache_mm(struct mm_struct *mm)
 
 	if (cache_is_vipt_aliasing()) {
 		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
-		"	mcr	p15, 0, %0, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
-		"	mcr	p15, 0, %0, c7, c5, 0\n"
-#endif
+		"	mcr	p15, 0, %0, c7, c10, 4"
 		    :
 		    : "r" (0)
 		    : "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
-		v6_icache_inval_all();
-#endif
+		__flush_icache_all();
 	}
 }
@@ -81,16 +67,11 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 
 	if (cache_is_vipt_aliasing()) {
 		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
-		"	mcr	p15, 0, %0, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
-		"	mcr	p15, 0, %0, c7, c5, 0\n"
-#endif
+		"	mcr	p15, 0, %0, c7, c10, 4"
 		    :
 		    : "r" (0)
 		    : "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
-		v6_icache_inval_all();
-#endif
+		__flush_icache_all();
 	}
 }
--
cgit v1.2.1

From 4b46d6416548fb6a0940dfd9911fd895eb6247b3 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 1 Nov 2009 17:44:24 +0000
Subject: ARM: ensure initial page tables are setup for SMP systems

Mapping the same memory using two different attributes (memory type,
shareability, cacheability) is unpredictable.  During boot, we encounter
a situation when we're updating the kernel's page tables which can lead
to dirty cache lines existing in the cache which are subsequently
missed.  This causes stack corruption, and therefore a crash.

Therefore, ensure that the shared and cacheability settings match the
configuration that will be used later; this together with the
restriction in early_cachepolicy() ensures that we won't create a
mismatch during boot.
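The matching is done by pairing each TTB walk attribute with a section
attribute for the initial mappings; the ARMv6 definitions below
illustrate the idea (comments added here for exposition, the ARMv7 hunk
is analogous):

	#ifndef CONFIG_SMP
	#define TTB_FLAGS	TTB_RGN_WBWA		/* PTWs inner WBWA */
	#define PMD_FLAGS	PMD_SECT_WB		/* sections WB, not shared */
	#else
	#define TTB_FLAGS	TTB_RGN_WBWA|TTB_S	/* PTWs WBWA, shareable */
	#define PMD_FLAGS	PMD_SECT_WBWA|PMD_SECT_S /* sections match: WBWA + S */
	#endif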
Acked-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/mm/mmu.c     | 7 +++++++
 arch/arm/mm/proc-v6.S | 7 ++++---
 arch/arm/mm/proc-v7.S | 7 ++++---
 3 files changed, 15 insertions(+), 6 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 02243eeccf50..ea67be0223ac 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -117,6 +117,13 @@ static void __init early_cachepolicy(char **p)
 	}
 	if (i == ARRAY_SIZE(cache_policies))
 		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
+	/*
+	 * This restriction is partly to do with the way we boot; it is
+	 * unpredictable to have memory mapped using two different sets of
+	 * memory attributes (shared, type, and cache attribs).  We can not
+	 * change these attributes once the initial assembly has setup the
+	 * page tables.
+	 */
 	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
 		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
 		cachepolicy = CPOLICY_WRITEBACK;
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 194737d60a22..70f75d2e3ead 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -32,8 +32,10 @@
 
 #ifndef CONFIG_SMP
 #define TTB_FLAGS	TTB_RGN_WBWA
+#define PMD_FLAGS	PMD_SECT_WB
 #else
 #define TTB_FLAGS	TTB_RGN_WBWA|TTB_S
+#define PMD_FLAGS	PMD_SECT_WBWA|PMD_SECT_S
 #endif
 
 ENTRY(cpu_v6_proc_init)
@@ -222,10 +224,9 @@ __v6_proc_info:
 	.long	0x0007b000
 	.long	0x0007f000
 	.long	PMD_TYPE_SECT | \
-		PMD_SECT_BUFFERABLE | \
-		PMD_SECT_CACHEABLE | \
 		PMD_SECT_AP_WRITE | \
-		PMD_SECT_AP_READ
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 23ebcf6eab9f..eeeed01ee44a 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -33,9 +33,11 @@
 #ifndef CONFIG_SMP
 /*   PTWs cacheable, inner WB not shareable, outer WB not shareable */
 #define TTB_FLAGS	TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS	PMD_SECT_WB
 #else
 /*   PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
 #define TTB_FLAGS	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS	PMD_SECT_WBWA|PMD_SECT_S
 #endif
 
 ENTRY(cpu_v7_proc_init)
@@ -326,10 +328,9 @@ __v7_proc_info:
 	.long	0x000f0000		@ Required ID value
 	.long	0x000f0000		@ Mask for ID
 	.long	PMD_TYPE_SECT | \
-		PMD_SECT_BUFFERABLE | \
-		PMD_SECT_CACHEABLE | \
 		PMD_SECT_AP_WRITE | \
-		PMD_SECT_AP_READ
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
--
cgit v1.2.1

From 1b3a02eb452354fa9b36a7f33dc4c8307bbc40aa Mon Sep 17 00:00:00 2001
From: Tony Thompson
Date: Wed, 4 Nov 2009 12:16:38 +0000
Subject: ARMv7: Check whether the SMP/nAMP mode was already enabled

If running in non-secure mode, enabling this register will fault.

Signed-off-by: Tony Thompson
Acked-by: Srinidhi Kasagar
Signed-off-by: Catalin Marinas
---
 arch/arm/mm/proc-v7.S | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index eeeed01ee44a..3a285218fd15 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -186,9 +186,10 @@ cpu_v7_name:
  */
 __v7_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode and
-	orr	r0, r0, #(1 << 6) | (1 << 0)	@ TLB ops broadcasting
-	mcr	p15, 0, r0, c1, c0, 1
+	mrc	p15, 0, r0, c1, c0, 1
+	tst	r0, #(1 << 6)			@ SMP/nAMP mode enabled?
+	orreq	r0, r0, #(1 << 6) | (1 << 0)	@ Enable SMP/nAMP mode and
+	mcreq	p15, 0, r0, c1, c0, 1		@ TLB ops broadcasting
 #endif
 	adr	r12, __v7_setup_stack		@ the local stack
 	stmia	r12, {r0-r5, r7, r9, r11, lr}
--
cgit v1.2.1

From 2f0b192633f1fbf253b21c90938733491549edae Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 25 Oct 2009 10:40:02 +0000
Subject: ARM: Avoid duplicated implementation for VIVT cache flushing

We had two copies of the wrapper code for VIVT cache flushing - one in
asm/cacheflush.h and one in arch/arm/mm/flush.c.  Reduce this down to
one common copy.

Signed-off-by: Russell King
---
 arch/arm/mm/flush.c | 17 ++++-------------
 1 file changed, 4 insertions(+), 13 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 7f294f307c83..a480f161a4bb 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -41,8 +41,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
-			__cpuc_flush_user_all();
+		vivt_flush_cache_mm(mm);
 		return;
 	}
 
@@ -59,9 +58,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
-			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
-						vma->vm_flags);
+		vivt_flush_cache_range(vma, start, end);
 		return;
 	}
 
@@ -78,10 +75,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = user_addr & PAGE_MASK;
-			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
-		}
+		vivt_flush_cache_page(vma, user_addr, pfn);
 		return;
 	}
 
@@ -94,10 +88,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 			 unsigned long len, int write)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = (unsigned long)kaddr;
-			__cpuc_coherent_kern_range(addr, addr + len);
-		}
+		vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
 		return;
 	}
--
cgit v1.2.1

From b7dc0b2cfc6e9bc7270915c642a8a8e999b6095e Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 25 Oct 2009 11:25:50 +0000
Subject: ARM: Avoid evaluating page_address() multiple times

page_address() is a function call rather than a macro, and so:

	if (page_address(page))
		do_something(page_address(page));

results in two calls to this function.  This is unnecessary; remove
the duplication.

Signed-off-by: Russell King
---
 arch/arm/mm/flush.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index a480f161a4bb..43474d8752a6 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -111,6 +111,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
+	void *addr = page_address(page);
+
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.
 	 * This ensures that data in the physical page is mutually
@@ -121,9 +123,9 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * kmap_atomic() doesn't set the page virtual address, and
 	 * kunmap_atomic() takes care of cache flushing already.
 	 */
-	if (page_address(page))
+	if (addr)
 #endif
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_page(addr);
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
--
cgit v1.2.1

From 421fe93cc4b06b2f5e875cbe0f692800d4862ee5 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 25 Oct 2009 10:23:04 +0000
Subject: ARM: ZERO_PAGE: Avoid flush_dcache_page() for zero page

The zero page is read-only, and has its cache state cleared during
boot.  No further maintenance for this page is required.

Signed-off-by: Russell King
---
 arch/arm/mm/fault-armv.c |  7 +++++++
 arch/arm/mm/flush.c      | 11 ++++++++++-
 arch/arm/mm/mmu.c        |  2 +-
 3 files changed, 18 insertions(+), 2 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index d0d17b6a3703..4fbc7de8b4ac 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -151,7 +151,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 	if (!pfn_valid(pfn))
 		return;
 
+	/*
+	 * The zero page is never written to, so never has any dirty
+	 * cache lines, and therefore never needs to be flushed.
+	 */
 	page = pfn_to_page(pfn);
+	if (page == ZERO_PAGE(0))
+		return;
+
 	mapping = page_mapping(page);
 #ifndef CONFIG_SMP
 	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 43474d8752a6..82f4b06bf6b4 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -189,7 +189,16 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
  */
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping;
+
+	/*
+	 * The zero page is never written to, so never has any dirty
+	 * cache lines, and therefore never needs to be flushed.
+	 */
+	if (page == ZERO_PAGE(0))
+		return;
+
+	mapping = page_mapping(page);
 
 #ifndef CONFIG_SMP
 	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ea67be0223ac..2427cdcd9098 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1036,7 +1036,7 @@ void __init paging_init(struct machine_desc *mdesc)
 	 */
 	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
 	empty_zero_page = virt_to_page(zero_page);
-	flush_dcache_page(empty_zero_page);
+	__flush_dcache_page(NULL, empty_zero_page);
 }
--
cgit v1.2.1

From 7b0a1003e780193e2a11c27f5059c26b65f60679 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 24 Oct 2009 14:11:59 +0100
Subject: ARM: Reduce __flush_dcache_page() visibility

Signed-off-by: Russell King
---
 arch/arm/mm/fault-armv.c | 2 ++
 arch/arm/mm/mm.h         | 2 ++
 2 files changed, 4 insertions(+)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 4fbc7de8b4ac..729602291958 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -23,6 +23,8 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+#include "mm.h"
+
 static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 /*
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index c4f6f05198e0..a888363398f8 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -24,6 +24,8 @@ struct mem_type {
 
 const struct mem_type *get_mem_type(unsigned int type);
 
+extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
 #endif
 
 struct map_desc;
--
cgit v1.2.1

From 2df341edf6b8a2db7f414d00faeadccbdd9844ab Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 24 Oct 2009 22:58:40 +0100
Subject: ARM: Move __flush_icache_all() out of flush_pfn_alias()

Signed-off-by: Russell King
---
 arch/arm/mm/flush.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 82f4b06bf6b4..302d66517488 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -35,7 +35,6 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 	    :
 	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
 	    : "cc");
-	__flush_icache_all();
 }
 
 void flush_cache_mm(struct mm_struct *mm)
@@ -79,8 +78,10 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 		return;
 	}
 
-	if (cache_is_vipt_aliasing())
+	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(pfn, user_addr);
+		__flush_icache_all();
+	}
 }
 
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -94,6 +95,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 
 	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(page_to_pfn(page), uaddr);
+		__flush_icache_all();
 		return;
 	}
 
@@ -132,9 +134,11 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * we only need to do one flush - which would be at the relevant
 	 * userspace colour, which is congruent with page->index.
 	 */
-	if (mapping && cache_is_vipt_aliasing())
+	if (mapping && cache_is_vipt_aliasing()) {
 		flush_pfn_alias(page_to_pfn(page),
 				page->index << PAGE_CACHE_SHIFT);
+		__flush_icache_all();
+	}
 }
 
 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
@@ -244,6 +248,7 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 	 * userspace address only.
 	 */
 	flush_pfn_alias(pfn, vmaddr);
+	__flush_icache_all();
 }
 
 /*
--
cgit v1.2.1

From f91fb05d826a43063fa0aa2ec30c23d3993a208d Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 24 Oct 2009 23:05:34 +0100
Subject: ARM: Remove __flush_icache_all() from __flush_dcache_page()

Both call sites for __flush_dcache_page() end up calling
__flush_icache_all() themselves, so having __flush_dcache_page() do
this as well is wasteful.  Remove the duplicated icache flushing.

Signed-off-by: Russell King
---
 arch/arm/mm/flush.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 302d66517488..dc66f867bec4 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -134,11 +134,9 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * we only need to do one flush - which would be at the relevant
 	 * userspace colour, which is congruent with page->index.
 	 */
-	if (mapping && cache_is_vipt_aliasing()) {
+	if (mapping && cache_is_vipt_aliasing())
 		flush_pfn_alias(page_to_pfn(page),
 				page->index << PAGE_CACHE_SHIFT);
-		__flush_icache_all();
-	}
 }
 
 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
--
cgit v1.2.1

From 115b22474eb1905da2f606a057da3455833333d3 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Tue, 24 Nov 2009 18:54:07 +0100
Subject: ARM: 5794/1: Flush the D-cache during copy_user_highpage()

The I and D caches for copy-on-write pages on processors with
write-allocate caches become incoherent, causing problems for
applications that rely on CoW for text pages (e.g. the dynamic linker
relocating symbols in a text page).  This patch flushes the D-cache
for such pages.

Cc: Nicolas Pitre
Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/mm/copypage-v6.c | 8 ++++++++
 1 file changed, 8 insertions(+)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 4127a7bddfe5..841f355319bf 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,6 +41,14 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 	kfrom = kmap_atomic(from, KM_USER0);
 	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * kmap_atomic() doesn't set the page virtual address, and
+	 * kunmap_atomic() takes care of cache flushing already.
+	 */
+	if (page_address(to) != NULL)
+#endif
+		__cpuc_flush_dcache_page(kto);
 	kunmap_atomic(kto, KM_USER1);
 	kunmap_atomic(kfrom, KM_USER0);
 }
--
cgit v1.2.1

From 9e95922b1016ac941db7edcf6b6088b3c2e916c8 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 25 Oct 2009 13:35:13 +0000
Subject: ARM: I-cache: Add invalidation for VIVT ASID tagged caches

Signed-off-by: Russell King
---
 arch/arm/mm/flush.c | 6 ++++++
 1 file changed, 6 insertions(+)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index dc66f867bec4..9770e27dd581 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -69,6 +69,9 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 		    : "cc");
 		__flush_icache_all();
 	}
+
+	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
+		__flush_icache_all();
 }
 
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
@@ -82,6 +85,9 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 		flush_pfn_alias(pfn, user_addr);
 		__flush_icache_all();
 	}
+
+	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
+		__flush_icache_all();
 }
 
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
--
cgit v1.2.1

From ea201dbb78651c71c56e440b8b3132906bc7456d Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 25 Oct 2009 14:31:40 +0000
Subject: ARM: I-cache: avoid flushing in flush_cache_mm()

flush_cache_mm() is called in two cases:
1. when a process exits, just before the page tables are torn down.
   We can allow the stale lines to evict themselves over time without
   causing any harm.
2. when a process forks, and we've allocated a new ASID.
   The instruction cache issues are dealt with as pages are brought
   into the new process address space.  Flushing the I-cache here is
   therefore unnecessary.

However, we must keep the VIPT aliasing D-cache flush to ensure that
any dirty cache lines are not written back after the pages have been
reallocated for some other use - which would result in corruption.

Signed-off-by: Russell King
---
 arch/arm/mm/flush.c | 1 -
 1 file changed, 1 deletion(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 9770e27dd581..f8feb5d919fe 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -50,7 +50,6 @@ void flush_cache_mm(struct mm_struct *mm)
 		    :
 		    : "r" (0)
 		    : "cc");
-		__flush_icache_all();
 	}
 }
--
cgit v1.2.1

From 6060e8df517847bf445ebc61de7d4d9c7faae990 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 25 Oct 2009 14:12:27 +0000
Subject: ARM: I-cache: flush executable mappings in flush_cache_range()

Dirk Behme reported instability on ARM11 SMP (VIPT non-aliasing cache)
caused by the dynamic linker changing protection on text pages to write
GOT entries.  The problem is due to an interaction between the write
faulting code providing new anonymous pages which are incoherent with
the I-cache due to write buffering, and the I-cache not having been
invalidated.

a4db94d plugs the hole with the data cache coherency.  This patch
provides the other half of the fix by flushing the I-cache in
flush_cache_range() for VM_EXEC VMAs (which is what we have when the
region is being made executable again).  This ensures that the I-cache
will be up to date with the newly COW'd pages.

Note: if users are writing instructions, then they still need to use
the ARM sys_cacheflush API to ensure that the caches are correctly
synchronized.
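With the whole series applied, the resulting flush_cache_range() reads
approximately as follows (condensed from the diffs in this log):

	void flush_cache_range(struct vm_area_struct *vma,
			       unsigned long start, unsigned long end)
	{
		if (cache_is_vivt()) {
			vivt_flush_cache_range(vma, start, end);
			return;
		}

		if (cache_is_vipt_aliasing()) {
			/* clean+invalidate whole D-cache, drain write buffer */
			asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
			"	mcr	p15, 0, %0, c7, c10, 4"
			    : : "r" (0) : "cc");
		}

		/* make newly executable pages visible to the I-cache */
		if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}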
Signed-off-by: Russell King
---
 arch/arm/mm/flush.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index f8feb5d919fe..329594e760cd 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -66,10 +66,9 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 		    :
 		    : "r" (0)
 		    : "cc");
-		__flush_icache_all();
 	}
 
-	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
+	if (vma->vm_flags & VM_EXEC)
 		__flush_icache_all();
 }
--
cgit v1.2.1
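As a closing illustration of the sys_cacheflush note above: userspace
code that writes instructions (e.g. a JIT) would synchronize the caches
roughly like this (EABI syscall number shown; GCC's
__builtin___clear_cache() is the portable equivalent):

	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __ARM_NR_cacheflush
	#define __ARM_NR_cacheflush	0x0f0002	/* __ARM_NR_BASE + 2 */
	#endif

	/* make instructions written to [start, end) safe to execute */
	static void sync_icache(void *start, void *end)
	{
		syscall(__ARM_NR_cacheflush, (unsigned long)start,
			(unsigned long)end, 0);
	}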