author		Paul Mundt <lethal@linux-sh.org>	2006-12-28 10:31:48 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2007-02-13 10:54:44 +0900
commit		26b7a78c55fbc0e23a7dc19e89fd50f200efc002 (patch)
tree		a830e70a57d4e9cbc669bc362db73ba5ace30d4d
parent		7a847f819063b80cc5b38d39e8aad4d60f6ca2fd (diff)
sh: Lazy dcache writeback optimizations.
This converts the lazy dcache handling to the model described in
Documentation/cachetlb.txt and drops the ptep_get_and_clear() hacks
used for the aliasing dcaches on SH-4 and SH7705 in 32kB mode. As a
bonus, this slightly cuts down on the cache flushing frequency.
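In the cachetlb.txt model, flush_dcache_page() no longer writes back unconditionally: if the page has no user mappings yet, it only records that the kernel dirtied it, and the actual write-back is deferred until the page is faulted into a user address space. A minimal sketch of that producer side, closely following the flush_dcache_page() changes in this patch (PG_dcache_dirty aliases PG_arch_1; __flush_dcache_phys() is a stand-in for the real SH-4/SH7705 flush helpers, and the #ifndef CONFIG_SMP guard that disables the deferral on SMP is omitted here):

/*
 * Producer side of the lazy D-cache write-back (simplified sketch).
 * PG_dcache_dirty aliases PG_arch_1; __flush_dcache_phys() stands in
 * for the real SH-4/SH7705 write-back helpers.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		/* No user mappings yet: defer until update_mmu_cache(). */
		set_bit(PG_dcache_dirty, &page->flags);
	else
		/* Aliases may already exist, write back immediately. */
		__flush_dcache_phys(PHYSADDR(page_address(page)));
}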
With that and the PTEA handling out of the way, the update_mmu_cache()
implementations can be consolidated, and we no longer have to worry
about which configuration the cache is in for the SH7705 case.
And finally, explicitly disable the lazy writeback on SMP (SH-4A).
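The consumer side is the consolidated update_mmu_cache() added to arch/sh/mm/tlb-flush.c: before the TLB entry is loaded, any write-back that flush_dcache_page() deferred is carried out. A condensed sketch of that function (the PTEH/PTEA/PTEL programming under local_irq_save() is elided; see the full hunk in the diff below):

void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;
	struct address_space *mapping;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	mapping = page_mapping(page);
	if (mapping && test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
		/* Perform the write-back that flush_dcache_page() deferred. */
		unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;

		__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
	}

	/* ... then program PTEH/PTEA/PTEL and issue "ldtlb", as in the patch. */
}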
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c        4
-rw-r--r--  arch/sh/mm/Kconfig                    2
-rw-r--r--  arch/sh/mm/cache-sh4.c               12
-rw-r--r--  arch/sh/mm/cache-sh7705.c             9
-rw-r--r--  arch/sh/mm/pg-sh4.c                  22
-rw-r--r--  arch/sh/mm/pg-sh7705.c               31
-rw-r--r--  arch/sh/mm/tlb-flush.c               55
-rw-r--r--  arch/sh/mm/tlb-sh3.c                 63
-rw-r--r--  arch/sh/mm/tlb-sh4.c                 68
-rw-r--r--  include/asm-sh/cacheflush.h           3
-rw-r--r--  include/asm-sh/cpu-sh3/cacheflush.h   2
-rw-r--r--  include/asm-sh/cpu-sh4/cacheflush.h  12
-rw-r--r--  include/asm-sh/pgtable.h              5
13 files changed, 79 insertions(+), 209 deletions(-)
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index 9031a22a2ce7..b26e2bc5894d 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -181,10 +181,6 @@ int __init detect_cpu_and_cache_system(void)
 		cpu_data->dcache.ways = 1;
 #endif
 
-#ifdef CONFIG_CPU_HAS_PTEA
-	cpu_data->flags |= CPU_HAS_PTEA;
-#endif
-
 	/*
 	 * On anything that's not a direct-mapped cache, look to the CVR
 	 * for I/D-cache specifics.
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index fddf6680ec4f..28b5102e1cdc 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -20,7 +20,7 @@ config CPU_SH4
 	bool
 	select CPU_HAS_INTEVT
 	select CPU_HAS_SR_RB
-	select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
+	select CPU_HAS_PTEA if (!CPU_SUBTYPE_ST40 && !CPU_SH4A) || CPU_SHX2
 
 config CPU_SH4A
 	bool
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index c6955157c989..72bb48773337 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -236,10 +236,20 @@ static inline void flush_cache_4096(unsigned long start,
 /*
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
+ *
+ * This uses a lazy write-back on UP, which is explicitly
+ * disabled on SMP.
  */
 void flush_dcache_page(struct page *page)
 {
-	if (test_bit(PG_mapped, &page->flags)) {
+#ifndef CONFIG_SMP
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
+#endif
+	{
 		unsigned long phys = PHYSADDR(page_address(page));
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
 		int i, n;
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 045abdf078f5..2808b580d984 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -3,11 +3,11 @@
  *
  * Copyright (C) 1999, 2000  Niibe Yutaka
  * Copyright (C) 2004  Alex Song
+ * Copyright (C) 2006  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
- *
  */
 #include <linux/init.h>
 #include <linux/mman.h>
@@ -51,7 +51,6 @@ static inline void cache_wback_all(void)
 
 			if ((data & v) == v)
 				ctrl_outl(data & ~v, addr);
-
 		}
 
 		addrstart += cpu_data->dcache.way_incr;
@@ -128,7 +127,11 @@ static void __flush_dcache_page(unsigned long phys)
  */
 void flush_dcache_page(struct page *page)
 {
-	if (test_bit(PG_mapped, &page->flags))
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
 		__flush_dcache_page(PHYSADDR(page_address(page)));
 }
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 3f98d2a4f936..cfc323551741 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -23,7 +23,6 @@ extern struct mutex p3map_mutex[];
  */
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		clear_page(to);
 	else {
@@ -59,7 +58,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 void copy_user_page(void *to, void *from, unsigned long address,
 		    struct page *page)
 {
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		copy_page(to, from);
 	else {
@@ -84,23 +82,3 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
-
-/*
- * For SH-4, we have our own implementation for ptep_get_and_clear
- */
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-
-	pte_clear(mm, addr, ptep);
-	if (!pte_not_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			struct address_space *mapping = page_mapping(page);
-			if (!mapping || !mapping_writably_mapped(mapping))
-				__clear_bit(PG_mapped, &page->flags);
-		}
-	}
-	return pte;
-}
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c
index ff9ece986cbc..b052d0fee827 100644
--- a/arch/sh/mm/pg-sh7705.c
+++ b/arch/sh/mm/pg-sh7705.c
@@ -7,9 +7,7 @@
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
- *
  */
-
 #include <linux/init.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
@@ -76,7 +74,6 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
 {
 	struct page *page = virt_to_page(to);
 
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
 		clear_page(to);
 		__flush_wback_region(to, PAGE_SIZE);
@@ -95,12 +92,11 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
  * @from: P1 address
  * @address: U0 address to be mapped
  */
-void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
+void copy_user_page(void *to, void *from, unsigned long address,
+		    struct page *pg)
 {
 	struct page *page = virt_to_page(to);
-
-	__set_bit(PG_mapped, &page->flags);
+
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
 		copy_page(to, from);
 		__flush_wback_region(to, PAGE_SIZE);
@@ -112,26 +108,3 @@ void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
 		__flush_wback_region(to, PAGE_SIZE);
 	}
 }
-
-/*
- * For SH7705, we have our own implementation for ptep_get_and_clear
- * Copied from pg-sh4.c
- */
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-
-	pte_clear(mm, addr, ptep);
-	if (!pte_not_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			struct address_space *mapping = page_mapping(page);
-			if (!mapping || !mapping_writably_mapped(mapping))
-				__clear_bit(PG_mapped, &page->flags);
-		}
-	}
-
-	return pte;
-}
-
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index 73ec7f6084fa..9347534aa894 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -2,15 +2,17 @@
  * TLB flushing operations for SH with an MMU.
  *
  *  Copyright (C) 1999  Niibe Yutaka
- *  Copyright (C) 2003  Paul Mundt
+ *  Copyright (C) 2003 - 2006  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  */
 #include <linux/mm.h>
+#include <linux/io.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
@@ -132,3 +134,54 @@ void flush_tlb_all(void)
 	ctrl_barrier();
 	local_irq_restore(flags);
 }
+
+void update_mmu_cache(struct vm_area_struct *vma,
+		      unsigned long address, pte_t pte)
+{
+	unsigned long flags;
+	unsigned long pteval;
+	unsigned long vpn;
+	struct page *page;
+	unsigned long pfn = pte_pfn(pte);
+	struct address_space *mapping;
+
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+	mapping = page_mapping(page);
+	if (mapping) {
+		unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+
+		if (dirty)
+			__flush_wback_region((void *)P1SEGADDR(phys),
+					     PAGE_SIZE);
+	}
+
+	local_irq_save(flags);
+
+	/* Set PTEH register */
+	vpn = (address & MMU_VPN_MASK) | get_asid();
+	ctrl_outl(vpn, MMU_PTEH);
+
+	pteval = pte_val(pte);
+
+#ifdef CONFIG_CPU_HAS_PTEA
+	/* Set PTEA register */
+	/* TODO: make this look less hacky */
+	ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+#endif
+
+	/* Set PTEL register */
+	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+#ifdef CONFIG_SH_WRITETHROUGH
+	pteval |= _PAGE_WT;
+#endif
+	/* conveniently, we want all the software flags to be 0 anyway */
+	ctrl_outl(pteval, MMU_PTEL);
+
+	/* Load the TLB */
+	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+	local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 46b09e26e082..16627069c536 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -8,69 +8,9 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-
+#include <linux/io.h>
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
-		return;
-
-#if defined(CONFIG_SH7705_CACHE_32KB)
-	{
-		struct page *page = pte_page(pte);
-		unsigned long pfn = pte_pfn(pte);
-
-		if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
-			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-
-			__flush_wback_region((void *)P1SEGADDR(phys),
-					     PAGE_SIZE);
-			__set_bit(PG_mapped, &page->flags);
-		}
-	}
-#endif
-
-	local_irq_save(flags);
-
-	/* Set PTEH register */
-	vpn = (address & MMU_VPN_MASK) | get_asid();
-	ctrl_outl(vpn, MMU_PTEH);
-
-	pteval = pte_val(pte);
-
-	/* Set PTEL register */
-	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-	/* conveniently, we want all the software flags to be 0 anyway */
-	ctrl_outl(pteval, MMU_PTEL);
-
-	/* Load the TLB */
-	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
-	local_irq_restore(flags);
-}
 
 void __flush_tlb_page(unsigned long asid, unsigned long page)
 {
@@ -94,4 +34,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
 	for (i = 0; i < ways; i++)
 		ctrl_outl(data, addr + (i << 8));
 }
-
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 812b2d567de2..758d8dec622b 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -8,74 +8,9 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-
+#include <linux/io.h>
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-	struct page *page;
-	unsigned long pfn;
-
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
-		return;
-
-	pfn = pte_pfn(pte);
-	if (pfn_valid(pfn)) {
-		page = pfn_to_page(pfn);
-		if (!test_bit(PG_mapped, &page->flags)) {
-			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-			__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
-			__set_bit(PG_mapped, &page->flags);
-		}
-	}
-
-	local_irq_save(flags);
-
-	/* Set PTEH register */
-	vpn = (address & MMU_VPN_MASK) | get_asid();
-	ctrl_outl(vpn, MMU_PTEH);
-
-	pteval = pte_val(pte);
-
-	/* Set PTEA register */
-	if (cpu_data->flags & CPU_HAS_PTEA)
-		/* TODO: make this look less hacky */
-		ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
-
-	/* Set PTEL register */
-	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-#ifdef CONFIG_SH_WRITETHROUGH
-	pteval |= _PAGE_WT;
-#endif
-	/* conveniently, we want all the software flags to be 0 anyway */
-	ctrl_outl(pteval, MMU_PTEL);
-
-	/* Load the TLB */
-	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
-	local_irq_restore(flags);
-}
 
 void __flush_tlb_page(unsigned long asid, unsigned long page)
 {
@@ -93,4 +28,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
 	ctrl_outl(data, addr);
 	back_to_P1();
 }
-
diff --git a/include/asm-sh/cacheflush.h b/include/asm-sh/cacheflush.h
index 07f62ec9ff0c..22f12634975b 100644
--- a/include/asm-sh/cacheflush.h
+++ b/include/asm-sh/cacheflush.h
@@ -30,5 +30,8 @@ extern void __flush_invalidate_region(void *start, int size);
 
 #define HAVE_ARCH_UNMAPPED_AREA
 
+/* Page flag for lazy dcache write-back for the aliasing UP caches */
+#define PG_dcache_dirty	PG_arch_1
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index f70d8ef76a15..6fabbba228de 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -36,8 +36,6 @@
 /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS 0x00001000
 
-#define PG_mapped	PG_arch_1
-
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
 #define flush_cache_dup_mm(mm)	flush_cache_mm(mm)
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index b01a10f31225..f563c3bccc43 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -38,16 +38,4 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 /* Initialization of P3 area for copy_user_page */
 void p3_cache_init(void);
 
-#define PG_mapped	PG_arch_1
-
-#ifdef CONFIG_MMU
-extern int remap_area_pages(unsigned long addr, unsigned long phys_addr,
-			    unsigned long size, unsigned long flags);
-#else /* CONFIG_MMU */
-static inline int remap_area_pages(unsigned long addr, unsigned long phys_addr,
-				   unsigned long size, unsigned long flags)
-{
-	return 0;
-}
-#endif /* CONFIG_MMU */
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index eba14184baf3..3721a4412cea 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -583,11 +583,6 @@ struct mm_struct;
 extern unsigned int kobjsize(const void *objp);
 #endif /* !CONFIG_MMU */
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-#endif
-
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 extern void paging_init(void);