From ec8c0446b6e2b67b5c8813eb517f4bf00efa99a9 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Tue, 12 Dec 2006 17:14:57 +0000
Subject: [PATCH] Optimize D-cache alias handling on fork

Virtually indexed, physically tagged cache architectures can get away
without cache flushing when forking.  This patch adds a new cache
flushing function flush_cache_dup_mm(struct mm_struct *) which for the
moment I've implemented to do the same thing as flush_cache_mm on all
architectures except MIPS, where it's a no-op.

Signed-off-by: Ralf Baechle
Signed-off-by: Linus Torvalds
---
 Documentation/cachetlb.txt          | 23 +++++++++++++++++------
 include/asm-alpha/cacheflush.h      |  1 +
 include/asm-arm/cacheflush.h        |  2 ++
 include/asm-arm26/cacheflush.h      |  1 +
 include/asm-avr32/cacheflush.h      |  1 +
 include/asm-cris/cacheflush.h       |  1 +
 include/asm-frv/cacheflush.h        |  1 +
 include/asm-h8300/cacheflush.h      |  1 +
 include/asm-i386/cacheflush.h       |  1 +
 include/asm-ia64/cacheflush.h       |  1 +
 include/asm-m32r/cacheflush.h       |  3 +++
 include/asm-m68k/cacheflush.h       |  2 ++
 include/asm-m68knommu/cacheflush.h  |  1 +
 include/asm-mips/cacheflush.h       |  2 ++
 include/asm-parisc/cacheflush.h     |  2 ++
 include/asm-powerpc/cacheflush.h    |  1 +
 include/asm-s390/cacheflush.h       |  1 +
 include/asm-sh/cpu-sh2/cacheflush.h |  2 ++
 include/asm-sh/cpu-sh3/cacheflush.h |  3 +++
 include/asm-sh/cpu-sh4/cacheflush.h |  1 +
 include/asm-sh64/cacheflush.h       |  2 ++
 include/asm-sparc/cacheflush.h      |  1 +
 include/asm-sparc64/cacheflush.h    |  1 +
 include/asm-v850/cacheflush.h       |  1 +
 include/asm-x86_64/cacheflush.h     |  1 +
 include/asm-xtensa/cacheflush.h     |  2 ++
 kernel/fork.c                       |  2 +-
 27 files changed, 54 insertions(+), 7 deletions(-)

diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index 53245c429f7d..73e794f0ff09 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -179,10 +179,21 @@ Here are the routines, one by one:
         lines associated with 'mm'.
 
         This interface is used to handle whole address space
-        page table operations such as what happens during
-        fork, exit, and exec.
+        page table operations such as what happens during exit and exec.
+
+2) void flush_cache_dup_mm(struct mm_struct *mm)
+
+        This interface flushes an entire user address space from
+        the caches.  That is, after running, there will be no cache
+        lines associated with 'mm'.
+
+        This interface is used to handle whole address space
+        page table operations such as what happens during fork.
+
+        This option is separate from flush_cache_mm to allow some
+        optimizations for VIPT caches.
 
-2) void flush_cache_range(struct vm_area_struct *vma,
+3) void flush_cache_range(struct vm_area_struct *vma,
                            unsigned long start, unsigned long end)
 
         Here we are flushing a specific range of (user) virtual
@@ -199,7 +210,7 @@ Here are the routines, one by one:
         call flush_cache_page (see below) for each entry which may be
         modified.
 
-3) void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
+4) void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 
         This time we need to remove a PAGE_SIZE sized range
         from the cache.  The 'vma' is the backing structure used by
@@ -220,7 +231,7 @@ Here are the routines, one by one:
 
         This is used primarily during fault processing.
 
-4) void flush_cache_kmaps(void)
+5) void flush_cache_kmaps(void)
 
         This routine need only be implemented if the platform utilizes
         highmem.  It will be called right before all of the kmaps
@@ -232,7 +243,7 @@ Here are the routines, one by one:
 
         This routing should be implemented in asm/highmem.h
 
-5) void flush_cache_vmap(unsigned long start, unsigned long end)
+6) void flush_cache_vmap(unsigned long start, unsigned long end)
         void flush_cache_vunmap(unsigned long start, unsigned long end)
 
         Here in these two interfaces we are flushing a specific range
diff --git a/include/asm-alpha/cacheflush.h b/include/asm-alpha/cacheflush.h
index 805640b41078..b686cc7fc44e 100644
--- a/include/asm-alpha/cacheflush.h
+++ b/include/asm-alpha/cacheflush.h
@@ -6,6 +6,7 @@
 /* Caches aren't brain-dead on the Alpha. */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index f0845646aacb..378a3a2ce8d9 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -319,6 +319,8 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                                 unsigned long len, int write);
 #endif
 
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
 /*
  * flush_cache_user_range is used when we want to ensure that the
  * Harvard caches are synchronised for the user space address range.
diff --git a/include/asm-arm26/cacheflush.h b/include/asm-arm26/cacheflush.h
index 9c1b9c7f2ebd..14ae15b6faab 100644
--- a/include/asm-arm26/cacheflush.h
+++ b/include/asm-arm26/cacheflush.h
@@ -22,6 +22,7 @@
 
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma,start,end) do { } while (0)
 #define flush_cache_page(vma,vmaddr,pfn) do { } while (0)
 #define flush_cache_vmap(start, end) do { } while (0)
diff --git a/include/asm-avr32/cacheflush.h b/include/asm-avr32/cacheflush.h
index f1bf1708980e..dfaaa88cd412 100644
--- a/include/asm-avr32/cacheflush.h
+++ b/include/asm-avr32/cacheflush.h
@@ -87,6 +87,7 @@ void invalidate_icache_region(void *start, size_t len);
  */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_cache_vmap(start, end) do { } while (0)
diff --git a/include/asm-cris/cacheflush.h b/include/asm-cris/cacheflush.h
index 72cc71dffe70..01af2de27c5b 100644
--- a/include/asm-cris/cacheflush.h
+++ b/include/asm-cris/cacheflush.h
@@ -9,6 +9,7 @@
  */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-frv/cacheflush.h b/include/asm-frv/cacheflush.h
index eaa5826bc1c8..02500405a6fb 100644
--- a/include/asm-frv/cacheflush.h
+++ b/include/asm-frv/cacheflush.h
@@ -20,6 +20,7 @@
  */
 #define flush_cache_all() do {} while(0)
 #define flush_cache_mm(mm) do {} while(0)
+#define flush_cache_dup_mm(mm) do {} while(0)
 #define flush_cache_range(mm, start, end) do {} while(0)
 #define flush_cache_page(vma, vmaddr, pfn) do {} while(0)
 #define flush_cache_vmap(start, end) do {} while(0)
diff --git a/include/asm-h8300/cacheflush.h b/include/asm-h8300/cacheflush.h
index 1e4d95bb5ec9..71210d141b64 100644
--- a/include/asm-h8300/cacheflush.h
+++ b/include/asm-h8300/cacheflush.h
@@ -12,6 +12,7 @@
 
 #define flush_cache_all()
 #define flush_cache_mm(mm)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma,a,b)
 #define flush_cache_page(vma,p,pfn)
 #define flush_dcache_page(page)
diff --git a/include/asm-i386/cacheflush.h b/include/asm-i386/cacheflush.h
index 7199f7b326f1..74e03c8f2e51 100644
--- a/include/asm-i386/cacheflush.h
+++ b/include/asm-i386/cacheflush.h
@@ -7,6 +7,7 @@
 /* Caches aren't brain-dead on the intel. */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-ia64/cacheflush.h b/include/asm-ia64/cacheflush.h
index f2dacb4245ec..4906916d715b 100644
--- a/include/asm-ia64/cacheflush.h
+++ b/include/asm-ia64/cacheflush.h
@@ -18,6 +18,7 @@
 
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_icache_page(vma,page) do { } while (0)
diff --git a/include/asm-m32r/cacheflush.h b/include/asm-m32r/cacheflush.h
index 8b261b49149e..56961a9511b2 100644
--- a/include/asm-m32r/cacheflush.h
+++ b/include/asm-m32r/cacheflush.h
@@ -9,6 +9,7 @@ extern void _flush_cache_copyback_all(void);
 #if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
@@ -29,6 +30,7 @@ extern void smp_flush_cache_all(void);
 #elif defined(CONFIG_CHIP_M32102)
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
@@ -41,6 +43,7 @@ extern void smp_flush_cache_all(void);
 #else
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-m68k/cacheflush.h b/include/asm-m68k/cacheflush.h
index 24d3ff449135..16bf375fdbe1 100644
--- a/include/asm-m68k/cacheflush.h
+++ b/include/asm-m68k/cacheflush.h
@@ -89,6 +89,8 @@ static inline void flush_cache_mm(struct mm_struct *mm)
                 __flush_cache_030();
 }
 
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
 /* flush_cache_range/flush_cache_page must be macros to avoid
    a dependency on linux/mm.h, which includes this file... */
 static inline void flush_cache_range(struct vm_area_struct *vma,
diff --git a/include/asm-m68knommu/cacheflush.h b/include/asm-m68knommu/cacheflush.h
index c3aadf3b0d88..163dcb1a9689 100644
--- a/include/asm-m68knommu/cacheflush.h
+++ b/include/asm-m68knommu/cacheflush.h
@@ -8,6 +8,7 @@
 
 #define flush_cache_all() __flush_cache_all()
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) __flush_cache_all()
 #define flush_cache_page(vma, vmaddr) do { } while (0)
 #define flush_dcache_range(start,len) __flush_cache_all()
diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h
index e3c9925876a3..0ddada3bb0b6 100644
--- a/include/asm-mips/cacheflush.h
+++ b/include/asm-mips/cacheflush.h
@@ -17,6 +17,7 @@
  *
  *  - flush_cache_all() flushes entire cache
  *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ *  - flush_cache_dup_mm(mm) handles cache flushing when forking
  *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  *  - flush_cache_range(vma, start, end) flushes a range of pages
  *  - flush_icache_range(start, end) flush a range of instructions
@@ -31,6 +32,7 @@ extern void (*flush_cache_all)(void);
 extern void (*__flush_cache_all)(void);
 extern void (*flush_cache_mm)(struct mm_struct *mm);
+#define flush_cache_dup_mm(mm) do { (void) (mm); } while (0)
 extern void (*flush_cache_range)(struct vm_area_struct *vma,
         unsigned long start, unsigned long end);
 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
         unsigned long pfn);
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h
index 2bc41f2e0271..aedb0512cb04 100644
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -15,6 +15,8 @@
 #define flush_cache_mm(mm) flush_cache_all_local()
 #endif
 
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
 #define flush_kernel_dcache_range(start,size) \
         flush_kernel_dcache_range_asm((start), (start)+(size));
 
diff --git a/include/asm-powerpc/cacheflush.h b/include/asm-powerpc/cacheflush.h
index 8a740c88d93d..08e93e789219 100644
--- a/include/asm-powerpc/cacheflush.h
+++ b/include/asm-powerpc/cacheflush.h
@@ -18,6 +18,7 @@
  */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_icache_page(vma, page) do { } while (0)
diff --git a/include/asm-s390/cacheflush.h b/include/asm-s390/cacheflush.h
index e399a8ba2ed7..f7cade8083f3 100644
--- a/include/asm-s390/cacheflush.h
+++ b/include/asm-s390/cacheflush.h
@@ -7,6 +7,7 @@
 /* Caches aren't brain-dead on the s390. */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-sh/cpu-sh2/cacheflush.h b/include/asm-sh/cpu-sh2/cacheflush.h
index f556fa80ea97..2979efb26de3 100644
--- a/include/asm-sh/cpu-sh2/cacheflush.h
+++ b/include/asm-sh/cpu-sh2/cacheflush.h
@@ -15,6 +15,7 @@
  *
  *  - flush_cache_all() flushes entire cache
  *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ *  - flush_cache_dup_mm(mm) handles cache flushing when forking
  *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  *  - flush_cache_range(vma, start, end) flushes a range of pages
  *
@@ -27,6 +28,7 @@
  */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index 03fde97a7fd0..f70d8ef76a15 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -15,6 +15,7 @@
  *
  *  - flush_cache_all() flushes entire cache
  *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ *  - flush_cache_dup_mm(mm) handles cache flushing when forking
  *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  *  - flush_cache_range(vma, start, end) flushes a range of pages
  *
@@ -39,6 +40,7 @@
 
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end);
 void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
@@ -48,6 +50,7 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #else
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index 515fd574267c..b01a10f31225 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -18,6 +18,7 @@
  */
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end);
 void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
diff --git a/include/asm-sh64/cacheflush.h b/include/asm-sh64/cacheflush.h
index 55f71aa0aa6b..1e53a47bdc97 100644
--- a/include/asm-sh64/cacheflush.h
+++ b/include/asm-sh64/cacheflush.h
@@ -21,6 +21,8 @@
 extern void flush_icache_user_range(struct vm_area_struct *vma,
                         struct page *page, unsigned long addr, int len);
 
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
 
diff --git a/include/asm-sparc/cacheflush.h b/include/asm-sparc/cacheflush.h
index fc632f811cd8..68ac10910271 100644
--- a/include/asm-sparc/cacheflush.h
+++ b/include/asm-sparc/cacheflush.h
@@ -48,6 +48,7 @@ BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
 
 #define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
 #define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
+#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
 #define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
 #define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
 #define flush_icache_range(start, end) do { } while (0)
diff --git a/include/asm-sparc64/cacheflush.h b/include/asm-sparc64/cacheflush.h
index 745d1ab60371..122e4058dd9e 100644
--- a/include/asm-sparc64/cacheflush.h
+++ b/include/asm-sparc64/cacheflush.h
@@ -12,6 +12,7 @@
 /* These are the same regardless of whether this is an SMP kernel or not. */
 #define flush_cache_mm(__mm) \
         do { if ((__mm) == current->mm) flushw_user(); } while(0)
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
 #define flush_cache_range(vma, start, end) \
         flush_cache_mm((vma)->vm_mm)
 #define flush_cache_page(vma, page, pfn) \
diff --git a/include/asm-v850/cacheflush.h b/include/asm-v850/cacheflush.h
index e1a87f82f1a4..9ece05a202ef 100644
--- a/include/asm-v850/cacheflush.h
+++ b/include/asm-v850/cacheflush.h
@@ -24,6 +24,7 @@
    systems with MMUs, so we don't need them.  */
 #define flush_cache_all() ((void)0)
 #define flush_cache_mm(mm) ((void)0)
+#define flush_cache_dup_mm(mm) ((void)0)
 #define flush_cache_range(vma, start, end) ((void)0)
 #define flush_cache_page(vma, vmaddr, pfn) ((void)0)
 #define flush_dcache_page(page) ((void)0)
diff --git a/include/asm-x86_64/cacheflush.h b/include/asm-x86_64/cacheflush.h
index d32f7f58752a..ab1cb5c7dc92 100644
--- a/include/asm-x86_64/cacheflush.h
+++ b/include/asm-x86_64/cacheflush.h
@@ -7,6 +7,7 @@
 /* Caches aren't brain-dead on the intel. */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-xtensa/cacheflush.h b/include/asm-xtensa/cacheflush.h
index 337765b629de..22ef901b7845 100644
--- a/include/asm-xtensa/cacheflush.h
+++ b/include/asm-xtensa/cacheflush.h
@@ -75,6 +75,7 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
 
 #define flush_cache_all() __flush_invalidate_cache_all();
 #define flush_cache_mm(mm) __flush_invalidate_cache_all();
+#define flush_cache_dup_mm(mm) __flush_invalidate_cache_all();
 
 #define flush_cache_vmap(start,end) __flush_invalidate_cache_all();
 #define flush_cache_vunmap(start,end) __flush_invalidate_cache_all();
@@ -88,6 +89,7 @@ extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned lon
 
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 
 #define flush_cache_vmap(start,end) do { } while (0)
 #define flush_cache_vunmap(start,end) do { } while (0)
diff --git a/kernel/fork.c b/kernel/fork.c
index d16c566eb645..fc723e595cd5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -203,7 +203,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
         struct mempolicy *pol;
 
         down_write(&oldmm->mmap_sem);
-        flush_cache_mm(oldmm);
+        flush_cache_dup_mm(oldmm);
         /*
          * Not linked in yet - no deadlock potential:
          */
--
cgit v1.2.1
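
For readers less familiar with the cacheflush API, here is a small stand-alone
C sketch of the dispatch the patch introduces.  It is not kernel code: the
mm_struct stub, the PHYS_TAGGED_CACHE switch, and dup_mmap_sketch() are
invented for illustration.  The two flush_cache_dup_mm() definitions mirror
the MIPS no-op and the flush_cache_mm() fallback used by the other
architectures, and the call site mirrors what dup_mmap() in kernel/fork.c
does after this change.

/*
 * Illustrative sketch only -- NOT kernel code.  flush_cache_dup_mm() is
 * what dup_mmap() now calls on fork; each architecture decides whether
 * that is a real flush (virtually indexed caches) or a no-op (physically
 * tagged caches, as on MIPS).
 */
#include <stdio.h>

struct mm_struct { int id; };          /* stand-in for the real mm_struct */

static void flush_cache_mm(struct mm_struct *mm)
{
        printf("flushing all cache lines for mm %d\n", mm->id);
}

#ifdef PHYS_TAGGED_CACHE
/* Physically tagged caches: no aliases, so fork needs no flush. */
#define flush_cache_dup_mm(mm)  do { (void)(mm); } while (0)
#else
/* Virtually indexed caches: fall back to the full mm flush. */
#define flush_cache_dup_mm(mm)  flush_cache_mm(mm)
#endif

/* Rough shape of the call site in dup_mmap(): flush the parent's mm
 * (oldmm) before its VMAs and page tables are copied for the child. */
static void dup_mmap_sketch(struct mm_struct *oldmm)
{
        flush_cache_dup_mm(oldmm);
        /* ... copy VMAs and page tables here ... */
}

int main(void)
{
        struct mm_struct parent = { .id = 1 };

        dup_mmap_sketch(&parent);
        return 0;
}

Building with -DPHYS_TAGGED_CACHE makes the fork-time flush compile away
entirely, which is the optimization the separate hook exists to allow.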