author    | Jeff Garzik <jgarzik@pobox.com> | 2005-10-29 17:49:12 -0400
committer | Jeff Garzik <jgarzik@pobox.com> | 2005-10-29 17:49:12 -0400
commit    | b0c4e148bd591629749d02a8fbc8d81c26d548cf (patch)
tree      | 3e2142635f3dc2ceeae870ead2dceab7b9c6def1 /arch/mips/mm/cache.c
parent    | 5615ca7906aefbdc3318604c89db5931d0a25910 (diff)
parent    | be15cd72d256e5eb3261a781b8507fac83ab33f6 (diff)
download  | talos-obmc-linux-b0c4e148bd591629749d02a8fbc8d81c26d548cf.tar.gz
          | talos-obmc-linux-b0c4e148bd591629749d02a8fbc8d81c26d548cf.zip
Merge branch 'master'
Diffstat (limited to 'arch/mips/mm/cache.c')
-rw-r--r-- | arch/mips/mm/cache.c | 106
1 file changed, 51 insertions, 55 deletions
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 1d95cdb77bed..314701a66b13 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -23,8 +23,10 @@ void (*__flush_cache_all)(void);
 void (*flush_cache_mm)(struct mm_struct *mm);
 void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end);
-void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
-void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
+	unsigned long pfn);
+void (*flush_icache_range)(unsigned long __user start,
+	unsigned long __user end);
 void (*flush_icache_page)(struct vm_area_struct *vma, struct page *page);
 
 /* MIPS specific cache operations */
@@ -32,6 +34,8 @@ void (*flush_cache_sigtramp)(unsigned long addr);
 void (*flush_data_cache_page)(unsigned long addr);
 void (*flush_icache_all)(void);
 
+EXPORT_SYMBOL(flush_data_cache_page);
+
 #ifdef CONFIG_DMA_NONCOHERENT
 
 /* DMA cache operations. */
@@ -49,10 +53,12 @@ EXPORT_SYMBOL(_dma_cache_inv);
  * We could optimize the case where the cache argument is not BCACHE but
  * that seems very atypical use ...
  */
-asmlinkage int sys_cacheflush(unsigned long addr, unsigned long int bytes,
-	unsigned int cache)
+asmlinkage int sys_cacheflush(unsigned long __user addr,
+	unsigned long bytes, unsigned int cache)
 {
-	if (!access_ok(VERIFY_WRITE, (void *) addr, bytes))
+	if (bytes == 0)
+		return 0;
+	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
 		return -EFAULT;
 
 	flush_icache_range(addr, addr + bytes);
@@ -100,58 +106,48 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	}
 }
 
-extern void ld_mmu_r23000(void);
-extern void ld_mmu_r4xx0(void);
-extern void ld_mmu_tx39(void);
-extern void ld_mmu_r6000(void);
-extern void ld_mmu_tfp(void);
-extern void ld_mmu_andes(void);
-extern void ld_mmu_sb1(void);
+#define __weak __attribute__((weak))
+
+static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";
 
 void __init cpu_cache_init(void)
 {
-	if (cpu_has_4ktlb) {
-#if defined(CONFIG_CPU_R4X00) || defined(CONFIG_CPU_VR41XX) || \
-    defined(CONFIG_CPU_R4300) || defined(CONFIG_CPU_R5000) || \
-    defined(CONFIG_CPU_NEVADA) || defined(CONFIG_CPU_R5432) || \
-    defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_MIPS32) || \
-    defined(CONFIG_CPU_MIPS64) || defined(CONFIG_CPU_TX49XX) || \
-    defined(CONFIG_CPU_RM7000) || defined(CONFIG_CPU_RM9000)
-		ld_mmu_r4xx0();
-#endif
-	} else switch (current_cpu_data.cputype) {
-#ifdef CONFIG_CPU_R3000
-	case CPU_R2000:
-	case CPU_R3000:
-	case CPU_R3000A:
-	case CPU_R3081E:
-		ld_mmu_r23000();
-		break;
-#endif
-#ifdef CONFIG_CPU_TX39XX
-	case CPU_TX3912:
-	case CPU_TX3922:
-	case CPU_TX3927:
-		ld_mmu_tx39();
-		break;
-#endif
-#ifdef CONFIG_CPU_R10000
-	case CPU_R10000:
-	case CPU_R12000:
-		ld_mmu_r4xx0();
-		break;
-#endif
-#ifdef CONFIG_CPU_SB1
-	case CPU_SB1:
-		ld_mmu_sb1();
-		break;
-#endif
-
-	case CPU_R8000:
-		panic("R8000 is unsupported");
-		break;
-
-	default:
-		panic("Yeee, unsupported cache architecture.");
+	if (cpu_has_3k_cache) {
+		extern void __weak r3k_cache_init(void);
+
+		r3k_cache_init();
+		return;
+	}
+	if (cpu_has_6k_cache) {
+		extern void __weak r6k_cache_init(void);
+
+		r6k_cache_init();
+		return;
+	}
+	if (cpu_has_4k_cache) {
+		extern void __weak r4k_cache_init(void);
+
+		r4k_cache_init();
+		return;
 	}
+	if (cpu_has_8k_cache) {
+		extern void __weak r8k_cache_init(void);
+
+		r8k_cache_init();
+		return;
+	}
+	if (cpu_has_tx39_cache) {
+		extern void __weak tx39_cache_init(void);
+
+		tx39_cache_init();
+		return;
+	}
+	if (cpu_has_sb1_cache) {
+		extern void __weak sb1_cache_init(void);
+
+		sb1_cache_init();
+		return;
+	}
+
+	panic(cache_panic);
 }
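The notable pattern in the reworked cpu_cache_init() above is the weak extern declaration: each *_cache_init() routine is declared with __attribute__((weak)) at the call site, so the kernel image still links even when only the cache back-end selected by the configuration actually provides a definition, and the cpu_has_*_cache tests ensure only a routine that exists is ever called. Below is a minimal user-space sketch of that weak-reference linking trick, assuming GCC/ELF semantics; the names fancy_cache_init and have_fancy_cache are invented for illustration and do not appear in the kernel, and the sketch adds an address check that the kernel code does not need because its configuration guarantees a definition is present whenever the feature test is true.

/*
 * Hypothetical, self-contained example (not kernel code): a weak extern
 * declaration lets the program link even though fancy_cache_init() is
 * never defined in any object file.
 */
#include <stdio.h>

#define __weak __attribute__((weak))

extern void __weak fancy_cache_init(void);  /* weak reference, may stay undefined */

static const int have_fancy_cache = 0;      /* stand-in for a cpu_has_*_cache test */

int main(void)
{
	/*
	 * An undefined weak function symbol resolves to address 0, so guard
	 * the call here.  The kernel omits this address check because the
	 * feature test is only true when the matching cache code is built in.
	 */
	if (have_fancy_cache && fancy_cache_init) {
		fancy_cache_init();
		return 0;
	}

	puts("fancy cache support not linked in");
	return 1;
}

Compiled with plain gcc, this links and prints the fallback message; if another object file supplied a strong fancy_cache_init(), the same binary would call it instead, which mirrors how only the r3k/r4k/tx39/sb1 cache routines actually built into the kernel need to exist at link time.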