Diffstat (limited to 'arch/mips/mm')
 arch/mips/mm/Makefile          |   1
 arch/mips/mm/c-r3k.c           |   2
 arch/mips/mm/c-r4k.c           | 149
 arch/mips/mm/c-sb1.c           |  59
 arch/mips/mm/c-tx39.c          |   4
 arch/mips/mm/cache.c           |  19
 arch/mips/mm/cerr-sb1.c        |   1
 arch/mips/mm/dma-coherent.c    |   1
 arch/mips/mm/dma-noncoherent.c |   1
 arch/mips/mm/fault.c           |   4
 arch/mips/mm/highmem.c         |   1
 arch/mips/mm/init.c            |  66
 arch/mips/mm/pg-sb1.c          |   1
 arch/mips/mm/pgtable-32.c      |   1
 arch/mips/mm/pgtable.c         |   3
 arch/mips/mm/sc-mips.c         | 112
 arch/mips/mm/tlb-r4k.c         |  22
 arch/mips/mm/tlb-r8k.c         |   1
 arch/mips/mm/tlbex.c           | 160
 19 files changed, 374 insertions(+), 234 deletions(-)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 4a6220116c96..19e41fd186c4 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_CPU_VR41XX)	+= c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
 obj-$(CONFIG_IP22_CPU_SCACHE)	+= sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE)	+= sc-r5k.o
 obj-$(CONFIG_RM7000_CPU_SCACHE)	+= sc-rm7k.o
+obj-$(CONFIG_MIPS_CPU_SCACHE)	+= sc-mips.o
 
 #
 # Choose one DMA coherency model
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index bb041a22f20a..e1f35ef81145 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -335,7 +335,7 @@ void __init r3k_cache_init(void)
 	flush_cache_mm = r3k_flush_cache_mm;
 	flush_cache_range = r3k_flush_cache_range;
 	flush_cache_page = r3k_flush_cache_page;
-	flush_icache_page = r3k_flush_icache_page;
+	__flush_icache_page = r3k_flush_icache_page;
 	flush_icache_range = r3k_flush_icache_range;
 
 	flush_cache_sigtramp = r3k_flush_cache_sigtramp;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4a43924cd4fc..0b2da53750bd 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -7,7 +7,6 @@
  * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -60,13 +59,13 @@ static unsigned long scache_size __read_mostly;
 /*
  * Dummy cache handling routines for machines without boardcaches
  */
-static void no_sc_noop(void) {}
+static void cache_noop(void) {}
 
 static struct bcache_ops no_sc_ops = {
-	.bc_enable = (void *)no_sc_noop,
-	.bc_disable = (void *)no_sc_noop,
-	.bc_wback_inv = (void *)no_sc_noop,
-	.bc_inv = (void *)no_sc_noop
+	.bc_enable = (void *)cache_noop,
+	.bc_disable = (void *)cache_noop,
+	.bc_wback_inv = (void *)cache_noop,
+	.bc_inv = (void *)cache_noop
 };
 
 struct bcache_ops *bcops = &no_sc_ops;
@@ -90,11 +89,13 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
 	blast_dcache32_page(addr);
 }
 
-static inline void r4k_blast_dcache_page_setup(void)
+static void __init r4k_blast_dcache_page_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
-	if (dc_lsize == 16)
+	if (dc_lsize == 0)
+		r4k_blast_dcache_page = (void *)cache_noop;
+	else if (dc_lsize == 16)
 		r4k_blast_dcache_page = blast_dcache16_page;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
@@ -102,11 +103,13 @@ static inline void r4k_blast_dcache_page_setup(void)
 
 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
 
-static inline void r4k_blast_dcache_page_indexed_setup(void)
+static void __init r4k_blast_dcache_page_indexed_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
-	if (dc_lsize == 16)
+	if (dc_lsize == 0)
+		r4k_blast_dcache_page_indexed = (void *)cache_noop;
+	else if (dc_lsize == 16)
 		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
@@ -114,11 +117,13 @@ static inline void r4k_blast_dcache_page_indexed_setup(void)
 
 static void (* r4k_blast_dcache)(void);
 
-static inline void r4k_blast_dcache_setup(void)
+static void __init r4k_blast_dcache_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
-	if (dc_lsize == 16)
+	if (dc_lsize == 0)
+		r4k_blast_dcache = (void *)cache_noop;
+	else if (dc_lsize == 16)
 		r4k_blast_dcache = blast_dcache16;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache = blast_dcache32;
@@ -197,11 +202,13 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
 
 static void (* r4k_blast_icache_page)(unsigned long addr);
 
-static inline void r4k_blast_icache_page_setup(void)
+static void __init r4k_blast_icache_page_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();
 
-	if (ic_lsize == 16)
+	if (ic_lsize == 0)
+		r4k_blast_icache_page = (void *)cache_noop;
+	else if (ic_lsize == 16)
 		r4k_blast_icache_page = blast_icache16_page;
 	else if (ic_lsize == 32)
 		r4k_blast_icache_page = blast_icache32_page;
@@ -212,11 +219,13 @@ static inline void r4k_blast_icache_page_setup(void)
 
 static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
 
-static inline void r4k_blast_icache_page_indexed_setup(void)
+static void __init r4k_blast_icache_page_indexed_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();
 
-	if (ic_lsize == 16)
+	if (ic_lsize == 0)
+		r4k_blast_icache_page_indexed = (void *)cache_noop;
+	else if (ic_lsize == 16)
 		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
 	else if (ic_lsize == 32) {
 		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
@@ -234,11 +243,13 @@ static inline void r4k_blast_icache_page_indexed_setup(void)
 
 static void (* r4k_blast_icache)(void);
 
-static inline void r4k_blast_icache_setup(void)
+static void __init r4k_blast_icache_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();
 
-	if (ic_lsize == 16)
+	if (ic_lsize == 0)
+		r4k_blast_icache = (void *)cache_noop;
+	else if (ic_lsize == 16)
 		r4k_blast_icache = blast_icache16;
 	else if (ic_lsize == 32) {
 		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
@@ -253,12 +264,12 @@ static inline void r4k_blast_icache_setup(void)
 
 static void (* r4k_blast_scache_page)(unsigned long addr);
 
-static inline void r4k_blast_scache_page_setup(void)
+static void __init r4k_blast_scache_page_setup(void)
 {
 	unsigned long sc_lsize = cpu_scache_line_size();
 
 	if (scache_size == 0)
-		r4k_blast_scache_page = (void *)no_sc_noop;
+		r4k_blast_scache_page = (void *)cache_noop;
 	else if (sc_lsize == 16)
 		r4k_blast_scache_page = blast_scache16_page;
 	else if (sc_lsize == 32)
@@ -271,12 +282,12 @@ static inline void r4k_blast_scache_page_setup(void)
 
 static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
 
-static inline void r4k_blast_scache_page_indexed_setup(void)
+static void __init r4k_blast_scache_page_indexed_setup(void)
 {
 	unsigned long sc_lsize = cpu_scache_line_size();
 
 	if (scache_size == 0)
-		r4k_blast_scache_page_indexed = (void *)no_sc_noop;
+		r4k_blast_scache_page_indexed = (void *)cache_noop;
 	else if (sc_lsize == 16)
 		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
 	else if (sc_lsize == 32)
@@ -289,12 +300,12 @@ static inline void r4k_blast_scache_page_indexed_setup(void)
 
 static void (* r4k_blast_scache)(void);
 
-static inline void r4k_blast_scache_setup(void)
+static void __init r4k_blast_scache_setup(void)
 {
 	unsigned long sc_lsize = cpu_scache_line_size();
 
 	if (scache_size == 0)
-		r4k_blast_scache = (void *)no_sc_noop;
+		r4k_blast_scache = (void *)cache_noop;
 	else if (sc_lsize == 16)
 		r4k_blast_scache = blast_scache16;
 	else if (sc_lsize == 32)
@@ -464,7 +475,7 @@ static inline void local_r4k_flush_cache_page(void *args)
 		}
 	}
 	if (exec) {
-		if (cpu_has_vtag_icache) {
+		if (cpu_has_vtag_icache && mm == current->active_mm) {
 			int cpu = smp_processor_id();
 
 			if (cpu_context(cpu, mm) != 0)
@@ -508,7 +519,7 @@ static inline void local_r4k_flush_icache_range(void *args)
 	unsigned long end = fir_args->end;
 
 	if (!cpu_has_ic_fills_f_dc) {
-		if (end - start > dcache_size) {
+		if (end - start >= dcache_size) {
 			r4k_blast_dcache();
 		} else {
 			R4600_HIT_CACHEOP_WAR_IMPL;
@@ -567,7 +578,7 @@ static inline void local_r4k_flush_icache_page(void *args)
 	 * secondary cache will result in any entries in the primary caches
 	 * also getting invalidated which hopefully is a bit more economical.
 	 */
-	if (cpu_has_subset_pcaches) {
+	if (cpu_has_inclusive_pcaches) {
 		unsigned long addr = (unsigned long) page_address(page);
 
 		r4k_blast_scache_page(addr);
@@ -588,7 +599,7 @@ static inline void local_r4k_flush_icache_page(void *args)
 	 * We're not sure of the virtual address(es) involved here, so
 	 * we have to flush the entire I-cache.
 	 */
-	if (cpu_has_vtag_icache) {
+	if (cpu_has_vtag_icache && vma->vm_mm == current->active_mm) {
 		int cpu = smp_processor_id();
 
 		if (cpu_context(cpu, vma->vm_mm) != 0)
@@ -623,7 +634,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 	/* Catch bad driver code */
 	BUG_ON(size == 0);
 
-	if (cpu_has_subset_pcaches) {
+	if (cpu_has_inclusive_pcaches) {
 		if (size >= scache_size)
 			r4k_blast_scache();
 		else
@@ -651,7 +662,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	/* Catch bad driver code */
 	BUG_ON(size == 0);
 
-	if (cpu_has_subset_pcaches) {
+	if (cpu_has_inclusive_pcaches) {
 		if (size >= scache_size)
 			r4k_blast_scache();
 		else
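
The two r4k_dma_cache hunks just above keep a size cutoff worth spelling out: once the requested range is at least as large as the secondary cache, blasting the whole cache with index ops is cheaper than issuing hit ops on every line of the range. A minimal, self-contained sketch of that dispatch follows; blast_all() and blast_range() are illustrative stand-ins, not kernel symbols.

/* Sketch of the size cutoff used by r4k_dma_cache_wback_inv() above. */
static void blast_all(void)
{
	/* index ops: cost bounded by the cache size, no address match needed */
}

static void blast_range(unsigned long start, unsigned long end)
{
	/* hit ops: one cache op per line in [start, end) */
}

static void dma_wback_inv(unsigned long addr, unsigned long size,
			  unsigned long cache_size)
{
	if (size >= cache_size)
		blast_all();			/* whole cache is cheaper */
	else
		blast_range(addr, addr + size);	/* just the DMA buffer */
}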
@@ -683,10 +694,12 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 	unsigned long addr = (unsigned long) arg;
 
 	R4600_HIT_CACHEOP_WAR_IMPL;
-	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
+	if (dc_lsize)
+		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
 	if (!cpu_icache_snoops_remote_store && scache_size)
 		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
-	protected_flush_icache_line(addr & ~(ic_lsize - 1));
+	if (ic_lsize)
+		protected_flush_icache_line(addr & ~(ic_lsize - 1));
 	if (MIPS4K_ICACHE_REFILL_WAR) {
 		__asm__ __volatile__ (
 			".set push\n\t"
@@ -849,15 +862,18 @@ static void __init probe_pcache(void)
 		break;
 
 	case CPU_VR4133:
-		write_c0_config(config & ~CONF_EB);
+		write_c0_config(config & ~VR41_CONF_P4K);
 	case CPU_VR4131:
 		/* Workaround for cache instruction bug of VR4131 */
 		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
 		    c->processor_id == 0x0c82U) {
-			config &= ~0x00000030U;
-			config |= 0x00410000U;
+			config |= 0x00400000U;
+			if (c->processor_id == 0x0c80U)
+				config |= VR41_CONF_BP;
 			write_c0_config(config);
-		}
+		} else
+			c->options |= MIPS_CPU_CACHE_CDEX_P;
+
 		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 		c->icache.ways = 2;
@@ -867,8 +883,6 @@ static void __init probe_pcache(void)
 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 		c->dcache.ways = 2;
 		c->dcache.waybit = __ffs(dcache_size/2);
-
-		c->options |= MIPS_CPU_CACHE_CDEX_P;
 		break;
 
 	case CPU_VR41XX:
@@ -973,8 +987,10 @@ static void __init probe_pcache(void)
 	c->icache.waysize = icache_size / c->icache.ways;
 	c->dcache.waysize = dcache_size / c->dcache.ways;
 
-	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
-	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
+	c->icache.sets = c->icache.linesz ?
+		icache_size / (c->icache.linesz * c->icache.ways) : 0;
+	c->dcache.sets = c->dcache.linesz ?
+		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
 
 	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
@@ -993,10 +1009,16 @@ static void __init probe_pcache(void)
 		break;
 	case CPU_24K:
 	case CPU_34K:
-		if (!(read_c0_config7() & (1 << 16)))
+	case CPU_74K:
+		if ((read_c0_config7() & (1 << 16))) {
+			/* effectively physically indexed dcache,
+			   thus no virtual aliases. */
+			c->dcache.flags |= MIPS_CACHE_PINDEX;
+			break;
+		}
 	default:
-		if (c->dcache.waysize > PAGE_SIZE)
-			c->dcache.flags |= MIPS_CACHE_ALIASES;
+		if (c->dcache.waysize > PAGE_SIZE)
+			c->dcache.flags |= MIPS_CACHE_ALIASES;
 	}
 
 	switch (c->cputype) {
@@ -1092,6 +1114,7 @@ static int __init probe_scache(void)
 
 extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
+extern int mips_sc_init(void);
 
 static void __init setup_scache(void)
 {
@@ -1139,17 +1162,29 @@ static void __init setup_scache(void)
 		return;
 
 	default:
+		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
+		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
+		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
+		    c->isa_level == MIPS_CPU_ISA_M64R2) {
+#ifdef CONFIG_MIPS_CPU_SCACHE
+			if (mips_sc_init ()) {
+				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
+				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
+				       scache_size >> 10,
+				       way_string[c->scache.ways], c->scache.linesz);
+			}
+#else
+			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
+				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
+#endif
+			return;
+		}
 		sc_present = 0;
 	}
 
 	if (!sc_present)
 		return;
 
-	if ((c->isa_level == MIPS_CPU_ISA_M32R1 ||
-	     c->isa_level == MIPS_CPU_ISA_M64R1) &&
-	    !(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
-		panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
-
 	/* compute a couple of other cache variables */
 	c->scache.waysize = scache_size / c->scache.ways;
@@ -1158,7 +1193,7 @@ static void __init setup_scache(void)
 	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
 	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
 
-	c->options |= MIPS_CPU_SUBSET_CACHES;
+	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
 }
 
 void au1x00_fixup_config_od(void)
@@ -1186,7 +1221,7 @@ void au1x00_fixup_config_od(void)
 	}
 }
 
-static inline void coherency_setup(void)
+static void __init coherency_setup(void)
 {
 	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
 
@@ -1207,7 +1242,7 @@ static inline void coherency_setup(void)
 		clear_c0_config(CONF_CU);
 		break;
 	/*
-	 * We need to catch the ealry Alchemy SOCs with
+	 * We need to catch the early Alchemy SOCs with
 	 * the write-only co_config.od bit and set it back to one...
 	 */
 	case CPU_AU1000: /* rev. DA, HA, HB */
@@ -1246,15 +1281,17 @@ void __init r4k_cache_init(void)
 	 * This code supports virtually indexed processors and will be
 	 * unnecessarily inefficient on physically indexed processors.
 	 */
-	shm_align_mask = max_t( unsigned long,
-				c->dcache.sets * c->dcache.linesz - 1,
-				PAGE_SIZE - 1);
-
+	if (c->dcache.linesz)
+		shm_align_mask = max_t( unsigned long,
+					c->dcache.sets * c->dcache.linesz - 1,
+					PAGE_SIZE - 1);
+	else
+		shm_align_mask = PAGE_SIZE-1;
 	flush_cache_all		= r4k_flush_cache_all;
 	__flush_cache_all	= r4k___flush_cache_all;
 	flush_cache_mm		= r4k_flush_cache_mm;
 	flush_cache_page	= r4k_flush_cache_page;
-	flush_icache_page	= r4k_flush_icache_page;
+	__flush_icache_page	= r4k_flush_icache_page;
 	flush_cache_range	= r4k_flush_cache_range;
 
 	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
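
The r4k_blast_*_setup() routines converted above all follow one pattern: each cache operation is bound to a function pointer once at boot, and the new lsize == 0 cases point it at cache_noop so hot paths never have to test for a missing cache level. A self-contained sketch of the idea (the kernel casts cache_noop through void * because the blast routines' signatures differ; here the signatures match, so no cast is needed):

#include <stdio.h>

static void cache_noop(void) { }			/* no cache: do nothing */
static void blast_dcache16(void) { puts("16-byte lines"); }
static void blast_dcache32(void) { puts("32-byte lines"); }

static void (*r4k_blast_dcache)(void);

/* Pick the implementation once, from the probed line size. */
static void r4k_blast_dcache_setup(unsigned long dc_lsize)
{
	if (dc_lsize == 0)
		r4k_blast_dcache = cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

int main(void)
{
	r4k_blast_dcache_setup(32);	/* pretend the probe found 32-byte lines */
	r4k_blast_dcache();		/* callers never re-test dc_lsize */
	return 0;
}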
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
index f9b129491b1e..16bad7c0a63f 100644
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -18,7 +18,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
-#include <linux/config.h>
 #include <linux/init.h>
 
 #include <asm/asm.h>
@@ -156,6 +155,26 @@ static inline void __sb1_flush_icache_all(void)
 }
 
 /*
+ * Invalidate a range of the icache.  The addresses are virtual, and
+ * the cache is virtually indexed and tagged.  However, we don't
+ * necessarily have the right ASID context, so use index ops instead
+ * of hit ops.
+ */
+static inline void __sb1_flush_icache_range(unsigned long start,
+	unsigned long end)
+{
+	start &= ~(icache_line_size - 1);
+	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
+
+	while (start != end) {
+		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
+		start += icache_line_size;
+	}
+	mispredict();
+	sync();
+}
+
+/*
  * Flush the icache for a given physical page.  Need to writeback the
  * dcache first, then invalidate the icache.  If the page isn't
  * executable, nothing is required.
@@ -174,8 +193,11 @@ static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long
 	/*
	 * Bumping the ASID is probably cheaper than the flush ...
	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0)
-		drop_mmu_context(vma->vm_mm, cpu);
+	if (vma->vm_mm == current->active_mm) {
+		if (cpu_context(cpu, vma->vm_mm) != 0)
+			drop_mmu_context(vma->vm_mm, cpu);
+	} else
+		__sb1_flush_icache_range(addr, addr + PAGE_SIZE);
 }
 
 #ifdef CONFIG_SMP
@@ -211,26 +233,6 @@ void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsign
 	__attribute__((alias("local_sb1_flush_cache_page")));
 #endif
 
-/*
- * Invalidate a range of the icache.  The addresses are virtual, and
- * the cache is virtually indexed and tagged.  However, we don't
- * necessarily have the right ASID context, so use index ops instead
- * of hit ops.
- */
-static inline void __sb1_flush_icache_range(unsigned long start,
-	unsigned long end)
-{
-	start &= ~(icache_line_size - 1);
-	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
-
-	while (start != end) {
-		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
-		start += icache_line_size;
-	}
-	mispredict();
-	sync();
-}
-
 /*
  * Invalidate all caches on this CPU
  */
@@ -327,9 +329,12 @@ static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
 	 * If there's a context, bump the ASID (cheaper than a flush,
	 * since we don't know VAs!)
	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0) {
-		drop_mmu_context(vma->vm_mm, cpu);
-	}
+	if (vma->vm_mm == current->active_mm) {
+		if (cpu_context(cpu, vma->vm_mm) != 0)
+			drop_mmu_context(vma->vm_mm, cpu);
+	} else
+		__sb1_flush_icache_range(start, start + PAGE_SIZE);
+
 }
 
 #ifdef CONFIG_SMP
@@ -521,7 +526,7 @@ void sb1_cache_init(void)
 
 	/* These routines are for Icache coherence with the Dcache */
 	flush_icache_range = sb1_flush_icache_range;
-	flush_icache_page = sb1_flush_icache_page;
+	__flush_icache_page = sb1_flush_icache_page;
 	flush_icache_all = __sb1_flush_icache_all; /* local only */
 
 	/* This implies an Icache flush too, so can't be nop'ed */
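
__sb1_flush_icache_range(), moved above its new callers here, deliberately uses index ops: hit ops compare the ASID-qualified virtual tag and would silently miss when the flush is done on behalf of some other process's mm. A self-contained sketch of the index walk; the geometry values are illustrative (the real ones come from the cache probe), and cache_index_invalidate() stands in for the CACHE instruction:

static unsigned long icache_line_size = 32;	 /* example: 32-byte lines */
static unsigned long icache_index_mask = 0x1fff; /* example: 8kB per way */

static void cache_index_invalidate(unsigned long index)
{
	/* would issue Index_Invalidate_I on this cache index */
}

static void flush_icache_range_by_index(unsigned long start, unsigned long end)
{
	/* round the range out to whole cache lines */
	start &= ~(icache_line_size - 1);
	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);

	/* index ops need no ASID or tag match, so this always works */
	while (start != end) {
		cache_index_invalidate(start & icache_index_mask);
		start += icache_line_size;
	}
}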
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 5dfc9b1901f6..932a09d7ef84 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -382,7 +382,7 @@ void __init tx39_cache_init(void)
 		flush_cache_mm		= (void *) tx39h_flush_icache_all;
 		flush_cache_range	= (void *) tx39h_flush_icache_all;
 		flush_cache_page	= (void *) tx39h_flush_icache_all;
-		flush_icache_page	= (void *) tx39h_flush_icache_all;
+		__flush_icache_page	= (void *) tx39h_flush_icache_all;
 		flush_icache_range	= (void *) tx39h_flush_icache_all;
 
 		flush_cache_sigtramp	= (void *) tx39h_flush_icache_all;
@@ -408,7 +408,7 @@ void __init tx39_cache_init(void)
 		flush_cache_mm = tx39_flush_cache_mm;
 		flush_cache_range = tx39_flush_cache_range;
 		flush_cache_page = tx39_flush_cache_page;
-		flush_icache_page = tx39_flush_icache_page;
+		__flush_icache_page = tx39_flush_icache_page;
 		flush_icache_range = tx39_flush_icache_range;
 
 		flush_cache_sigtramp = tx39_flush_cache_sigtramp;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 83a56296be86..40c8b0235183 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -5,7 +5,6 @@
  *
  * Copyright (C) 1994 - 2003 by Ralf Baechle
  */
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -26,7 +25,7 @@ void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
 void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
 void (*flush_icache_range)(unsigned long start, unsigned long end);
-void (*flush_icache_page)(struct vm_area_struct *vma, struct page *page);
+void (*__flush_icache_page)(struct vm_area_struct *vma, struct page *page);
 
 /* MIPS specific cache operations */
 void (*flush_cache_sigtramp)(unsigned long addr);
@@ -71,6 +70,8 @@ void __flush_dcache_page(struct page *page)
 	struct address_space *mapping = page_mapping(page);
 	unsigned long addr;
 
+	if (PageHighMem(page))
+		return;
 	if (mapping && !mapping_mapped(mapping)) {
 		SetPageDcacheDirty(page);
 		return;
@@ -92,16 +93,16 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
 {
 	struct page *page;
 	unsigned long pfn, addr;
+	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
 
 	pfn = pte_pfn(pte);
-	if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page_mapping(page)) &&
-	    Page_dcache_dirty(page)) {
-		if (pages_do_alias((unsigned long)page_address(page),
-		    address & PAGE_MASK)) {
-			addr = (unsigned long) page_address(page);
+	if (unlikely(!pfn_valid(pfn)))
+		return;
+	page = pfn_to_page(pfn);
+	if (page_mapping(page) && Page_dcache_dirty(page)) {
+		addr = (unsigned long) page_address(page);
+		if (exec || pages_do_alias(addr, address & PAGE_MASK))
 			flush_data_cache_page(addr);
-		}
-
 		ClearPageDcacheDirty(page);
 	}
 }
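
The __update_cache() rewrite above completes MIPS' lazy dcache-flush protocol: __flush_dcache_page() merely marks a page that has no current user mapping as dirty, and the real flush happens later, when the page is actually mapped, and now also whenever the new mapping is executable. A self-contained sketch of the protocol; page_state and the 16kB way-size constant are illustrative, not kernel definitions:

#include <stdbool.h>

struct page_state {
	bool dcache_dirty;	/* models Page_dcache_dirty() */
};

static void flush_data_cache_page(unsigned long addr)
{
	/* would issue hit writeback/invalidate ops over one page */
}

/* two addresses alias if they pick different lines in the same way */
static bool pages_do_alias(unsigned long a, unsigned long b)
{
	return ((a ^ b) & 0x3fff) != 0;	/* example: 16kB ways, 4kB pages */
}

/* kernel wrote the page, but no user mapping exists yet: defer */
static void flush_dcache_page(struct page_state *p)
{
	p->dcache_dirty = true;
}

/* page is being mapped into user space at uaddr */
static void update_cache(struct page_state *p, unsigned long kaddr,
			 unsigned long uaddr, bool exec)
{
	if (p->dcache_dirty && (exec || pages_do_alias(kaddr, uaddr))) {
		flush_data_cache_page(kaddr);
		p->dcache_dirty = false;
	}
}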
diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c
index 1cf3c6006ccd..e19fbb9ee47f 100644
--- a/arch/mips/mm/cerr-sb1.c
+++ b/arch/mips/mm/cerr-sb1.c
@@ -15,7 +15,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
-#include <linux/config.h>
 #include <linux/sched.h>
 #include <asm/mipsregs.h>
 #include <asm/sibyte/sb1250.h>
diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c
index f6b3c722230c..7fa5fd16e46b 100644
--- a/arch/mips/mm/dma-coherent.c
+++ b/arch/mips/mm/dma-coherent.c
@@ -7,7 +7,6 @@
  * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
  * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
  */
-#include <linux/config.h>
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index cd4ea8474f89..2eeffe5c2a3a 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -7,7 +7,6 @@
  * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
  * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
  */
-#include <linux/config.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/module.h>
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index e3a617224868..8423d8590779 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -89,7 +89,7 @@ good_area:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
 			goto bad_area;
 	}
 
@@ -171,7 +171,7 @@ no_context:
  */
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (tsk->pid == 1) {
+	if (is_init(tsk)) {
 		yield();
 		down_read(&mm->mmap_sem);
 		goto survive;
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 0c544375b856..99ebf3ccc222 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,4 +1,3 @@
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
 #include <asm/tlbflush.h>
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 33f6e1cdfd5b..5b06349af2d5 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -8,7 +8,6 @@
  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
  */
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/signal.h>
@@ -140,10 +139,36 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 extern void pagetable_init(void);
 
+static int __init page_is_ram(unsigned long pagenr)
+{
+	int i;
+
+	for (i = 0; i < boot_mem_map.nr_map; i++) {
+		unsigned long addr, end;
+
+		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
+			/* not usable memory */
+			continue;
+
+		addr = PFN_UP(boot_mem_map.map[i].addr);
+		end = PFN_DOWN(boot_mem_map.map[i].addr +
+			       boot_mem_map.map[i].size);
+
+		if (pagenr >= addr && pagenr < end)
+			return 1;
+	}
+
+	return 0;
+}
+
 void __init paging_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+	unsigned long zones_size[] = { 0, };
 	unsigned long max_dma, high, low;
+#ifndef CONFIG_FLATMEM
+	unsigned long zholes_size[] = { 0, };
+	unsigned long i, j, pfn;
+#endif
 
 	pagetable_init();
 
@@ -175,29 +200,16 @@ void __init paging_init(void)
 	zones_size[ZONE_HIGHMEM] = high - low;
 #endif
 
+#ifdef CONFIG_FLATMEM
 	free_area_init(zones_size);
-}
-
-static inline int page_is_ram(unsigned long pagenr)
-{
-	int i;
-
-	for (i = 0; i < boot_mem_map.nr_map; i++) {
-		unsigned long addr, end;
-
-		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
-			/* not usable memory */
-			continue;
-
-		addr = PFN_UP(boot_mem_map.map[i].addr);
-		end = PFN_DOWN(boot_mem_map.map[i].addr +
-			       boot_mem_map.map[i].size);
-
-		if (pagenr >= addr && pagenr < end)
-			return 1;
-	}
-
-	return 0;
+#else
+	pfn = 0;
+	for (i = 0; i < MAX_NR_ZONES; i++)
+		for (j = 0; j < zones_size[i]; j++, pfn++)
+			if (!page_is_ram(pfn))
+				zholes_size[i]++;
+	free_area_init_node(0, NODE_DATA(0), zones_size, 0, zholes_size);
+#endif
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
@@ -214,9 +226,9 @@ void __init mem_init(void)
 #ifdef CONFIG_DISCONTIGMEM
 #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
 #endif
-	max_mapnr = num_physpages = highend_pfn;
+	max_mapnr = highend_pfn;
 #else
-	max_mapnr = num_physpages = max_low_pfn;
+	max_mapnr = max_low_pfn;
 #endif
 	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 
@@ -230,6 +242,7 @@ void __init mem_init(void)
 		if (PageReserved(pfn_to_page(tmp)))
 			reservedpages++;
 	}
+	num_physpages = ram;
 
 #ifdef CONFIG_HIGHMEM
 	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
@@ -248,6 +261,7 @@ void __init mem_init(void)
 		totalhigh_pages++;
 	}
 	totalram_pages += totalhigh_pages;
+	num_physpages += totalhigh_pages;
 #endif
 
 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
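
paging_init() above now hands free_area_init_node() a zholes_size[] array: every pfn in each zone that page_is_ram() rejects is counted as a hole, which keeps the zone page accounting honest on systems with gaps in their memory map. A self-contained sketch of that counting loop, with a fabricated hole at pfn 256..511:

#define MAX_NR_ZONES 3

/* example memory map: RAM everywhere except pfn 256..511 */
static int page_is_ram(unsigned long pfn)
{
	return pfn < 256 || pfn >= 512;
}

static void count_holes(const unsigned long zones_size[MAX_NR_ZONES],
			unsigned long zholes_size[MAX_NR_ZONES])
{
	unsigned long i, j, pfn = 0;

	/* walk every pfn zone by zone; non-RAM pages become zone holes */
	for (i = 0; i < MAX_NR_ZONES; i++)
		for (j = 0; j < zones_size[i]; j++, pfn++)
			if (!page_is_ram(pfn))
				zholes_size[i]++;
}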
diff --git a/arch/mips/mm/pg-sb1.c b/arch/mips/mm/pg-sb1.c
index 148c65b9cd8b..fc3c7878fb45 100644
--- a/arch/mips/mm/pg-sb1.c
+++ b/arch/mips/mm/pg-sb1.c
@@ -22,7 +22,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index 4a3c4919e314..4bdaa05f485b 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -5,7 +5,6 @@
  *
  * Copyright (C) 2003 by Ralf Baechle
  */
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/bootmem.h>
diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c
index 3fe94202da8c..c93aa6cbcaca 100644
--- a/arch/mips/mm/pgtable.c
+++ b/arch/mips/mm/pgtable.c
@@ -1,4 +1,3 @@
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
@@ -16,6 +15,8 @@ void show_mem(void)
 	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	pfn = max_mapnr;
 	while (pfn-- > 0) {
+		if (!pfn_valid(pfn))
+			continue;
 		page = pfn_to_page(pfn);
 		total++;
 		if (PageHighMem(page))
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
new file mode 100644
index 000000000000..42b50964c644
--- /dev/null
+++ b/arch/mips/mm/sc-mips.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2006 Chris Dearman (chris@mips.com),
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/mipsregs.h>
+#include <asm/bcache.h>
+#include <asm/cacheops.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/mmu_context.h>
+#include <asm/r4kcache.h>
+
+/*
+ * MIPS32/MIPS64 L2 cache handling
+ */
+
+/*
+ * Writeback and invalidate the secondary cache before DMA.
+ */
+static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
+{
+	blast_scache_range(addr, addr + size);
+}
+
+/*
+ * Invalidate the secondary cache before DMA.
+ */
+static void mips_sc_inv(unsigned long addr, unsigned long size)
+{
+	blast_inv_scache_range(addr, addr + size);
+}
+
+static void mips_sc_enable(void)
+{
+	/* L2 cache is permanently enabled */
+}
+
+static void mips_sc_disable(void)
+{
+	/* L2 cache is permanently enabled */
+}
+
+static struct bcache_ops mips_sc_ops = {
+	.bc_enable = mips_sc_enable,
+	.bc_disable = mips_sc_disable,
+	.bc_wback_inv = mips_sc_wback_inv,
+	.bc_inv = mips_sc_inv
+};
+
+static inline int __init mips_sc_probe(void)
+{
+	struct cpuinfo_mips *c = &current_cpu_data;
+	unsigned int config1, config2;
+	unsigned int tmp;
+
+	/* Mark as not present until probe completed */
+	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
+
+	/* Ignore anything but MIPSxx processors */
+	if (c->isa_level != MIPS_CPU_ISA_M32R1 &&
+	    c->isa_level != MIPS_CPU_ISA_M32R2 &&
+	    c->isa_level != MIPS_CPU_ISA_M64R1 &&
+	    c->isa_level != MIPS_CPU_ISA_M64R2)
+		return 0;
+
+	/* Does this MIPS32/MIPS64 CPU have a config2 register? */
+	config1 = read_c0_config1();
+	if (!(config1 & MIPS_CONF_M))
+		return 0;
+
+	config2 = read_c0_config2();
+	tmp = (config2 >> 4) & 0x0f;
+	if (0 < tmp && tmp <= 7)
+		c->scache.linesz = 2 << tmp;
+	else
+		return 0;
+
+	tmp = (config2 >> 8) & 0x0f;
+	if (0 <= tmp && tmp <= 7)
+		c->scache.sets = 64 << tmp;
+	else
+		return 0;
+
+	tmp = (config2 >> 0) & 0x0f;
+	if (0 <= tmp && tmp <= 7)
+		c->scache.ways = tmp + 1;
+	else
+		return 0;
+
+	c->scache.waysize = c->scache.sets * c->scache.linesz;
+	c->scache.waybit = __ffs(c->scache.waysize);
+
+	c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+
+	return 1;
+}
+
+int __init mips_sc_init(void)
+{
+	int found = mips_sc_probe ();
+	if (found) {
+		mips_sc_enable();
+		bcops = &mips_sc_ops;
+	}
+	return found;
+}
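
mips_sc_probe() above decodes the architecturally defined Config2 register: bits 7:4 (SL) give the L2 line size as 2 << SL, bits 11:8 (SS) the sets per way as 64 << SS, and bits 3:0 (SA) the associativity as SA + 1; SL == 0 means no L2 is described. A self-contained sketch of the same decode, returning the total cache size in bytes:

/* Decode the L2 geometry fields of Config2 (MIPS32/MIPS64 PRA layout).
 * Returns the total L2 size in bytes, or 0 if no usable L2 is described. */
static unsigned long l2_size_from_config2(unsigned int config2)
{
	unsigned int sl = (config2 >> 4) & 0x0f;	/* line size code  */
	unsigned int ss = (config2 >> 8) & 0x0f;	/* sets-per-way code */
	unsigned int sa = (config2 >> 0) & 0x0f;	/* ways - 1        */

	if (sl == 0 || sl > 7 || ss > 7 || sa > 7)
		return 0;

	return (2UL << sl) * (64UL << ss) * (sa + 1);
}

For example, SL=4, SS=3, SA=7 describes 32-byte lines, 512 sets per way and 8 ways: 32 * 512 * 8 = 128kB.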
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 965cb4c4359d..2e0e21ef433e 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -8,7 +8,6 @@
  * Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2002  MIPS Technologies, Inc.  All rights reserved.
  */
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
@@ -27,11 +26,6 @@ extern void build_tlb_refill_handler(void);
  */
 #define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
 
-/* CP0 hazard avoidance. */
-#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
-				     "nop; nop; nop; nop; nop; nop;\n\t" \
-				     ".set reorder\n\t")
-
 /* Atomicity and interruptability */
 #ifdef CONFIG_MIPS_MT_SMTC
 
@@ -127,7 +121,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			start += (PAGE_SIZE << 1);
 			mtc0_tlbw_hazard();
 			tlb_probe();
-			BARRIER;
+			tlb_probe_hazard();
 			idx = read_c0_index();
 			write_c0_entrylo0(0);
 			write_c0_entrylo1(0);
@@ -169,7 +163,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 			start += (PAGE_SIZE << 1);
 			mtc0_tlbw_hazard();
 			tlb_probe();
-			BARRIER;
+			tlb_probe_hazard();
 			idx = read_c0_index();
 			write_c0_entrylo0(0);
 			write_c0_entrylo1(0);
@@ -203,7 +197,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 		write_c0_entryhi(page | newpid);
 		mtc0_tlbw_hazard();
 		tlb_probe();
-		BARRIER;
+		tlb_probe_hazard();
 		idx = read_c0_index();
 		write_c0_entrylo0(0);
 		write_c0_entrylo1(0);
@@ -236,7 +230,7 @@ void local_flush_tlb_one(unsigned long page)
 	write_c0_entryhi(page);
 	mtc0_tlbw_hazard();
 	tlb_probe();
-	BARRIER;
+	tlb_probe_hazard();
 	idx = read_c0_index();
 	write_c0_entrylo0(0);
 	write_c0_entrylo1(0);
@@ -280,7 +274,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	pgdp = pgd_offset(vma->vm_mm, address);
 	mtc0_tlbw_hazard();
 	tlb_probe();
-	BARRIER;
+	tlb_probe_hazard();
 	pudp = pud_offset(pgdp, address);
 	pmdp = pmd_offset(pudp, address);
 	idx = read_c0_index();
@@ -321,7 +315,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
 	pgdp = pgd_offset(vma->vm_mm, address);
 	mtc0_tlbw_hazard();
 	tlb_probe();
-	BARRIER;
+	tlb_probe_hazard();
 	pmdp = pmd_offset(pgdp, address);
 	idx = read_c0_index();
 	ptep = pte_offset_map(pmdp, address);
@@ -352,7 +346,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	wired = read_c0_wired();
 	write_c0_wired(wired + 1);
 	write_c0_index(wired);
-	BARRIER;
+	tlbw_use_hazard();	/* What is the hazard here? */
 	write_c0_pagemask(pagemask);
 	write_c0_entryhi(entryhi);
 	write_c0_entrylo0(entrylo0);
@@ -362,7 +356,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	tlbw_use_hazard();
 
 	write_c0_entryhi(old_ctx);
-	BARRIER;
+	tlbw_use_hazard();	/* What is the hazard here? */
 	write_c0_pagemask(old_pagemask);
 	local_flush_tlb_all();
 	EXIT_CRITICAL(flags);
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 1bfb09198ce3..266a47d65eed 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -8,7 +8,6 @@
  * Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2002  MIPS Technologies, Inc.  All rights reserved.
  */
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
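
The tlb-r4k.c hunks above retire the blind six-nop BARRIER in favour of the named hazard macros, which expand to whatever the running CPU actually needs. The ordering constraint they encode: the EntryHi write must reach the TLB before tlbp executes, and the probe result must settle before Index is read back. A minimal sketch of that sequence, using the kernel's own accessors from the diff; the wrapper function itself is hypothetical and assumes kernel context:

/* Probe the TLB for entryhi; a negative Index means "no match". */
static int probe_tlb_index(unsigned long entryhi)
{
	int idx;

	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();	/* EntryHi must be visible before tlbp */
	tlb_probe();
	tlb_probe_hazard();	/* probe result must settle before Index read */
	idx = read_c0_index();

	return idx;
}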
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 54507be2ab5b..375e0991505d 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -5,7 +5,7 @@
  *
  * Synthesize TLB refill handlers at runtime.
  *
- * Copyright (C) 2004,2005 by Thiemo Seufer
+ * Copyright (C) 2004,2005,2006 by Thiemo Seufer
  * Copyright (C) 2005  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  *
@@ -21,7 +21,6 @@
 
 #include <stdarg.h>
 
-#include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -36,8 +35,6 @@
 #include <asm/smp.h>
 #include <asm/war.h>
 
-/* #define DEBUG_TLB */
-
 static __init int __attribute__((unused)) r45k_bvahwbug(void)
 {
 	/* XXX: We should probe for the presence of this bug, but we don't. */
@@ -729,6 +726,7 @@ static void __init build_r3000_tlb_refill_handler(void)
 {
 	long pgdc = (long)pgd_current;
 	u32 *p;
+	int i;
 
 	memset(tlb_handler, 0, sizeof(tlb_handler));
 	p = tlb_handler;
@@ -754,16 +752,14 @@ static void __init build_r3000_tlb_refill_handler(void)
 	if (p > tlb_handler + 32)
 		panic("TLB refill handler space exceeded");
 
-	printk("Synthesized TLB refill handler (%u instructions).\n",
-	       (unsigned int)(p - tlb_handler));
-#ifdef DEBUG_TLB
-	{
-		int i;
+	pr_info("Synthesized TLB refill handler (%u instructions).\n",
+		(unsigned int)(p - tlb_handler));
 
-		for (i = 0; i < (p - tlb_handler); i++)
-			printk("%08x\n", tlb_handler[i]);
-	}
-#endif
+	pr_debug("\t.set push\n");
+	pr_debug("\t.set noreorder\n");
+	for (i = 0; i < (p - tlb_handler); i++)
+		pr_debug("\t.word 0x%08x\n", tlb_handler[i]);
+	pr_debug("\t.set pop\n");
 
 	memcpy((void *)ebase, tlb_handler, 0x80);
 }
@@ -1176,6 +1172,7 @@ static void __init build_r4000_tlb_refill_handler(void)
 	struct reloc *r = relocs;
 	u32 *f;
 	unsigned int final_len;
+	int i;
 
 	memset(tlb_handler, 0, sizeof(tlb_handler));
 	memset(labels, 0, sizeof(labels));
@@ -1273,24 +1270,21 @@ static void __init build_r4000_tlb_refill_handler(void)
 #endif /* CONFIG_64BIT */
 
 	resolve_relocs(relocs, labels);
-	printk("Synthesized TLB refill handler (%u instructions).\n",
-	       final_len);
-
-#ifdef DEBUG_TLB
-	{
-		int i;
+	pr_info("Synthesized TLB refill handler (%u instructions).\n",
+		final_len);
 
-		f = final_handler;
+	f = final_handler;
 #ifdef CONFIG_64BIT
-		if (final_len > 32)
-			final_len = 64;
-		else
-			f = final_handler + 32;
+	if (final_len > 32)
+		final_len = 64;
+	else
+		f = final_handler + 32;
 #endif /* CONFIG_64BIT */
-		for (i = 0; i < final_len; i++)
-			printk("%08x\n", f[i]);
-	}
-#endif
+	pr_debug("\t.set push\n");
+	pr_debug("\t.set noreorder\n");
+	for (i = 0; i < final_len; i++)
+		pr_debug("\t.word 0x%08x\n", f[i]);
+	pr_debug("\t.set pop\n");
 
 	memcpy((void *)ebase, final_handler, 0x100);
 }
@@ -1523,6 +1517,7 @@ static void __init build_r3000_tlb_load_handler(void)
 	u32 *p = handle_tlbl;
 	struct label *l = labels;
 	struct reloc *r = relocs;
+	int i;
 
 	memset(handle_tlbl, 0, sizeof(handle_tlbl));
 	memset(labels, 0, sizeof(labels));
@@ -1542,17 +1537,14 @@ static void __init build_r3000_tlb_load_handler(void)
 		panic("TLB load handler fastpath space exceeded");
 
 	resolve_relocs(relocs, labels);
-	printk("Synthesized TLB load handler fastpath (%u instructions).\n",
-	       (unsigned int)(p - handle_tlbl));
-
-#ifdef DEBUG_TLB
-	{
-		int i;
+	pr_info("Synthesized TLB load handler fastpath (%u instructions).\n",
+		(unsigned int)(p - handle_tlbl));
 
-		for (i = 0; i < (p - handle_tlbl); i++)
-			printk("%08x\n", handle_tlbl[i]);
-	}
-#endif
+	pr_debug("\t.set push\n");
+	pr_debug("\t.set noreorder\n");
+	for (i = 0; i < (p - handle_tlbl); i++)
+		pr_debug("\t.word 0x%08x\n", handle_tlbl[i]);
+	pr_debug("\t.set pop\n");
 }
 
 static void __init build_r3000_tlb_store_handler(void)
@@ -1560,6 +1552,7 @@ static void __init build_r3000_tlb_store_handler(void)
 	u32 *p = handle_tlbs;
 	struct label *l = labels;
 	struct reloc *r = relocs;
+	int i;
 
 	memset(handle_tlbs, 0, sizeof(handle_tlbs));
 	memset(labels, 0, sizeof(labels));
@@ -1579,17 +1572,14 @@ static void __init build_r3000_tlb_store_handler(void)
 		panic("TLB store handler fastpath space exceeded");
 
 	resolve_relocs(relocs, labels);
-	printk("Synthesized TLB store handler fastpath (%u instructions).\n",
-	       (unsigned int)(p - handle_tlbs));
+	pr_info("Synthesized TLB store handler fastpath (%u instructions).\n",
+		(unsigned int)(p - handle_tlbs));
 
-#ifdef DEBUG_TLB
-	{
-		int i;
-
-		for (i = 0; i < (p - handle_tlbs); i++)
-			printk("%08x\n", handle_tlbs[i]);
-	}
-#endif
+	pr_debug("\t.set push\n");
+	pr_debug("\t.set noreorder\n");
+	for (i = 0; i < (p - handle_tlbs); i++)
+		pr_debug("\t.word 0x%08x\n", handle_tlbs[i]);
+	pr_debug("\t.set pop\n");
 }
 
 static void __init build_r3000_tlb_modify_handler(void)
@@ -1597,6 +1587,7 @@ static void __init build_r3000_tlb_modify_handler(void)
 	u32 *p = handle_tlbm;
 	struct label *l = labels;
 	struct reloc *r = relocs;
+	int i;
 
 	memset(handle_tlbm, 0, sizeof(handle_tlbm));
 	memset(labels, 0, sizeof(labels));
@@ -1616,17 +1607,14 @@ static void __init build_r3000_tlb_modify_handler(void)
 		panic("TLB modify handler fastpath space exceeded");
 
 	resolve_relocs(relocs, labels);
-	printk("Synthesized TLB modify handler fastpath (%u instructions).\n",
-	       (unsigned int)(p - handle_tlbm));
+	pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n",
+		(unsigned int)(p - handle_tlbm));
 
-#ifdef DEBUG_TLB
-	{
-		int i;
-
-		for (i = 0; i < (p - handle_tlbm); i++)
-			printk("%08x\n", handle_tlbm[i]);
-	}
-#endif
+	pr_debug("\t.set push\n");
+	pr_debug("\t.set noreorder\n");
+	for (i = 0; i < (p - handle_tlbm); i++)
+		pr_debug("\t.word 0x%08x\n", handle_tlbm[i]);
+	pr_debug("\t.set pop\n");
 }
 
 /*
@@ -1678,6 +1666,7 @@ static void __init build_r4000_tlb_load_handler(void)
 	u32 *p = handle_tlbl;
 	struct label *l = labels;
 	struct reloc *r = relocs;
+	int i;
 
 	memset(handle_tlbl, 0, sizeof(handle_tlbl));
 	memset(labels, 0, sizeof(labels));
@@ -1705,17 +1694,14 @@ static void __init build_r4000_tlb_load_handler(void)
 		panic("TLB load handler fastpath space exceeded");
 
 	resolve_relocs(relocs, labels);
-	printk("Synthesized TLB load handler fastpath (%u instructions).\n",
-	       (unsigned int)(p - handle_tlbl));
-
-#ifdef DEBUG_TLB
-	{
-		int i;
+	pr_info("Synthesized TLB load handler fastpath (%u instructions).\n",
+		(unsigned int)(p - handle_tlbl));
 
-		for (i = 0; i < (p - handle_tlbl); i++)
-			printk("%08x\n", handle_tlbl[i]);
-	}
-#endif
+	pr_debug("\t.set push\n");
+	pr_debug("\t.set noreorder\n");
+	for (i = 0; i < (p - handle_tlbl); i++)
+		pr_debug("\t.word 0x%08x\n", handle_tlbl[i]);
+	pr_debug("\t.set pop\n");
 }
 
 static void __init build_r4000_tlb_store_handler(void)
@@ -1723,6 +1709,7 @@ static void __init build_r4000_tlb_store_handler(void)
 	u32 *p = handle_tlbs;
 	struct label *l = labels;
 	struct reloc *r = relocs;
+	int i;
 
 	memset(handle_tlbs, 0, sizeof(handle_tlbs));
 	memset(labels, 0, sizeof(labels));
@@ -1741,17 +1728,14 @@ static void __init build_r4000_tlb_store_handler(void)
 		panic("TLB store handler fastpath space exceeded");
 
 	resolve_relocs(relocs, labels);
-	printk("Synthesized TLB store handler fastpath (%u instructions).\n",
-	       (unsigned int)(p - handle_tlbs));
-
-#ifdef DEBUG_TLB
-	{
-		int i;
+	pr_info("Synthesized TLB store handler fastpath (%u instructions).\n",
+		(unsigned int)(p - handle_tlbs));
 
-		for (i = 0; i < (p - handle_tlbs); i++)
-			printk("%08x\n", handle_tlbs[i]);
-	}
-#endif
+	pr_debug("\t.set push\n");
+	pr_debug("\t.set noreorder\n");
+	for (i = 0; i < (p - handle_tlbs); i++)
+		pr_debug("\t.word 0x%08x\n", handle_tlbs[i]);
+	pr_debug("\t.set pop\n");
 }
 
 static void __init build_r4000_tlb_modify_handler(void)
@@ -1759,6 +1743,7 @@ static void __init build_r4000_tlb_modify_handler(void)
 	u32 *p = handle_tlbm;
 	struct label *l = labels;
 	struct reloc *r = relocs;
+	int i;
 
 	memset(handle_tlbm, 0, sizeof(handle_tlbm));
 	memset(labels, 0, sizeof(labels));
@@ -1778,17 +1763,14 @@ static void __init build_r4000_tlb_modify_handler(void)
 		panic("TLB modify handler fastpath space exceeded");
 
 	resolve_relocs(relocs, labels);
-	printk("Synthesized TLB modify handler fastpath (%u instructions).\n",
-	       (unsigned int)(p - handle_tlbm));
-
-#ifdef DEBUG_TLB
-	{
-		int i;
-
-		for (i = 0; i < (p - handle_tlbm); i++)
-			printk("%08x\n", handle_tlbm[i]);
-	}
-#endif
+	pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n",
+		(unsigned int)(p - handle_tlbm));
+
+	pr_debug("\t.set push\n");
+	pr_debug("\t.set noreorder\n");
+	for (i = 0; i < (p - handle_tlbm); i++)
+		pr_debug("\t.word 0x%08x\n", handle_tlbm[i]);
+	pr_debug("\t.set pop\n");
 }
 
 void __init build_tlb_refill_handler(void)
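
The tlbex.c conversion above drops the DEBUG_TLB ifdefs: each synthesized handler is now always dumped through pr_debug(), formatted as an assemblable fragment (.set noreorder plus one .word directive per opcode) so the output can be fed straight to an assembler or disassembler. A self-contained sketch of the same dump pattern, with printf standing in for pr_debug():

#include <stdio.h>

/* Emit a synthesized handler as an assemblable .word listing. */
static void dump_handler(const unsigned int *handler, int count)
{
	int i;

	printf("\t.set push\n");	/* the kernel uses pr_debug() here */
	printf("\t.set noreorder\n");
	for (i = 0; i < count; i++)
		printf("\t.word 0x%08x\n", handler[i]);
	printf("\t.set pop\n");
}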