Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/Makefile           |   6
-rw-r--r--  arch/mips/mm/c-r3k.c            |   4
-rw-r--r--  arch/mips/mm/c-r4k.c            |  53
-rw-r--r--  arch/mips/mm/c-tx39.c           |   4
-rw-r--r--  arch/mips/mm/dma-noncoherent.c  |  26
-rw-r--r--  arch/mips/mm/fault.c            |  12
-rw-r--r--  arch/mips/mm/hugetlbpage.c      |  14
-rw-r--r--  arch/mips/mm/init.c             | 149
-rw-r--r--  arch/mips/mm/ioremap.c          |   6
-rw-r--r--  arch/mips/mm/mmap.c             |  84
-rw-r--r--  arch/mips/mm/page.c             |   2
-rw-r--r--  arch/mips/mm/pgtable-32.c       |  26
-rw-r--r--  arch/mips/mm/sc-mips.c          |  27
-rw-r--r--  arch/mips/mm/tlb-r4k.c          |   8
-rw-r--r--  arch/mips/mm/tlb-r8k.c          | 239
-rw-r--r--  arch/mips/mm/tlbex.c            |  92
16 files changed, 249 insertions(+), 503 deletions(-)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 1e8d335025d7..46f483e952c8 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -28,11 +28,11 @@ obj-$(CONFIG_HIGHMEM) += highmem.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o
+obj-$(CONFIG_CPU_R3K_TLB) += tlb-r3k.o
 obj-$(CONFIG_CPU_R4K_CACHE_TLB) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o
-obj-$(CONFIG_CPU_R8000) += c-r4k.o cex-gen.o tlb-r8k.o
+obj-$(CONFIG_CPU_R3000) += c-r3k.o
 obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
-obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o
+obj-$(CONFIG_CPU_TX39XX) += c-tx39.o
 obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
 obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 0ca401ddf3b7..15bb8cf59828 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -241,6 +241,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
 	int exec = vma->vm_flags & VM_EXEC;
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
@@ -253,7 +254,8 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
 		return;
 
 	pgdp = pgd_offset(mm, addr);
-	pudp = pud_offset(pgdp, addr);
+	p4dp = p4d_offset(pgdp, addr);
+	pudp = pud_offset(p4dp, addr);
 	pmdp = pmd_offset(pudp, addr);
 	ptep = pte_offset(pmdp, addr);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 5166e38cd1c6..5f3d0103b95d 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -271,12 +271,14 @@ static inline void tx49_blast_icache32(void)
 	/* I'm in even chunk.  blast odd chunks */
 	for (ws = 0; ws < ws_end; ws += ws_inc)
 		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
-			cache32_unroll32(addr|ws, Index_Invalidate_I);
+			cache_unroll(32, kernel_cache, Index_Invalidate_I,
+				     addr | ws, 32);
 	CACHE32_UNROLL32_ALIGN;
 	/* I'm in odd chunk.  blast even chunks */
 	for (ws = 0; ws < ws_end; ws += ws_inc)
 		for (addr = start; addr < end; addr += 0x400 * 2)
-			cache32_unroll32(addr|ws, Index_Invalidate_I);
+			cache_unroll(32, kernel_cache, Index_Invalidate_I,
+				     addr | ws, 32);
 }
 
 static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
@@ -302,12 +304,14 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
 	/* I'm in even chunk.  blast odd chunks */
 	for (ws = 0; ws < ws_end; ws += ws_inc)
 		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
-			cache32_unroll32(addr|ws, Index_Invalidate_I);
+			cache_unroll(32, kernel_cache, Index_Invalidate_I,
+				     addr | ws, 32);
 	CACHE32_UNROLL32_ALIGN;
 	/* I'm in odd chunk.  blast even chunks */
 	for (ws = 0; ws < ws_end; ws += ws_inc)
 		for (addr = start; addr < end; addr += 0x400 * 2)
-			cache32_unroll32(addr|ws, Index_Invalidate_I);
+			cache_unroll(32, kernel_cache, Index_Invalidate_I,
+				     addr | ws, 32);
 }
 
 static void (* r4k_blast_icache_page)(unsigned long addr);
@@ -320,7 +324,7 @@ static void r4k_blast_icache_page_setup(void)
 		r4k_blast_icache_page = (void *)cache_noop;
 	else if (ic_lsize == 16)
 		r4k_blast_icache_page = blast_icache16_page;
-	else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
+	else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2EF)
 		r4k_blast_icache_page = loongson2_blast_icache32_page;
 	else if (ic_lsize == 32)
 		r4k_blast_icache_page = blast_icache32_page;
@@ -369,7 +373,7 @@ static void r4k_blast_icache_page_indexed_setup(void)
 	else if (TX49XX_ICACHE_INDEX_INV_WAR)
 		r4k_blast_icache_page_indexed =
 			tx49_blast_icache32_page_indexed;
-	else if (current_cpu_type() == CPU_LOONGSON2)
+	else if (current_cpu_type() == CPU_LOONGSON2EF)
 		r4k_blast_icache_page_indexed =
 			loongson2_blast_icache32_page_indexed;
 	else
@@ -395,7 +399,7 @@ static void r4k_blast_icache_setup(void)
 		r4k_blast_icache = blast_r4600_v1_icache32;
 	else if (TX49XX_ICACHE_INDEX_INV_WAR)
 		r4k_blast_icache = tx49_blast_icache32;
-	else if (current_cpu_type() == CPU_LOONGSON2)
+	else if (current_cpu_type() == CPU_LOONGSON2EF)
 		r4k_blast_icache = loongson2_blast_icache32;
 	else
 		r4k_blast_icache = blast_icache32;
@@ -465,7 +469,7 @@ static void r4k_blast_scache_node_setup(void)
 {
 	unsigned long sc_lsize = cpu_scache_line_size();
 
-	if (current_cpu_type() != CPU_LOONGSON3)
+	if (current_cpu_type() != CPU_LOONGSON64)
 		r4k_blast_scache_node = (void *)cache_noop;
 	else if (sc_lsize == 16)
 		r4k_blast_scache_node = blast_scache16_node;
@@ -480,7 +484,7 @@ static void r4k_blast_scache_node_setup(void)
 static inline void local_r4k___flush_cache_all(void * args)
 {
 	switch (current_cpu_type()) {
-	case CPU_LOONGSON2:
+	case CPU_LOONGSON2EF:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
 	case CPU_R4400SC:
@@ -497,7 +501,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 		r4k_blast_scache();
 		break;
 
-	case CPU_LOONGSON3:
+	case CPU_LOONGSON64:
 		/* Use get_ebase_cpunum() for both NUMA=y/n */
 		r4k_blast_scache_node(get_ebase_cpunum() >> 2);
 		break;
@@ -650,6 +654,7 @@ static inline void local_r4k_flush_cache_page(void *args)
 	struct mm_struct *mm = vma->vm_mm;
 	int map_coherent = 0;
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
@@ -664,7 +669,8 @@ static inline void local_r4k_flush_cache_page(void *args)
 	addr &= PAGE_MASK;
 	pgdp = pgd_offset(mm, addr);
-	pudp = pud_offset(pgdp, addr);
+	p4dp = p4d_offset(pgdp, addr);
+	pudp = pud_offset(p4dp, addr);
 	pmdp = pmd_offset(pudp, addr);
 	ptep = pte_offset(pmdp, addr);
@@ -770,7 +776,7 @@ static inline void __local_r4k_flush_icache_range(unsigned long start,
 		r4k_blast_icache();
 	else {
 		switch (boot_cpu_type()) {
-		case CPU_LOONGSON2:
+		case CPU_LOONGSON2EF:
 			protected_loongson2_blast_icache_range(start, end);
 			break;
@@ -863,7 +869,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {
 		if (size >= scache_size) {
-			if (current_cpu_type() != CPU_LOONGSON3)
+			if (current_cpu_type() != CPU_LOONGSON64)
 				r4k_blast_scache();
 			else
 				r4k_blast_scache_node(pa_to_nid(addr));
@@ -904,7 +910,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {
 		if (size >= scache_size) {
-			if (current_cpu_type() != CPU_LOONGSON3)
+			if (current_cpu_type() != CPU_LOONGSON64)
 				r4k_blast_scache();
 			else
 				r4k_blast_scache_node(pa_to_nid(addr));
@@ -1098,7 +1104,6 @@ static void probe_pcache(void)
 		c->options |= MIPS_CPU_CACHE_CDEX_P;
 		break;
 
-	case CPU_R5432:
 	case CPU_R5500:
 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
@@ -1134,7 +1139,6 @@ static void probe_pcache(void)
 	case CPU_R4400PC:
 	case CPU_R4400SC:
 	case CPU_R4400MC:
-	case CPU_R4300:
 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 		c->icache.ways = 1;
@@ -1226,7 +1230,7 @@ static void probe_pcache(void)
 		c->options |= MIPS_CPU_PREFETCH;
 		break;
 
-	case CPU_LOONGSON2:
+	case CPU_LOONGSON2EF:
 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 		if (prid & 0x3)
@@ -1244,7 +1248,7 @@ static void probe_pcache(void)
 		c->dcache.waybit = 0;
 		break;
 
-	case CPU_LOONGSON3:
+	case CPU_LOONGSON64:
 		config1 = read_c0_config1();
 		lsize = (config1 >> 19) & 7;
 		if (lsize)
@@ -1269,7 +1273,8 @@ static void probe_pcache(void)
 				    c->dcache.ways * c->dcache.linesz;
 		c->dcache.waybit = 0;
 
-		if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
+		if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
+				(PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0))
 			c->options |= MIPS_CPU_PREFETCH;
 		break;
@@ -1454,7 +1459,7 @@ static void probe_pcache(void)
 		c->dcache.flags &= ~MIPS_CACHE_ALIASES;
 		break;
 
-	case CPU_LOONGSON2:
+	case CPU_LOONGSON2EF:
 		/*
 		 * LOONGSON2 has 4 way icache, but when using indexed cache op,
 		 * one op will act on all 4 ways
@@ -1480,7 +1485,7 @@ static void probe_vcache(void)
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int config2, lsize;
 
-	if (current_cpu_type() != CPU_LOONGSON3)
+	if (current_cpu_type() != CPU_LOONGSON64)
 		return;
 
 	config2 = read_c0_config2();
@@ -1655,11 +1660,11 @@ static void setup_scache(void)
 #endif
 		return;
 
-	case CPU_LOONGSON2:
+	case CPU_LOONGSON2EF:
 		loongson2_sc_init();
 		return;
 
-	case CPU_LOONGSON3:
+	case CPU_LOONGSON64:
 		loongson3_sc_init();
 		return;
@@ -1928,7 +1933,7 @@ void r4k_cache_init(void)
 		/* Optimization: an L2 flush implicitly flushes the L1 */
 		current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
 		break;
-	case CPU_LOONGSON3:
+	case CPU_LOONGSON64:
 		/* Loongson-3 maintains cache coherency by hardware */
 		__flush_cache_all	= cache_noop;
 		__flush_cache_vmap	= cache_noop;
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index b7c8a9d79c35..686867270627 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -170,6 +170,7 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
 	int exec = vma->vm_flags & VM_EXEC;
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
@@ -183,7 +184,8 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
 	page &= PAGE_MASK;
 	pgdp = pgd_offset(mm, page);
-	pudp = pud_offset(pgdp, page);
+	p4dp = p4d_offset(pgdp, page);
+	pudp = pud_offset(p4dp, page);
 	pmdp = pmd_offset(pudp, page);
 	ptep = pte_offset(pmdp, page);
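The c-r3k.c, c-r4k.c and c-tx39.c hunks above all insert a p4d_t step between the pgd and pud lookups. On MIPS the p4d level is folded, so p4d_offset() should be a no-op that merely re-types its argument; the walk gains a line of source but no runtime cost. Below is a minimal user-space model of that folding idea; the types and helpers are illustrative stand-ins, not the kernel's definitions.

/*
 * Minimal user-space model of a "folded" page-table level, showing why
 * inserting p4d_offset() into a walk costs nothing when the level does
 * not really exist.  Hypothetical stand-in types, not kernel code.
 */
#include <stdio.h>

typedef struct { unsigned long pgd; } pgd_t;
typedef struct { pgd_t pgd; } p4d_t;	/* folded: a p4d is just the pgd entry */

/* With a folded level, "descending" is the identity cast. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long addr)
{
	(void)addr;			/* no index bits consumed */
	return (p4d_t *)pgd;
}

int main(void)
{
	pgd_t pgd = { .pgd = 0x12345000UL };
	p4d_t *p4d = p4d_offset(&pgd, 0xdeadbeefUL);

	/* Same object, same value: the extra level compiles away. */
	printf("pgd entry %#lx, via p4d %#lx, same address? %s\n",
	       pgd.pgd, p4d->pgd.pgd,
	       (void *)&pgd == (void *)p4d ? "yes" : "no");
	return 0;
}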
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index ed56c6fa7be2..dc42ffc83825 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -27,7 +27,7 @@
  * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
  * SGI IP32 aka O2.
  */
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(void)
 {
 	switch (boot_cpu_type()) {
 	case CPU_R10000:
@@ -59,20 +59,6 @@ void *cached_kernel_address(void *addr)
 	return __va(addr) - UNCAC_BASE;
 }
 
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
-		dma_addr_t dma_addr)
-{
-	return page_to_pfn(virt_to_page(cached_kernel_address(cpu_addr)));
-}
-
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs)
-{
-	if (attrs & DMA_ATTR_WRITE_COMBINE)
-		return pgprot_writecombine(prot);
-	return pgprot_noncached(prot);
-}
-
 static inline void dma_sync_virt(void *addr, size_t size,
 		enum dma_data_direction dir)
 {
@@ -126,17 +112,17 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
 	} while (left);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	dma_sync_phys(paddr, size, dir);
 }
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush())
 		dma_sync_phys(paddr, size, dir);
 }
 #endif
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index f589aa8f47d9..1e8d00793784 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -292,8 +292,9 @@ vmalloc_fault:
 		 * Do _not_ use "tsk" here. We might be inside
 		 * an interrupt in the middle of a task switch..
 		 */
-		int offset = __pgd_offset(address);
+		int offset = pgd_index(address);
 		pgd_t *pgd, *pgd_k;
+		p4d_t *p4d, *p4d_k;
 		pud_t *pud, *pud_k;
 		pmd_t *pmd, *pmd_k;
 		pte_t *pte_k;
@@ -305,8 +306,13 @@ vmalloc_fault:
 			goto no_context;
 		set_pgd(pgd, *pgd_k);
 
-		pud = pud_offset(pgd, address);
-		pud_k = pud_offset(pgd_k, address);
+		p4d = p4d_offset(pgd, address);
+		p4d_k = p4d_offset(pgd_k, address);
+		if (!p4d_present(*p4d_k))
+			goto no_context;
+
+		pud = pud_offset(p4d, address);
+		pud_k = pud_offset(p4d_k, address);
 		if (!pud_present(*pud_k))
 			goto no_context;
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index cef152234312..77ffece9c270 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -25,11 +25,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
 		      unsigned long sz)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
-	pud = pud_alloc(mm, pgd, addr);
+	p4d = p4d_alloc(mm, pgd, addr);
+	pud = pud_alloc(mm, p4d, addr);
 	if (pud)
 		pte = (pte_t *)pmd_alloc(mm, pud, addr);
 
@@ -40,14 +42,18 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
 		       unsigned long sz)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd = NULL;
 
 	pgd = pgd_offset(mm, addr);
 	if (pgd_present(*pgd)) {
-		pud = pud_offset(pgd, addr);
-		if (pud_present(*pud))
-			pmd = pmd_offset(pud, addr);
+		p4d = p4d_offset(pgd, addr);
+		if (p4d_present(*p4d)) {
+			pud = pud_offset(p4d, addr);
+			if (pud_present(*pud))
+				pmd = pmd_offset(pud, addr);
+		}
 	}
 	return (pte_t *) pmd;
 }
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 8a038b30d3c4..79684000de0e 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -239,9 +239,9 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 	unsigned long vaddr;
 
 	vaddr = start;
-	i = __pgd_offset(vaddr);
-	j = __pud_offset(vaddr);
-	k = __pmd_offset(vaddr);
+	i = pgd_index(vaddr);
+	j = pud_index(vaddr);
+	k = pmd_index(vaddr);
 	pgd = pgd_base + i;
 
 	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
@@ -269,37 +269,46 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 #endif
 }
 
-unsigned __weak platform_maar_init(unsigned num_pairs)
+struct maar_walk_info {
+	struct maar_config cfg[16];
+	unsigned int num_cfg;
+};
+
+static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
+			 void *data)
 {
-	struct maar_config cfg[BOOT_MEM_MAP_MAX];
-	unsigned i, num_configured, num_cfg = 0;
-
-	for (i = 0; i < boot_mem_map.nr_map; i++) {
-		switch (boot_mem_map.map[i].type) {
-		case BOOT_MEM_RAM:
-		case BOOT_MEM_INIT_RAM:
-			break;
-		default:
-			continue;
-		}
+	struct maar_walk_info *wi = data;
+	struct maar_config *cfg = &wi->cfg[wi->num_cfg];
+	unsigned int maar_align;
 
-		/* Round lower up */
-		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-		cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;
+	/* MAAR registers hold physical addresses right shifted by 4 bits */
+	maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);
 
-		/* Round upper down */
-		cfg[num_cfg].upper = boot_mem_map.map[i].addr +
-					boot_mem_map.map[i].size;
-		cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;
+	/* Fill in the MAAR config entry */
+	cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
+	cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
+	cfg->attrs = MIPS_MAAR_S;
 
-		cfg[num_cfg].attrs = MIPS_MAAR_S;
-		num_cfg++;
-	}
+	/* Ensure we don't overflow the cfg array */
+	if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
+		wi->num_cfg++;
+
+	return 0;
+}
+
+
+unsigned __weak platform_maar_init(unsigned num_pairs)
+{
+	unsigned int num_configured;
+	struct maar_walk_info wi;
 
-	num_configured = maar_config(cfg, num_cfg, num_pairs);
-	if (num_configured < num_cfg)
-		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
-			num_pairs, num_cfg);
+	wi.num_cfg = 0;
+	walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);
+
+	num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
+	if (num_configured < wi.num_cfg)
+		pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
+			num_pairs, wi.num_cfg);
 
 	return num_configured;
 }
@@ -382,33 +391,6 @@ void maar_init(void)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-int page_is_ram(unsigned long pagenr)
-{
-	int i;
-
-	for (i = 0; i < boot_mem_map.nr_map; i++) {
-		unsigned long addr, end;
-
-		switch (boot_mem_map.map[i].type) {
-		case BOOT_MEM_RAM:
-		case BOOT_MEM_INIT_RAM:
-			break;
-		default:
-			/* not usable memory */
-			continue;
-		}
-
-		addr = PFN_UP(boot_mem_map.map[i].addr);
-		end = PFN_DOWN(boot_mem_map.map[i].addr +
-			       boot_mem_map.map[i].size);
-
-		if (pagenr >= addr && pagenr < end)
-			return 1;
-	}
-
-	return 0;
-}
-
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
@@ -443,7 +425,7 @@ void __init paging_init(void)
 static struct kcore_list kcore_kseg0;
 #endif
 
-static inline void mem_init_free_highmem(void)
+static inline void __init mem_init_free_highmem(void)
 {
 #ifdef CONFIG_HIGHMEM
 	unsigned long tmp;
@@ -454,7 +436,7 @@ static inline void mem_init_free_highmem(void)
 	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
 		struct page *page = pfn_to_page(tmp);
 
-		if (!page_is_ram(tmp))
+		if (!memblock_is_memory(PFN_PHYS(tmp)))
 			SetPageReserved(page);
 		else
 			free_highmem_page(page);
@@ -464,6 +446,12 @@ static inline void mem_init_free_highmem(void)
 
 void __init mem_init(void)
 {
+	/*
+	 * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
+	 * bits to hold a full 32b physical address on MIPS32 systems.
+	 */
+	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));
+
 #ifdef CONFIG_HIGHMEM
 #ifdef CONFIG_DISCONTIGMEM
 #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
@@ -520,6 +508,51 @@ void __ref free_initmem(void)
 	free_initmem_default(POISON_FREE_INITMEM);
 }
 
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+	return node_distance(cpu_to_node(from), cpu_to_node(to));
+}
+
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
+				   size_t align)
+{
+	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
+				      MEMBLOCK_ALLOC_ACCESSIBLE,
+				      cpu_to_node(cpu));
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+	memblock_free_early(__pa(ptr), size);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+	unsigned long delta;
+	unsigned int cpu;
+	int rc;
+
+	/*
+	 * Always reserve area for module percpu variables.  That's
+	 * what the legacy allocator did.
+	 */
+	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+				    pcpu_cpu_distance,
+				    pcpu_fc_alloc, pcpu_fc_free);
+	if (rc < 0)
+		panic("Failed to initialize percpu areas.");
+
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu)
+		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 unsigned long pgd_current[NR_CPUS];
 #endif
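The maar_res_walk() hunk above replaces the old hand-rolled +0xffff/~0xffff rounding with ALIGN()/ALIGN_DOWN() on a maar_align derived from MIPS_MAAR_ADDR_SHIFT. Below is a standalone sketch of that arithmetic; taking MIPS_MAAR_ADDR_SHIFT as 12 is an assumption for illustration, under which maar_align works out to 64 KiB and the result matches the old rounding exactly.

/*
 * Standalone sketch of the MAAR bounds arithmetic used by maar_res_walk().
 * MIPS_MAAR_ADDR_SHIFT is assumed to be 12 for illustration; with the
 * extra 4-bit right shift of physical addresses this reproduces the
 * 64 KiB (0xffff) rounding the old code did by hand.
 */
#include <stdio.h>
#include <stdint.h>

#define MIPS_MAAR_ADDR_SHIFT	12	/* assumption for this sketch */
#define BIT(n)			(1ULL << (n))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define PAGE_SHIFT		12
#define PFN_PHYS(pfn)		((uint64_t)(pfn) << PAGE_SHIFT)

int main(void)
{
	uint64_t maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);	/* 64 KiB */
	uint64_t start_pfn = 0x101, nr_pages = 0x2fe;		/* sample region */

	uint64_t lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
	uint64_t upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;

	/* Old code: lower = (addr + 0xffff) & ~0xffff; upper = (end & ~0xffff) - 1 */
	printf("region [%#llx, %#llx) -> MAAR lower %#llx upper %#llx\n",
	       (unsigned long long)PFN_PHYS(start_pfn),
	       (unsigned long long)PFN_PHYS(start_pfn + nr_pages),
	       (unsigned long long)lower, (unsigned long long)upper);
	return 0;
}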
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 1601d90b087b..8317f337a86e 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -78,11 +78,15 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 	flush_cache_all();
 	BUG_ON(address >= end);
 	do {
+		p4d_t *p4d;
 		pud_t *pud;
 		pmd_t *pmd;
 
 		error = -ENOMEM;
-		pud = pud_alloc(&init_mm, dir, address);
+		p4d = p4d_alloc(&init_mm, dir, address);
+		if (!p4d)
+			break;
+		pud = pud_alloc(&init_mm, p4d, address);
 		if (!pud)
 			break;
 		pmd = pmd_alloc(&init_mm, pud, address);
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index d79f2b432318..00fe90c6db3e 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -20,33 +20,6 @@ unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);
 
-/* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
-
-static int mmap_is_legacy(struct rlimit *rlim_stack)
-{
-	if (current->personality & ADDR_COMPAT_LAYOUT)
-		return 1;
-
-	if (rlim_stack->rlim_cur == RLIM_INFINITY)
-		return 1;
-
-	return sysctl_legacy_va_layout;
-}
-
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
-{
-	unsigned long gap = rlim_stack->rlim_cur;
-
-	if (gap < MIN_GAP)
-		gap = MIN_GAP;
-	else if (gap > MAX_GAP)
-		gap = MAX_GAP;
-
-	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
-}
-
 #define COLOUR_ALIGN(addr, pgoff)				\
 	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
 	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
@@ -144,63 +117,6 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
 			addr0, len, pgoff, flags, DOWN);
 }
 
-unsigned long arch_mmap_rnd(void)
-{
-	unsigned long rnd;
-
-#ifdef CONFIG_COMPAT
-	if (TASK_IS_32BIT_ADDR)
-		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
-	else
-#endif /* CONFIG_COMPAT */
-		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
-
-	return rnd << PAGE_SHIFT;
-}
-
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
-	unsigned long random_factor = 0UL;
-
-	if (current->flags & PF_RANDOMIZE)
-		random_factor = arch_mmap_rnd();
-
-	if (mmap_is_legacy(rlim_stack)) {
-		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-		mm->get_unmapped_area = arch_get_unmapped_area;
-	} else {
-		mm->mmap_base = mmap_base(random_factor, rlim_stack);
-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-	}
-}
-
-static inline unsigned long brk_rnd(void)
-{
-	unsigned long rnd = get_random_long();
-
-	rnd = rnd << PAGE_SHIFT;
-	/* 8MB for 32bit, 256MB for 64bit */
-	if (TASK_IS_32BIT_ADDR)
-		rnd = rnd & 0x7ffffful;
-	else
-		rnd = rnd & 0xffffffful;
-
-	return rnd;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-	unsigned long base = mm->brk;
-	unsigned long ret;
-
-	ret = PAGE_ALIGN(base + brk_rnd());
-
-	if (ret < mm->brk)
-		return mm->brk;
-
-	return ret;
-}
-
 bool __virt_addr_valid(const volatile void *kaddr)
 {
 	unsigned long vaddr = (unsigned long)kaddr;
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 56e4f8bffd4c..c5578897a4fa 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -187,7 +187,7 @@ static void set_prefetch_parameters(void)
 		}
 		break;
 
-	case CPU_LOONGSON3:
+	case CPU_LOONGSON64:
 		/* Loongson-3 only support the Pref_Load/Pref_Store. */
 		pref_bias_clear_store = 128;
 		pref_bias_copy_load = 128;
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index e2a33adc0f29..37c7a01427d2 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -12,6 +12,7 @@
 #include <asm/fixmap.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
 
 void pgd_init(unsigned long page)
 {
@@ -30,12 +31,32 @@ void pgd_init(unsigned long page)
 	}
 }
 
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
+pmd_t mk_pmd(struct page *page, pgprot_t prot)
+{
+	pmd_t pmd;
+
+	pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
+
+	return pmd;
+}
+
+
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmd)
+{
+	*pmdp = pmd;
+	flush_tlb_all();
+}
+#endif /* defined(CONFIG_TRANSPARENT_HUGEPAGE) */
+
 void __init pagetable_init(void)
 {
 	unsigned long vaddr;
 	pgd_t *pgd_base;
 #ifdef CONFIG_HIGHMEM
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -61,8 +82,9 @@ void __init pagetable_init(void)
 	vaddr = PKMAP_BASE;
 	fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
 
-	pgd = swapper_pg_dir + __pgd_offset(vaddr);
-	pud = pud_offset(pgd, vaddr);
+	pgd = swapper_pg_dir + pgd_index(vaddr);
+	p4d = p4d_offset(pgd, vaddr);
+	pud = pud_offset(p4d, vaddr);
 	pmd = pmd_offset(pud, vaddr);
 	pte = pte_offset_kernel(pmd, vaddr);
 	pkmap_page_table = pte;
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 394673991bab..dbdbfe5d8408 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -221,13 +221,26 @@ static inline int __init mips_sc_probe(void)
 	else
 		return 0;
 
-	/*
-	 * According to config2 it would be 5-ways, but that is contradicted
-	 * by all documentation.
-	 */
-	if (current_cpu_type() == CPU_JZRISC &&
-				mips_machtype == MACH_INGENIC_JZ4770)
-		c->scache.ways = 4;
+	if (current_cpu_type() == CPU_XBURST) {
+		switch (mips_machtype) {
+		/*
+		 * According to config2 it would be 5-ways, but that is
+		 * contradicted by all documentation.
+		 */
+		case MACH_INGENIC_JZ4770:
+			c->scache.ways = 4;
+			break;
+
+		/*
+		 * According to config2 it would be 5-ways and 512-sets,
+		 * but that is contradicted by all documentation.
+		 */
+		case MACH_INGENIC_X1000:
+			c->scache.sets = 256;
+			c->scache.ways = 4;
+			break;
+		}
+	}
 
 	c->scache.waysize = c->scache.sets * c->scache.linesz;
 	c->scache.waybit = __ffs(c->scache.waysize);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c13e46ced425..d7a9d5f211f0 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -35,10 +35,10 @@ extern void build_tlb_refill_handler(void);
 static inline void flush_micro_tlb(void)
 {
 	switch (current_cpu_type()) {
-	case CPU_LOONGSON2:
+	case CPU_LOONGSON2EF:
 		write_c0_diag(LOONGSON_DIAG_ITLB);
 		break;
-	case CPU_LOONGSON3:
+	case CPU_LOONGSON64:
 		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
 		break;
 	default:
@@ -295,6 +295,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 {
 	unsigned long flags;
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
@@ -320,7 +321,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	mtc0_tlbw_hazard();
 	tlb_probe();
 	tlb_probe_hazard();
-	pudp = pud_offset(pgdp, address);
+	p4dp = p4d_offset(pgdp, address);
+	pudp = pud_offset(p4dp, address);
 	pmdp = pmd_offset(pudp, address);
 	idx = read_c0_index();
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
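The next diff removes tlb-r8k.c entirely; its Makefile hook was already dropped above. The deleted flush loops select a TLB set with entry >> TFP_TLB_SET_SHIFT while walking TFP_TLB_SIZE entries, and a tiny illustrative calculation (constants copied from the deleted file) confirms the geometry its probe_tlb() claims: 3 sets of 128 entries.

/*
 * Sketch of the R8000/TFP TLB geometry implied by the deleted tlb-r8k.c:
 * TFP_TLB_SIZE entries, set selected by entry >> TFP_TLB_SET_SHIFT.
 * Purely illustrative arithmetic mirroring the file's constants.
 */
#include <stdio.h>

#define TFP_TLB_SIZE		384
#define TFP_TLB_SET_SHIFT	7

int main(void)
{
	int entries_per_set = 1 << TFP_TLB_SET_SHIFT;	/* 128 */
	int sets = TFP_TLB_SIZE / entries_per_set;	/* 3, as probe_tlb() said */
	int entry;

	printf("%d sets of %d entries\n", sets, entries_per_set);

	/* First entry of each set, as local_flush_tlb_all() would visit them */
	for (entry = 0; entry < TFP_TLB_SIZE; entry += entries_per_set)
		printf("entry %3d -> set %d\n", entry, entry >> TFP_TLB_SET_SHIFT);
	return 0;
}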
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
deleted file mode 100644
index c1e9e144007e..000000000000
--- a/arch/mips/mm/tlb-r8k.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-
-extern void build_tlb_refill_handler(void);
-
-#define TFP_TLB_SIZE		384
-#define TFP_TLB_SET_SHIFT	7
-
-/* CP0 hazard avoidance. */
-#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
-				     "nop; nop; nop; nop; nop; nop;\n\t" \
-				     ".set reorder\n\t")
-
-void local_flush_tlb_all(void)
-{
-	unsigned long flags;
-	unsigned long old_ctx;
-	int entry;
-
-	local_irq_save(flags);
-	/* Save old context and create impossible VPN2 value */
-	old_ctx = read_c0_entryhi();
-	write_c0_entrylo(0);
-
-	for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
-		write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
-		write_c0_vaddr(entry << PAGE_SHIFT);
-		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
-		mtc0_tlbw_hazard();
-		tlb_write();
-	}
-	tlbw_use_hazard();
-	write_c0_entryhi(old_ctx);
-	local_irq_restore(flags);
-}
-
-void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-	unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	int cpu = smp_processor_id();
-	unsigned long flags;
-	int oldpid, newpid, size;
-
-	if (!cpu_context(cpu, mm))
-		return;
-
-	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	size = (size + 1) >> 1;
-
-	local_irq_save(flags);
-
-	if (size > TFP_TLB_SIZE / 2) {
-		drop_mmu_context(mm);
-		goto out_restore;
-	}
-
-	oldpid = read_c0_entryhi();
-	newpid = cpu_asid(cpu, mm);
-
-	write_c0_entrylo(0);
-
-	start &= PAGE_MASK;
-	end += (PAGE_SIZE - 1);
-	end &= PAGE_MASK;
-	while (start < end) {
-		signed long idx;
-
-		write_c0_vaddr(start);
-		write_c0_entryhi(start);
-		start += PAGE_SIZE;
-		tlb_probe();
-		idx = read_c0_tlbset();
-		if (idx < 0)
-			continue;
-
-		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
-		tlb_write();
-	}
-	write_c0_entryhi(oldpid);
-
-out_restore:
-	local_irq_restore(flags);
-}
-
-/* Usable for KV1 addresses only! */
-void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	unsigned long size, flags;
-
-	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	size = (size + 1) >> 1;
-
-	if (size > TFP_TLB_SIZE / 2) {
-		local_flush_tlb_all();
-		return;
-	}
-
-	local_irq_save(flags);
-
-	write_c0_entrylo(0);
-
-	start &= PAGE_MASK;
-	end += (PAGE_SIZE - 1);
-	end &= PAGE_MASK;
-	while (start < end) {
-		signed long idx;
-
-		write_c0_vaddr(start);
-		write_c0_entryhi(start);
-		start += PAGE_SIZE;
-		tlb_probe();
-		idx = read_c0_tlbset();
-		if (idx < 0)
-			continue;
-
-		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
-		tlb_write();
-	}
-
-	local_irq_restore(flags);
-}
-
-void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	int cpu = smp_processor_id();
-	unsigned long flags;
-	int oldpid, newpid;
-	signed long idx;
-
-	if (!cpu_context(cpu, vma->vm_mm))
-		return;
-
-	newpid = cpu_asid(cpu, vma->vm_mm);
-	page &= PAGE_MASK;
-	local_irq_save(flags);
-	oldpid = read_c0_entryhi();
-	write_c0_vaddr(page);
-	write_c0_entryhi(newpid);
-	tlb_probe();
-	idx = read_c0_tlbset();
-	if (idx < 0)
-		goto finish;
-
-	write_c0_entrylo(0);
-	write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
-	tlb_write();
-
-finish:
-	write_c0_entryhi(oldpid);
-	local_irq_restore(flags);
-}
-
-/*
- * We will need multiple versions of update_mmu_cache(), one that just
- * updates the TLB with the new pte(s), and another which also checks
- * for the R4k "end of page" hardware bug and does the needy.
- */
-void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
-{
-	unsigned long flags;
-	pgd_t *pgdp;
-	pmd_t *pmdp;
-	pte_t *ptep;
-	int pid;
-
-	/*
-	 * Handle debugger faulting in for debugee.
-	 */
-	if (current->active_mm != vma->vm_mm)
-		return;
-
-	pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
-
-	local_irq_save(flags);
-	address &= PAGE_MASK;
-	write_c0_vaddr(address);
-	write_c0_entryhi(pid);
-	pgdp = pgd_offset(vma->vm_mm, address);
-	pmdp = pmd_offset(pgdp, address);
-	ptep = pte_offset_map(pmdp, address);
-	tlb_probe();
-
-	write_c0_entrylo(pte_val(*ptep++) >> 6);
-	tlb_write();
-
-	write_c0_entryhi(pid);
-	local_irq_restore(flags);
-}
-
-static void probe_tlb(unsigned long config)
-{
-	struct cpuinfo_mips *c = &current_cpu_data;
-
-	c->tlbsize = 3 * 128;		/* 3 sets each 128 entries */
-}
-
-void tlb_init(void)
-{
-	unsigned int config = read_c0_config();
-	unsigned long status;
-
-	probe_tlb(config);
-
-	status = read_c0_status();
-	status &= ~(ST0_UPS | ST0_KPS);
-#ifdef CONFIG_PAGE_SIZE_4KB
-	status |= (TFP_PAGESIZE_4K << 32) | (TFP_PAGESIZE_4K << 36);
-#elif defined(CONFIG_PAGE_SIZE_8KB)
-	status |= (TFP_PAGESIZE_8K << 32) | (TFP_PAGESIZE_8K << 36);
-#elif defined(CONFIG_PAGE_SIZE_16KB)
-	status |= (TFP_PAGESIZE_16K << 32) | (TFP_PAGESIZE_16K << 36);
-#elif defined(CONFIG_PAGE_SIZE_64KB)
-	status |= (TFP_PAGESIZE_64K << 32) | (TFP_PAGESIZE_64K << 36);
-#endif
-	write_c0_status(status);
-
-	write_c0_wired(0);
-
-	local_flush_tlb_all();
-
-	build_tlb_refill_handler();
-}
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 144ceb0fba88..344e6e9ea43b 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -545,7 +545,6 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		tlbw(p);
 		break;
 
-	case CPU_R4300:
 	case CPU_5KC:
 	case CPU_TX49XX:
 	case CPU_PR4450:
@@ -572,8 +571,8 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	case CPU_BMIPS4350:
 	case CPU_BMIPS4380:
 	case CPU_BMIPS5000:
-	case CPU_LOONGSON2:
-	case CPU_LOONGSON3:
+	case CPU_LOONGSON2EF:
+	case CPU_LOONGSON64:
 	case CPU_R5500:
 		if (m4kc_tlbp_war())
 			uasm_i_nop(p);
@@ -604,13 +603,12 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 
 	case CPU_VR4131:
 	case CPU_VR4133:
-	case CPU_R5432:
 		uasm_i_nop(p);
 		uasm_i_nop(p);
 		tlbw(p);
 		break;
 
-	case CPU_JZRISC:
+	case CPU_XBURST:
 		tlbw(p);
 		uasm_i_nop(p);
 		break;
@@ -631,7 +629,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 		return;
 	}
 
-	if (cpu_has_rixi && _PAGE_NO_EXEC) {
+	if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
 		if (fill_includes_sw_bits) {
 			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
 		} else {
@@ -655,6 +653,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 				   int restore_scratch)
 {
 	if (restore_scratch) {
+		/*
+		 * Ensure the MFC0 below observes the value written to the
+		 * KScratch register by the prior MTC0.
+		 */
+		if (scratch_reg >= 0)
+			uasm_i_ehb(p);
+
 		/* Reset default page size */
 		if (PM_DEFAULT_MASK >> 16) {
 			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
@@ -669,12 +674,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 			uasm_il_b(p, r, lid);
 		}
-		if (scratch_reg >= 0) {
-			uasm_i_ehb(p);
+		if (scratch_reg >= 0)
 			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-		} else {
+		else
 			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-		}
 	} else {
 		/* Reset default page size */
 		if (PM_DEFAULT_MASK >> 16) {
@@ -923,6 +926,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	}
 	if (mode != not_refill && check_for_high_segbits) {
 		uasm_l_large_segbits_fault(l, *p);
+
+		if (mode == refill_scratch && scratch_reg >= 0)
+			uasm_i_ehb(p);
+
 		/*
 		 * We get here if we are an xsseg address, or if we are
 		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
@@ -941,12 +948,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		uasm_i_jr(p, ptr);
 
 		if (mode == refill_scratch) {
-			if (scratch_reg >= 0) {
-				uasm_i_ehb(p);
+			if (scratch_reg >= 0)
 				UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-			} else {
+			else
 				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-			}
 		} else {
 			uasm_i_nop(p);
 		}
@@ -1372,7 +1377,7 @@ static void build_r4000_tlb_refill_handler(void)
 	switch (boot_cpu_type()) {
 	default:
 		if (sizeof(long) == 4) {
-	case CPU_LOONGSON2:
+	case CPU_LOONGSON2EF:
 		/* Loongson2 ebase is different than r4k, we have more space */
 			if ((p - tlb_handler) > 64)
 				panic("TLB refill handler space exceeded");
@@ -2609,21 +2614,11 @@ void build_tlb_refill_handler(void)
 	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
 #endif
 
-	switch (current_cpu_type()) {
-	case CPU_R2000:
-	case CPU_R3000:
-	case CPU_R3000A:
-	case CPU_R3081E:
-	case CPU_TX3912:
-	case CPU_TX3922:
-	case CPU_TX3927:
+	if (cpu_has_3kex) {
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-		if (cpu_has_local_ebase)
-			build_r3000_tlb_refill_handler();
 		if (!run_once) {
-			if (!cpu_has_local_ebase)
-				build_r3000_tlb_refill_handler();
 			build_setup_pgd();
+			build_r3000_tlb_refill_handler();
 			build_r3000_tlb_load_handler();
 			build_r3000_tlb_store_handler();
 			build_r3000_tlb_modify_handler();
@@ -2633,34 +2628,27 @@ void build_tlb_refill_handler(void)
 #else
 		panic("No R3000 TLB refill handler");
 #endif
-		break;
+		return;
+	}
 
-	case CPU_R8000:
-		panic("No R8000 TLB refill handler yet");
-		break;
+	if (cpu_has_ldpte)
+		setup_pw();
 
-	default:
+	if (!run_once) {
+		scratch_reg = allocate_kscratch();
+		build_setup_pgd();
+		build_r4000_tlb_load_handler();
+		build_r4000_tlb_store_handler();
+		build_r4000_tlb_modify_handler();
 		if (cpu_has_ldpte)
-			setup_pw();
-
-		if (!run_once) {
-			scratch_reg = allocate_kscratch();
-			build_setup_pgd();
-			build_r4000_tlb_load_handler();
-			build_r4000_tlb_store_handler();
-			build_r4000_tlb_modify_handler();
-			if (cpu_has_ldpte)
-				build_loongson3_tlb_refill_handler();
-			else if (!cpu_has_local_ebase)
-				build_r4000_tlb_refill_handler();
-			flush_tlb_handlers();
-			run_once++;
-		}
-		if (cpu_has_local_ebase)
+			build_loongson3_tlb_refill_handler();
+		else
 			build_r4000_tlb_refill_handler();
-		if (cpu_has_xpa)
-			config_xpa_params();
-		if (cpu_has_htw)
-			config_htw_params();
+		flush_tlb_handlers();
+		run_once++;
 	}
+	if (cpu_has_xpa)
+		config_xpa_params();
+	if (cpu_has_htw)
+		config_htw_params();
 }
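The closing tlbex.c hunks replace a switch that enumerated seven R3000-class CPU types (plus a dead CPU_R8000 arm) with a single cpu_has_3kex feature test. Below is a hypothetical user-space sketch of that refactor pattern; all names in it are invented for illustration and are not the kernel's.

/*
 * Hypothetical sketch of the refactor in build_tlb_refill_handler():
 * dispatch on a capability flag instead of enumerating every CPU type.
 * All names here are invented for illustration.
 */
#include <stdio.h>
#include <stdbool.h>

struct cpu {
	const char *name;
	bool has_3kex;		/* R3000-style (MIPS-I) exception/TLB model */
};

static void build_r3000_handlers(void) { puts("  built R3000-style handlers"); }
static void build_r4000_handlers(void) { puts("  built R4000-style handlers"); }

static void build_tlb_refill_handler(const struct cpu *c)
{
	printf("%s:\n", c->name);
	/* One feature test replaces the old switch over seven CPU types. */
	if (c->has_3kex) {
		build_r3000_handlers();
		return;
	}
	build_r4000_handlers();
}

int main(void)
{
	const struct cpu cpus[] = {
		{ "R3000",      true  },
		{ "R4000",      false },
		{ "Loongson64", false },
	};
	for (unsigned i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		build_tlb_refill_handler(&cpus[i]);
	return 0;
}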