Diffstat (limited to 'arch/mips/mm')
 arch/mips/mm/Makefile      |  1
 arch/mips/mm/c-r4k.c       |  3
 arch/mips/mm/cache.c       |  2
 arch/mips/mm/dma-default.c |  2
 arch/mips/mm/gup.c         | 17
 arch/mips/mm/init.c        |  6
 arch/mips/mm/pgtable-64.c  | 14
 arch/mips/mm/sc-debugfs.c  | 81
 arch/mips/mm/sc-mips.c     | 65
 arch/mips/mm/tlbex.c       | 98
 10 files changed, 214 insertions, 75 deletions
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 67ede4ef9b8d..b4c64bd3f723 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_IP22_CPU_SCACHE)	+= sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE)	+= sc-r5k.o
 obj-$(CONFIG_RM7000_CPU_SCACHE)	+= sc-rm7k.o
 obj-$(CONFIG_MIPS_CPU_SCACHE)	+= sc-mips.o
+obj-$(CONFIG_SCACHE_DEBUGFS)	+= sc-debugfs.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 5d3a25e1cfae..caac3d747a90 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -587,7 +587,8 @@ static inline void local_r4k_flush_cache_page(void *args)
	 * another ASID than the current one.
	 */
	map_coherent = (cpu_has_dc_aliases &&
-			page_mapped(page) && !Page_dcache_dirty(page));
+			page_mapcount(page) &&
+			!Page_dcache_dirty(page));
	if (map_coherent)
		vaddr = kmap_coherent(page, addr);
	else
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index aab218c36e0d..3f159caf6dbc 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -106,7 +106,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
		unsigned long addr = (unsigned long) page_address(page);

		if (pages_do_alias(addr, vmaddr)) {
-			if (page_mapped(page) && !Page_dcache_dirty(page)) {
+			if (page_mapcount(page) && !Page_dcache_dirty(page)) {
				void *kaddr;

				kaddr = kmap_coherent(page, vmaddr);
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index d8117be729a2..730d394ce5f0 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,

	gfp = massage_gfp_flags(dev, gfp);

-	if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
+	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
	if (!page)
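
The dma-default.c hunk above replaces an open-coded GFP_ATOMIC bit test with
gfpflags_allow_blocking(). For reference only (not part of this diff), that
helper is defined in include/linux/gfp.h around this kernel version roughly as
follows; it keys off __GFP_DIRECT_RECLAIM, the one bit that actually decides
whether the allocator may block, whereas GFP_ATOMIC is a composite of several
flags and testing !(gfp & GFP_ATOMIC) was therefore fragile:

	/* Approximate definition, shown for illustration */
	static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
	{
		return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
	}
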
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 349995d19c7f..1afd87c999b0 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -87,8 +87,6 @@ static int gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end,
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
-		if (PageTail(page))
-			get_huge_page_tail(page);
		(*nr)++;
		page++;
		refs++;
@@ -109,18 +107,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
-		/*
-		 * The pmd_trans_splitting() check below explains why
-		 * pmdp_splitting_flush has to flush the tlb, to stop
-		 * this gup-fast code from running while we set the
-		 * splitting bit in the pmd. Returning zero will take
-		 * the slow path that will call wait_split_huge_page()
-		 * if the pmd is still in splitting state. gup-fast
-		 * can't because it has irq disabled and
-		 * wait_split_huge_page() would never return as the
-		 * tlb flush IPI wouldn't run.
-		 */
-		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_huge(pmd))) {
			if (!gup_huge_pmd(pmd, addr, next, write, pages,nr))
@@ -153,8 +140,6 @@ static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end,
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
-		if (PageTail(page))
-			get_huge_page_tail(page);
		(*nr)++;
		page++;
		refs++;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 8770e619185e..7e5fa0938c21 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -165,7 +165,7 @@ void copy_user_highpage(struct page *to, struct page *from,

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
-	    page_mapped(from) && !Page_dcache_dirty(from)) {
+	    page_mapcount(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
@@ -187,7 +187,7 @@ void copy_to_user_page(struct vm_area_struct *vma,
		       unsigned long len)
 {
	if (cpu_has_dc_aliases &&
-	    page_mapped(page) && !Page_dcache_dirty(page)) {
+	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
@@ -205,7 +205,7 @@ void copy_from_user_page(struct vm_area_struct *vma,
		       unsigned long len)
 {
	if (cpu_has_dc_aliases &&
-	    page_mapped(page) && !Page_dcache_dirty(page)) {
+	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index e8adc0069d66..ce4473e7c0d2 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -62,20 +62,6 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
 }
 #endif

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-
-void pmdp_splitting_flush(struct vm_area_struct *vma,
-			 unsigned long address,
-			 pmd_t *pmdp)
-{
-	if (!pmd_trans_splitting(*pmdp)) {
-		pmd_t pmd = pmd_mksplitting(*pmdp);
-		set_pmd_at(vma->vm_mm, address, pmdp, pmd);
-	}
-}
-
-#endif
-
 pmd_t mk_pmd(struct page *page, pgprot_t prot)
 {
	pmd_t pmd;
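
The gup.c and pgtable-64.c hunks above (together with the _PAGE_SPLITTING
removal in tlbex.c below) appear to be the MIPS portion of the v4.5
transparent-hugepage refcounting rework: PMDs are no longer marked as
"splitting", so pmd_trans_splitting()/pmdp_splitting_flush() disappear, and
tail pages no longer carry separate reference counts, so get_huge_page_tail()
is gone. The page_mapped() to page_mapcount() switch in the cache-alias
checks follows from the same series; a hypothetical helper (an assumption
about the shared rationale, not code from this diff) showing the test these
call sites have in common:

	/*
	 * Hypothetical: map through kmap_coherent() only when some user
	 * mapping of this 4K page exists and the kernel's view is clean.
	 * page_mapcount() reads this page's own counter, avoiding
	 * page_mapped()'s compound-page handling.
	 */
	static bool mips_can_map_coherent(struct page *page)
	{
		return cpu_has_dc_aliases && page_mapcount(page) &&
		       !Page_dcache_dirty(page);
	}
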
diff --git a/arch/mips/mm/sc-debugfs.c b/arch/mips/mm/sc-debugfs.c
new file mode 100644
index 000000000000..5eefe3281b24
--- /dev/null
+++ b/arch/mips/mm/sc-debugfs.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/bcache.h>
+#include <asm/debug.h>
+#include <asm/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+
+static ssize_t sc_prefetch_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	bool enabled = bc_prefetch_is_enabled();
+	char buf[3];
+
+	buf[0] = enabled ? 'Y' : 'N';
+	buf[1] = '\n';
+	buf[2] = 0;
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t sc_prefetch_write(struct file *file,
+				 const char __user *user_buf,
+				 size_t count, loff_t *ppos)
+{
+	char buf[32];
+	ssize_t buf_size;
+	bool enabled;
+	int err;
+
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+	err = strtobool(buf, &enabled);
+	if (err)
+		return err;
+
+	if (enabled)
+		bc_prefetch_enable();
+	else
+		bc_prefetch_disable();
+
+	return count;
+}
+
+static const struct file_operations sc_prefetch_fops = {
+	.open = simple_open,
+	.llseek = default_llseek,
+	.read = sc_prefetch_read,
+	.write = sc_prefetch_write,
+};
+
+static int __init sc_debugfs_init(void)
+{
+	struct dentry *dir, *file;
+
+	if (!mips_debugfs_dir)
+		return -ENODEV;
+
+	dir = debugfs_create_dir("l2cache", mips_debugfs_dir);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+
+	file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir,
+				   NULL, &sc_prefetch_fops);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	return 0;
+}
+late_initcall(sc_debugfs_init);
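
The new sc-debugfs.c exposes the bcache prefetch hooks as a boolean debugfs
file. A minimal userspace sketch of how it could be exercised, assuming
debugfs is mounted at /sys/kernel/debug and mips_debugfs_dir names a "mips"
directory (both assumptions, not stated in this diff):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical path; depends on the debugfs mount point */
		const char *path = "/sys/kernel/debug/mips/l2cache/prefetch";
		FILE *f = fopen(path, "r+");
		int state;

		if (!f)
			return 1;
		state = fgetc(f);		/* 'Y' or 'N' */
		printf("L2 prefetch: %c\n", state);
		rewind(f);
		fputs("1\n", f);		/* strtobool() accepts 1/0/y/n */
		fclose(f);
		return 0;
	}
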
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 53ea8391f9bb..3bd0597d9c3d 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -51,11 +51,69 @@ static void mips_sc_disable(void)
	/* L2 cache is permanently enabled */
 }

+static void mips_sc_prefetch_enable(void)
+{
+	unsigned long pftctl;
+
+	if (mips_cm_revision() < CM_REV_CM2_5)
+		return;
+
+	/*
+	 * If there is one or more L2 prefetch unit present then enable
+	 * prefetching for both code & data, for all ports.
+	 */
+	pftctl = read_gcr_l2_pft_control();
+	if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK) {
+		pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK;
+		pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK;
+		pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN_MSK;
+		write_gcr_l2_pft_control(pftctl);
+
+		pftctl = read_gcr_l2_pft_control_b();
+		pftctl |= CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK;
+		pftctl |= CM_GCR_L2_PFT_CONTROL_B_CEN_MSK;
+		write_gcr_l2_pft_control_b(pftctl);
+	}
+}
+
+static void mips_sc_prefetch_disable(void)
+{
+	unsigned long pftctl;
+
+	if (mips_cm_revision() < CM_REV_CM2_5)
+		return;
+
+	pftctl = read_gcr_l2_pft_control();
+	pftctl &= ~CM_GCR_L2_PFT_CONTROL_PFTEN_MSK;
+	write_gcr_l2_pft_control(pftctl);
+
+	pftctl = read_gcr_l2_pft_control_b();
+	pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK;
+	pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_CEN_MSK;
+	write_gcr_l2_pft_control_b(pftctl);
+}
+
+static bool mips_sc_prefetch_is_enabled(void)
+{
+	unsigned long pftctl;
+
+	if (mips_cm_revision() < CM_REV_CM2_5)
+		return false;
+
+	pftctl = read_gcr_l2_pft_control();
+	if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK))
+		return false;
+	return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN_MSK);
+}
+
 static struct bcache_ops mips_sc_ops = {
	.bc_enable = mips_sc_enable,
	.bc_disable = mips_sc_disable,
	.bc_wback_inv = mips_sc_wback_inv,
-	.bc_inv = mips_sc_inv
+	.bc_inv = mips_sc_inv,
+	.bc_prefetch_enable = mips_sc_prefetch_enable,
+	.bc_prefetch_disable = mips_sc_prefetch_disable,
+	.bc_prefetch_is_enabled = mips_sc_prefetch_is_enabled,
 };

 /*
@@ -162,13 +220,13 @@ static inline int __init mips_sc_probe(void)
		return 0;

	tmp = (config2 >> 8) & 0x0f;
-	if (0 <= tmp && tmp <= 7)
+	if (tmp <= 7)
		c->scache.sets = 64 << tmp;
	else
		return 0;

	tmp = (config2 >> 0) & 0x0f;
-	if (0 <= tmp && tmp <= 7)
+	if (tmp <= 7)
		c->scache.ways = tmp + 1;
	else
		return 0;
@@ -186,6 +244,7 @@ int mips_sc_init(void)
	int found = mips_sc_probe();

	if (found) {
		mips_sc_enable();
+		mips_sc_prefetch_enable();
		bcops = &mips_sc_ops;
	}
	return found;
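
The mips_sc_probe() hunks drop the "0 <= tmp" halves of two range checks;
tmp is unsigned there, so those comparisons were always true. A worked
example of the Config2 decode being bounded (made-up register value, for
illustration only):

	unsigned int config2 = 0x0247;	/* hypothetical: SS = 2, SA = 7 */
	unsigned int sets = 64 << ((config2 >> 8) & 0x0f);	/* 64 << 2 = 256 */
	unsigned int ways = ((config2 >> 0) & 0x0f) + 1;	/* 7 + 1  = 8   */
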
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 323d1d302f2b..482192cc8f2b 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -240,7 +240,6 @@ static void output_pgtable_bits_defines(void)
	pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
-	pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
 #endif
 #ifdef CONFIG_CPU_MIPSR2
	if (cpu_has_rixi) {
@@ -311,6 +310,7 @@ static struct uasm_label labels[128];
 static struct uasm_reloc relocs[128];

 static int check_for_high_segbits;
+static bool fill_includes_sw_bits;

 static unsigned int kscratch_used_mask;

@@ -630,8 +630,14 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
							unsigned int reg)
 {
-	if (cpu_has_rixi) {
-		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+	if (cpu_has_rixi && _PAGE_NO_EXEC) {
+		if (fill_includes_sw_bits) {
+			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+		} else {
+			UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+			UASM_i_ROTR(p, reg, reg,
+				    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		}
	} else {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -1005,21 +1011,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-	if (cpu_has_64bits) {
-		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
-		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-		if (cpu_has_rixi) {
-			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
-			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
-		} else {
-			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
-			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
-		}
-		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
-	} else {
+	if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);
 #ifdef CONFIG_XPA
@@ -1043,31 +1035,23 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
		uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
 #endif
+		return;
	}
-#else
+
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
-	if (cpu_has_rixi) {
-		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
-		if (r4k_250MHZhwbug())
-			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
-		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
-	} else {
-		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
-		if (r4k_250MHZhwbug())
-			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
-		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
-		if (r45k_bvahwbug())
-			uasm_i_mfc0(p, tmp, C0_INDEX);
-	}
+	build_convert_pte_to_entrylo(p, tmp);
+	if (r4k_250MHZhwbug())
+		UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+	UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+	build_convert_pte_to_entrylo(p, ptep);
+	if (r45k_bvahwbug())
+		uasm_i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
-#endif
 }

 struct mips_huge_tlb_info {
@@ -2299,6 +2283,10 @@ static void config_htw_params(void)
	/* re-initialize the PTI field including the even/odd bit */
	pwfield &= ~MIPS_PWFIELD_PTI_MASK;
	pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
+	if (CONFIG_PGTABLE_LEVELS >= 3) {
+		pwfield &= ~MIPS_PWFIELD_MDI_MASK;
+		pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT;
+	}
	/* Set the PTEI right shift */
	ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
	pwfield |= ptei;
@@ -2320,9 +2308,11 @@ static void config_htw_params(void)

	pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
	pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
+	if (CONFIG_PGTABLE_LEVELS >= 3)
+		pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;

	/* If XPA has been enabled, PTEs are 64-bit in size. */
-	if (read_c0_pagegrain() & PG_ELPA)
+	if (config_enabled(CONFIG_64BITS) || (read_c0_pagegrain() & PG_ELPA))
		pwsize |= 1;

	write_c0_pwsize(pwsize);
@@ -2360,6 +2350,41 @@ static void config_xpa_params(void)
 #endif
 }

+static void check_pabits(void)
+{
+	unsigned long entry;
+	unsigned pabits, fillbits;
+
+	if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
+		/*
+		 * We'll only be making use of the fact that we can rotate bits
+		 * into the fill if the CPU supports RIXI, so don't bother
+		 * probing this for CPUs which don't.
+		 */
+		return;
+	}
+
+	write_c0_entrylo0(~0ul);
+	back_to_back_c0_hazard();
+	entry = read_c0_entrylo0();
+
+	/* clear all non-PFN bits */
+	entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1);
+	entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
+
+	/* find a lower bound on PABITS, and upper bound on fill bits */
+	pabits = fls_long(entry) + 6;
+	fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0);
+
+	/* minus the RI & XI bits */
+	fillbits -= min_t(unsigned, fillbits, 2);
+
+	if (fillbits >= ilog2(_PAGE_NO_EXEC))
+		fill_includes_sw_bits = true;
+
+	pr_debug("Entry* registers contain %u fill bits\n", fillbits);
+}
+
 void build_tlb_refill_handler(void)
 {
	/*
@@ -2370,6 +2395,7 @@ void build_tlb_refill_handler(void)
	static int run_once = 0;

	output_pgtable_bits_defines();
+	check_pabits();

 #ifdef CONFIG_64BIT
	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
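
check_pabits() decides whether a single ROTR in build_convert_pte_to_entrylo()
can rotate the software PTE bits into the EntryLo fill, or whether an SRL must
strip them first. Worked numbers under assumed hardware (illustrative only,
not from this diff):

	/* 64-bit, RIXI-capable CPU whose writable EntryLo bits end at bit 29 */
	pabits   = 30 + 6;	/* fls_long(entry) + 6        = 36 */
	fillbits = 64 - 36;	/* BITS_PER_LONG - pabits     = 28 */
	fillbits = 28 - 2;	/* minus the RI & XI bits     = 26 */
	/* 26 >= ilog2(_PAGE_NO_EXEC), so fill_includes_sw_bits = true
	   and one ROTR suffices to move the software bits into the fill. */
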