author | Paul Burton <paul.burton@imgtec.com> | 2016-04-19 09:25:09 +0100 |
---|---|---|
committer | Ralf Baechle <ralf@linux-mips.org> | 2016-05-13 15:30:25 +0200 |
commit | 2caa89b49b21737b2b7b6fce37cb323535188b06 (patch) | |
tree | 09d9d183545b2186a08176c41019bffeb53a2aa5 /arch/mips/mm | |
parent | b4ebbb876dca9327cc964138f5558ca3c6832392 (diff) | |
MIPS: mm: Simplify build_update_entries
We can simplify build_update_entries by unifying the case of 36-bit
physical addressing on a MIPS32 CPU with the general case: use the
pte_off_ variables everywhere and handle the trivial
_PAGE_GLOBAL_SHIFT == 0 case in build_convert_pte_to_entrylo. This
leaves XPA as the only special case.
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13123/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/mm')
-rw-r--r-- | arch/mips/mm/tlbex.c | 37 |
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 6f20b42be979..588bde3bdbe7 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -631,6 +631,11 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 							 unsigned int reg)
 {
+	if (_PAGE_GLOBAL_SHIFT == 0) {
+		/* pte_t is already in EntryLo format */
+		return;
+	}
+
 	if (cpu_has_rixi && _PAGE_NO_EXEC) {
 		if (fill_includes_sw_bits) {
 			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -1011,10 +1016,16 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 
 static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
-	if (config_enabled(CONFIG_XPA)) {
-		int pte_off_even = sizeof(pte_t) / 2;
-		int pte_off_odd = pte_off_even + sizeof(pte_t);
+	int pte_off_even = 0;
+	int pte_off_odd = sizeof(pte_t);
 
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
+	/* The low 32 bits of EntryLo is stored in pte_high */
+	pte_off_even += offsetof(pte_t, pte_high);
+	pte_off_odd += offsetof(pte_t, pte_high);
+#endif
+
+	if (config_enabled(CONFIG_XPA)) {
 		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
@@ -1033,24 +1044,8 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 		return;
 	}
 
-	/*
-	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
-	 * Kernel is a special case. Only a few CPUs use it.
-	 */
-	if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
-		int pte_off_even = sizeof(pte_t) / 2;
-		int pte_off_odd = pte_off_even + sizeof(pte_t);
-
-		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
-		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
-
-		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
-		UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
-		return;
-	}
-
-	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
-	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
+	UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
+	UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
 	build_convert_pte_to_entrylo(p, tmp);
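
To make the offset arithmetic behind the unified pte_off_ variables concrete, here is a small standalone C sketch. The pte_t layout below (pte_low/pte_high) is a simplified stand-in for the kernel's 32-bit MIPS pte_t under CONFIG_PHYS_ADDR_T_64BIT, not the real definition; it only illustrates how the offsets in the second hunk above are computed.

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the MIPS32 + 64-bit physical address pte_t,
 * where the bits destined for EntryLo live in pte_high. */
typedef struct {
	unsigned int pte_low;
	unsigned int pte_high;
} pte_t;

int main(void)
{
	/* General case: even and odd PTEs sit back to back in the page
	 * table, so the even PTE is at offset 0 and the odd one at
	 * sizeof(pte_t). */
	int pte_off_even = 0;
	int pte_off_odd = sizeof(pte_t);

	/* 36-bit physical addressing on a 32-bit CPU: load from pte_high
	 * instead, mirroring the #if block the patch adds. */
	pte_off_even += offsetof(pte_t, pte_high);
	pte_off_odd += offsetof(pte_t, pte_high);

	printf("even pte offset = %d, odd pte offset = %d\n",
	       pte_off_even, pte_off_odd);
	return 0;
}
```

With this two-word pte_t the sketch prints offsets 4 and 12, the same values the removed special-case code computed as sizeof(pte_t) / 2 and sizeof(pte_t) / 2 + sizeof(pte_t), which is why the separate 36-bit branch could be dropped.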