Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r-- | arch/mips/mm/tlbex.c | 92 |
1 file changed, 40 insertions, 52 deletions
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 144ceb0fba88..344e6e9ea43b 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -545,7 +545,6 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		tlbw(p);
 		break;
 
-	case CPU_R4300:
 	case CPU_5KC:
 	case CPU_TX49XX:
 	case CPU_PR4450:
@@ -572,8 +571,8 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	case CPU_BMIPS4350:
 	case CPU_BMIPS4380:
 	case CPU_BMIPS5000:
-	case CPU_LOONGSON2:
-	case CPU_LOONGSON3:
+	case CPU_LOONGSON2EF:
+	case CPU_LOONGSON64:
 	case CPU_R5500:
 		if (m4kc_tlbp_war())
 			uasm_i_nop(p);
@@ -604,13 +603,12 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 
 	case CPU_VR4131:
 	case CPU_VR4133:
-	case CPU_R5432:
 		uasm_i_nop(p);
 		uasm_i_nop(p);
 		tlbw(p);
 		break;
 
-	case CPU_JZRISC:
+	case CPU_XBURST:
 		tlbw(p);
 		uasm_i_nop(p);
 		break;
@@ -631,7 +629,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 		return;
 	}
 
-	if (cpu_has_rixi && _PAGE_NO_EXEC) {
+	if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
 		if (fill_includes_sw_bits) {
 			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
 		} else {
@@ -655,6 +653,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 				   int restore_scratch)
 {
 	if (restore_scratch) {
+		/*
+		 * Ensure the MFC0 below observes the value written to the
+		 * KScratch register by the prior MTC0.
+		 */
+		if (scratch_reg >= 0)
+			uasm_i_ehb(p);
+
 		/* Reset default page size */
 		if (PM_DEFAULT_MASK >> 16) {
 			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
@@ -669,12 +674,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 			uasm_il_b(p, r, lid);
 		}
-		if (scratch_reg >= 0) {
-			uasm_i_ehb(p);
+		if (scratch_reg >= 0)
 			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-		} else {
+		else
 			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-		}
 	} else {
 		/* Reset default page size */
 		if (PM_DEFAULT_MASK >> 16) {
@@ -923,6 +926,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	}
 	if (mode != not_refill && check_for_high_segbits) {
 		uasm_l_large_segbits_fault(l, *p);
+
+		if (mode == refill_scratch && scratch_reg >= 0)
+			uasm_i_ehb(p);
+
 		/*
 		 * We get here if we are an xsseg address, or if we are
 		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
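A note on the build_restore_pagemask() change above: on MIPS the instruction emitted directly after uasm_il_b() executes in the branch delay slot, so the single MFC0 (or LW) that restores $1 needs to sit immediately behind the branch; the EHB is therefore hoisted to the top of the restore_scratch path, where it still separates the earlier MTC0 that wrote the KScratch register from the MFC0 that reads it back. The build_get_pgd_vmalloc64() hunks below apply the same pattern around uasm_i_jr(). A condensed sketch of the resulting restore_scratch path, assuming the PM_DEFAULT_MASK == 0 branch of the PageMask reset and eliding the other variants (not a verbatim excerpt of the file):

	if (restore_scratch) {
		/* Clear the MTC0 -> MFC0 KScratch hazard before the branch. */
		if (scratch_reg >= 0)
			uasm_i_ehb(p);

		/* Reset default page size */
		uasm_i_mtc0(p, 0, C0_PAGEMASK);
		uasm_il_b(p, r, lid);

		/* Branch delay slot: restore $1 from KScratch or scratchpad. */
		if (scratch_reg >= 0)
			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
		else
			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
	}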
@@ -941,12 +948,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		uasm_i_jr(p, ptr);
 
 		if (mode == refill_scratch) {
-			if (scratch_reg >= 0) {
-				uasm_i_ehb(p);
+			if (scratch_reg >= 0)
 				UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-			} else {
+			else
 				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-			}
 		} else {
 			uasm_i_nop(p);
 		}
@@ -1372,7 +1377,7 @@ static void build_r4000_tlb_refill_handler(void)
 	switch (boot_cpu_type()) {
 	default:
 		if (sizeof(long) == 4) {
-	case CPU_LOONGSON2:
+	case CPU_LOONGSON2EF:
 		/* Loongson2 ebase is different than r4k, we have more space */
 			if ((p - tlb_handler) > 64)
 				panic("TLB refill handler space exceeded");
@@ -2609,21 +2614,11 @@ void build_tlb_refill_handler(void)
 	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
 #endif
 
-	switch (current_cpu_type()) {
-	case CPU_R2000:
-	case CPU_R3000:
-	case CPU_R3000A:
-	case CPU_R3081E:
-	case CPU_TX3912:
-	case CPU_TX3922:
-	case CPU_TX3927:
+	if (cpu_has_3kex) {
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-		if (cpu_has_local_ebase)
-			build_r3000_tlb_refill_handler();
 		if (!run_once) {
-			if (!cpu_has_local_ebase)
-				build_r3000_tlb_refill_handler();
 			build_setup_pgd();
+			build_r3000_tlb_refill_handler();
 			build_r3000_tlb_load_handler();
 			build_r3000_tlb_store_handler();
 			build_r3000_tlb_modify_handler();
@@ -2633,34 +2628,27 @@ void build_tlb_refill_handler(void)
 #else
 		panic("No R3000 TLB refill handler");
 #endif
-		break;
+		return;
+	}
 
-	case CPU_R8000:
-		panic("No R8000 TLB refill handler yet");
-		break;
+	if (cpu_has_ldpte)
+		setup_pw();
 
-	default:
+	if (!run_once) {
+		scratch_reg = allocate_kscratch();
+		build_setup_pgd();
+		build_r4000_tlb_load_handler();
+		build_r4000_tlb_store_handler();
+		build_r4000_tlb_modify_handler();
 		if (cpu_has_ldpte)
-			setup_pw();
-
-		if (!run_once) {
-			scratch_reg = allocate_kscratch();
-			build_setup_pgd();
-			build_r4000_tlb_load_handler();
-			build_r4000_tlb_store_handler();
-			build_r4000_tlb_modify_handler();
-			if (cpu_has_ldpte)
-				build_loongson3_tlb_refill_handler();
-			else if (!cpu_has_local_ebase)
-				build_r4000_tlb_refill_handler();
-			flush_tlb_handlers();
-			run_once++;
-		}
-		if (cpu_has_local_ebase)
+			build_loongson3_tlb_refill_handler();
+		else
 			build_r4000_tlb_refill_handler();
-		if (cpu_has_xpa)
-			config_xpa_params();
-		if (cpu_has_htw)
-			config_htw_params();
+		flush_tlb_handlers();
+		run_once++;
 	}
+	if (cpu_has_xpa)
+		config_xpa_params();
+	if (cpu_has_htw)
+		config_htw_params();
 }
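The final two hunks collapse the per-CPU-type switch in build_tlb_refill_handler() into a single cpu_has_3kex test: CPUs with an R3000-style TLB build their handlers once and return early, the dead CPU_R8000 and cpu_has_local_ebase cases are dropped, and the R4000-style refill handler is now always generated under !run_once alongside the other handlers. A condensed sketch of the resulting control flow, assuming the function's preliminary checks and the file-static scratch_reg declaration from the surrounding code (not a verbatim excerpt):

void build_tlb_refill_handler(void)
{
	static int run_once;

	if (cpu_has_3kex) {
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		if (!run_once) {
			/* Build the R3000-style handlers exactly once. */
			build_setup_pgd();
			build_r3000_tlb_refill_handler();
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		return;
	}

	if (cpu_has_ldpte)
		setup_pw();

	if (!run_once) {
		scratch_reg = allocate_kscratch();
		build_setup_pgd();
		build_r4000_tlb_load_handler();
		build_r4000_tlb_store_handler();
		build_r4000_tlb_modify_handler();
		if (cpu_has_ldpte)
			build_loongson3_tlb_refill_handler();
		else
			build_r4000_tlb_refill_handler();
		flush_tlb_handlers();
		run_once++;
	}
	if (cpu_has_xpa)
		config_xpa_params();
	if (cpu_has_htw)
		config_htw_params();
}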