Diffstat (limited to 'arch/mips/kernel/traps.c')
-rw-r--r--  arch/mips/kernel/traps.c  32
1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index ae0c89d23ad7..4a1712b5abdf 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -145,7 +145,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 	if (!task)
 		task = current;
 
-	if (raw_show_trace || !__kernel_text_address(pc)) {
+	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
 		show_raw_backtrace(sp);
 		return;
 	}
@@ -399,11 +399,8 @@ void __noreturn die(const char *str, struct pt_regs *regs)
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
 
-	if (panic_on_oops) {
-		printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
-		ssleep(5);
+	if (panic_on_oops)
 		panic("Fatal exception");
-	}
 
 	if (regs && kexec_should_crash(current))
 		crash_kexec(regs);
@@ -1249,7 +1246,7 @@ static int enable_restore_fp_context(int msa)
 		err = init_fpu();
 		if (msa && !err) {
 			enable_msa();
-			_init_msa_upper();
+			init_msa_upper();
 			set_thread_flag(TIF_USEDMSA);
 			set_thread_flag(TIF_MSA_CTX_LIVE);
 		}
@@ -1312,7 +1309,7 @@ static int enable_restore_fp_context(int msa)
 	 */
 	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
 	if (!prior_msa && was_fpu_owner) {
-		_init_msa_upper();
+		init_msa_upper();
 
 		goto out;
 	}
@@ -1329,7 +1326,7 @@ static int enable_restore_fp_context(int msa)
 		 * of each vector register such that it cannot see data left
 		 * behind by another task.
 		 */
-		_init_msa_upper();
+		init_msa_upper();
 	} else {
 		/* We need to restore the vector context. */
 		restore_msa(current);
@@ -1356,7 +1353,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 	unsigned long fcr31;
 	unsigned int cpid;
 	int status, err;
-	unsigned long __maybe_unused flags;
 	int sig;
 
 	prev_state = exception_enter();
@@ -1501,16 +1497,13 @@ asmlinkage void do_watch(struct pt_regs *regs)
 {
 	siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
 	enum ctx_state prev_state;
-	u32 cause;
 
 	prev_state = exception_enter();
 	/*
 	 * Clear WP (bit 22) bit of cause register so we don't loop
 	 * forever.
 	 */
-	cause = read_c0_cause();
-	cause &= ~(1 << 22);
-	write_c0_cause(cause);
+	clear_c0_cause(CAUSEF_WP);
 
 	/*
 	 * If the current thread has the watch registers loaded, save
@@ -1647,6 +1640,7 @@ static inline void parity_protection_init(void)
 	case CPU_P5600:
 	case CPU_QEMU_GENERIC:
 	case CPU_I6400:
+	case CPU_P6600:
 		{
 #define ERRCTL_PE	0x80000000
 #define ERRCTL_L2P	0x00800000
@@ -1777,7 +1771,8 @@ asmlinkage void do_ftlb(void)
 	/* For the moment, report the problem and hang. */
 	if ((cpu_has_mips_r2_r6) &&
-	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
+	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
+	     ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
 		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
 		       read_c0_ecc());
 		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
@@ -2119,6 +2114,13 @@
 	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
 	 */
 	if (cpu_has_mips_r2_r6) {
+		/*
+		 * We shouldn't trust a secondary core has a sane EBASE register
+		 * so use the one calculated by the boot CPU.
+		 */
+		if (!is_boot_cpu)
+			write_c0_ebase(ebase);
+
 		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
 		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
 		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2134,7 +2136,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
 	}
 
 	if (!cpu_data[cpu].asid_cache)
-		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
+		cpu_data[cpu].asid_cache = asid_first_version(cpu);
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
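A note on the show_backtrace() hunk: the added user_mode(regs) test makes traps taken from user space fall back to the raw word-by-word stack dump rather than attempting a kernel frame walk, whose sanity check __kernel_text_address() is meaningless for a user-space PC. As a rough sketch of what the guard evaluates on MIPS, the KSU field (bits 4:3) of the saved CP0 Status selects the privilege mode; the helper name below is hypothetical, while the real test lives behind the user_mode() macro and the KU_* constants in asm/ptrace.h:

	#include <asm/ptrace.h>	/* struct pt_regs, KU_MASK (0x18), KU_USER (0x10) */

	/* Sketch: true when the trapped context was running in user mode,
	 * i.e. Status.KSU == 2 in the status word saved at exception entry. */
	static inline int user_mode_sketch(const struct pt_regs *regs)
	{
		return (regs->cp0_status & KU_MASK) == KU_USER;
	}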
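Similarly, the do_watch() hunk folds an open-coded read-modify-write of the CP0 Cause register into the clear_c0_cause() accessor (one of the helpers generated by the __BUILD_SET_C0() macros in asm/mipsregs.h) and replaces the magic bit number with the named CAUSEF_WP mask. A minimal before/after sketch of the equivalence:

	#include <asm/mipsregs.h>	/* read/write_c0_cause(), clear_c0_cause(), CAUSEF_WP */

	/* Before: three statements and a bare bit number. */
	u32 cause = read_c0_cause();
	cause &= ~(1 << 22);		/* drop Cause.WP so the watch exception doesn't re-fire */
	write_c0_cause(cause);

	/* After: the same read-modify-write in one call with a named mask. */
	clear_c0_cause(CAUSEF_WP);	/* CAUSEF_WP == 1 << CAUSEB_WP == 1 << 22 */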