Diffstat (limited to 'arch/mips/kernel/process.c')
-rw-r--r--	arch/mips/kernel/process.c	72
1 file changed, 38 insertions(+), 34 deletions(-)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 92880cee449e..d2d061520a23 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -30,6 +30,7 @@
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
+#include <asm/dsemul.h>
 #include <asm/dsp.h>
 #include <asm/fpu.h>
 #include <asm/msa.h>
@@ -68,17 +69,20 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
 	lose_fpu(0);
 	clear_thread_flag(TIF_MSA_CTX_LIVE);
 	clear_used_math();
+	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
 	init_dsp();
 	regs->cp0_epc = pc;
 	regs->regs[29] = sp;
 }
 
-void exit_thread(void)
-{
-}
-
-void flush_thread(void)
+void exit_thread(struct task_struct *tsk)
 {
+	/*
+	 * User threads may have allocated a delay slot emulation frame.
+	 * If so, clean up that allocation.
+	 */
+	if (!(current->flags & PF_KTHREAD))
+		dsemul_thread_cleanup(tsk);
 }
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -167,6 +171,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	clear_tsk_thread_flag(p, TIF_FPUBOUND);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
+	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+
 	if (clone_flags & CLONE_SETTLS)
 		ti->tp_value = regs->regs[7];
 
@@ -353,7 +359,7 @@ static int get_frame_info(struct mips_frame_info *info)
 		return 0;
 	if (info->pc_offset < 0) /* leaf */
 		return 1;
-	/* prologue seems boggus... */
+	/* prologue seems bogus... */
 err:
 	return -1;
 }
@@ -455,7 +461,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
 		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
 			regs = (struct pt_regs *)*sp;
 			pc = regs->cp0_epc;
-			if (__kernel_text_address(pc)) {
+			if (!user_mode(regs) && __kernel_text_address(pc)) {
 				*sp = regs->regs[29];
 				*ra = regs->regs[31];
 				return pc;
@@ -580,27 +586,38 @@ int mips_get_process_fp_mode(struct task_struct *task)
 	return value;
 }
 
+static void prepare_for_fp_mode_switch(void *info)
+{
+	struct mm_struct *mm = info;
+
+	if (current->mm == mm)
+		lose_fpu(1);
+}
+
 int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 {
 	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
-	unsigned long switch_count;
 	struct task_struct *t;
+	int max_users;
 
 	/* Check the value is valid */
 	if (value & ~known_bits)
 		return -EOPNOTSUPP;
 
 	/* Avoid inadvertently triggering emulation */
-	if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
-	    !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
+	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
 		return -EOPNOTSUPP;
-	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
+	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
 		return -EOPNOTSUPP;
 
 	/* FR = 0 not supported in MIPS R6 */
-	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
 		return -EOPNOTSUPP;
 
+	/* Proceed with the mode switch */
+	preempt_disable();
+
 	/* Save FP & vector context, then disable FPU & MSA */
 	if (task->signal == current->signal)
 		lose_fpu(1);
@@ -610,31 +627,17 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 	smp_mb__after_atomic();
 
 	/*
-	 * If there are multiple online CPUs then wait until all threads whose
-	 * FP mode is about to change have been context switched. This approach
-	 * allows us to only worry about whether an FP mode switch is in
-	 * progress when FP is first used in a tasks time slice. Pretty much all
-	 * of the mode switch overhead can thus be confined to cases where mode
-	 * switches are actually occurring. That is, to here. However for the
-	 * thread performing the mode switch it may take a while...
+	 * If there are multiple online CPUs then force any which are running
+	 * threads in this process to lose their FPU context, which they can't
+	 * regain until fp_mode_switching is cleared later.
 	 */
 	if (num_online_cpus() > 1) {
-		spin_lock_irq(&task->sighand->siglock);
-
-		for_each_thread(task, t) {
-			if (t == current)
-				continue;
-
-			switch_count = t->nvcsw + t->nivcsw;
-
-			do {
-				spin_unlock_irq(&task->sighand->siglock);
-				cond_resched();
-				spin_lock_irq(&task->sighand->siglock);
-			} while ((t->nvcsw + t->nivcsw) == switch_count);
-		}
-
-		spin_unlock_irq(&task->sighand->siglock);
+		/* No need to send an IPI for the local CPU */
+		max_users = (task->mm == current->mm) ? 1 : 0;
+
+		if (atomic_read(&current->mm->mm_users) > max_users)
+			smp_call_function(prepare_for_fp_mode_switch,
+					  (void *)current->mm, 1);
 	}
 
 	/*
@@ -659,6 +662,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 
 	/* Allow threads to use FP again */
 	atomic_set(&task->mm->context.fp_mode_switching, 0);
+	preempt_enable();
 
 	return 0;
 }
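
The dsemul_thread_cleanup() call added to exit_thread() above releases the branch delay slot emulation frame a user thread may have claimed from its mm's shared emulation page. A rough sketch of what that cleanup is expected to do, assuming the thread.bd_emu_frame field introduced by this diff and a hypothetical free_emuframe() helper that returns a frame to the per-mm pool (the real implementation lives in arch/mips/kernel/dsemul.c and may differ in detail):

/*
 * Sketch only, not the actual dsemul.c code: release the emulation
 * frame this thread allocated, if any, exactly once.
 */
void dsemul_thread_cleanup(struct task_struct *tsk)
{
	int fr_idx;

	/* Atomically take the frame index, leaving BD_EMUFRAME_NONE
	 * behind so a racing cleanup cannot free the frame twice. */
	fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);

	/* Nothing to do if no frame was allocated, or if the mm (and
	 * with it the emulation page) is already gone. */
	if (fr_idx == BD_EMUFRAME_NONE || !tsk->mm)
		return;

	/* Hypothetical helper: return the frame to the per-mm pool */
	free_emuframe(fr_idx, tsk->mm);
}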
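For context, mips_set_process_fp_mode() above is reached from user space through prctl(2). The snippet below is an illustrative caller, not part of the diff; it assumes a MIPS target whose kernel and libc headers expose the PR_SET_FP_MODE / PR_GET_FP_MODE prctl commands, with fallback definitions matching include/uapi/linux/prctl.h:

/* Illustrative only: ask the kernel to switch this process to FR=1
 * (64-bit FP registers), the path served by mips_set_process_fp_mode()
 * above, then read the active mode back. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>

#ifndef PR_SET_FP_MODE
# define PR_SET_FP_MODE	45
# define PR_GET_FP_MODE	46
# define PR_FP_MODE_FR	(1 << 0)	/* 64b FP registers */
# define PR_FP_MODE_FRE	(1 << 1)	/* 32b compatibility */
#endif

int main(void)
{
	if (prctl(PR_SET_FP_MODE, (unsigned long)PR_FP_MODE_FR) != 0) {
		/* EOPNOTSUPP: the CPU cannot run this mode natively */
		fprintf(stderr, "PR_SET_FP_MODE: %s\n", strerror(errno));
		return 1;
	}

	printf("fp mode: %d\n", prctl(PR_GET_FP_MODE));
	return 0;
}

Note that the switch is process-wide: the smp_call_function() IPI in the hunk above forces every other CPU running threads of this mm to drop their FPU context, so each thread picks up the new FR/FRE setting the next time it uses the FPU.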