-rw-r--r--  arch/x86/ia32/ia32_signal.c          3
-rw-r--r--  arch/x86/include/asm/fpu-internal.h  4
-rw-r--r--  arch/x86/include/asm/fpu/types.h     6
-rw-r--r--  arch/x86/include/asm/processor.h     6
-rw-r--r--  arch/x86/kernel/fpu/core.c          38
-rw-r--r--  arch/x86/kernel/fpu/xsave.c         11
-rw-r--r--  arch/x86/kernel/signal.c             8
-rw-r--r--  arch/x86/kvm/x86.c                   3
-rw-r--r--  arch/x86/math-emu/fpu_entry.c        3
9 files changed, 54 insertions(+), 28 deletions(-)
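
This commit replaces the per-task PF_USED_MATH bit in task_struct::flags with a per-context fpstate_active byte in struct fpu, so the FPU code no longer reaches into the generic task flags. Here is a minimal standalone sketch of the before/after check; the structs are simplified stand-ins, not the kernel definitions:

#include <stdio.h>

#define PF_USED_MATH 0x00002000	/* the old task_struct flag bit */

struct fpu {
	unsigned char fpstate_active;	/* the new per-context flag */
};

struct thread_struct {
	struct fpu fpu;
};

struct task_struct {
	unsigned long flags;
	struct thread_struct thread;
};

int main(void)
{
	struct task_struct task = { .flags = PF_USED_MATH };
	struct fpu *fpu = &task.thread.fpu;

	fpu->fpstate_active = 1;

	/* Old style: a task-wide flag bit. */
	if (task.flags & PF_USED_MATH)
		printf("old check: task used math\n");

	/* New style: the FPU context tracks its own state. */
	if (fpu->fpstate_active)
		printf("new check: fpstate is active\n");
	return 0;
}
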
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index bffb2c49ceb6..e1ec6f90d09e 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -307,6 +307,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
size_t frame_size,
void __user **fpstate)
{
+ struct fpu *fpu = &current->thread.fpu;
unsigned long sp;
/* Default to using normal stack */
@@ -321,7 +322,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
ksig->ka.sa.sa_restorer)
sp = (unsigned long) ksig->ka.sa.sa_restorer;
- if (current->flags & PF_USED_MATH) {
+ if (fpu->fpstate_active) {
unsigned long fx_aligned, math_size;
sp = alloc_mathframe(sp, 1, &fx_aligned, &math_size);
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 2cac49e3b4bd..9311126571ab 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -375,7 +375,7 @@ static inline void drop_fpu(struct task_struct *tsk)
__thread_fpu_end(fpu);
}
- tsk->flags &= ~PF_USED_MATH;
+ fpu->fpstate_active = 0;
preempt_enable();
}
@@ -424,7 +424,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
* If the task has used the math, pre-load the FPU on xsave processors
* or if the past 5 consecutive context-switches used math.
*/
- fpu.preload = (new->flags & PF_USED_MATH) &&
+ fpu.preload = new_fpu->fpstate_active &&
(use_eager_fpu() || new->thread.fpu.counter > 5);
if (old_fpu->has_fpu) {
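
The comment above captures the lazy-restore heuristic: pre-load only when the incoming task has FPU state at all, and either the CPU runs in eager (xsave) mode or the task used math on more than 5 consecutive context switches. A compilable toy of just that predicate; use_eager_fpu() and the struct here are simplified stand-ins:

#include <stdbool.h>

struct fpu {
	unsigned char fpstate_active;
	unsigned char counter;	/* consecutive switches that used math */
};

static bool use_eager_fpu(void)
{
	return false;		/* assume lazy mode for this sketch */
}

static bool should_preload(const struct fpu *new_fpu)
{
	return new_fpu->fpstate_active &&
	       (use_eager_fpu() || new_fpu->counter > 5);
}

int main(void)
{
	struct fpu busy   = { .fpstate_active = 1, .counter = 6 };
	struct fpu bursty = { .fpstate_active = 1, .counter = 1 };

	return should_preload(&busy) && !should_preload(&bursty) ? 0 : 1;
}
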
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index efb520dcf38e..f6317d9aa808 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -137,6 +137,12 @@ struct fpu {
* deal with bursty apps that only use the FPU for a short time:
*/
unsigned char counter;
+ /*
+ * This flag indicates whether this context is active: if the task is
+ * not running then we can restore from this context, if the task
+ * is running then we should save into this context.
+ */
+ unsigned char fpstate_active;
};
#endif /* _ASM_X86_FPU_H */
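
The new field's contract, as the comment puts it: while the task runs, the hardware registers are live and get saved into this context on switch-out; while it is stopped, the context is the authoritative copy to restore from. A toy illustration of that direction of data flow, where memcpy stands in for the real fxsave/fxrstor instructions:

#include <stdio.h>
#include <string.h>

struct fpu {
	unsigned char fpstate_active;
	double regs[8];			/* toy stand-in for the real fpstate */
};

static double cpu_fpregs[8];		/* toy stand-in for the FPU registers */

/* Task is being scheduled out while running: save INTO its context. */
static void fpu_save(struct fpu *fpu)
{
	if (fpu->fpstate_active)
		memcpy(fpu->regs, cpu_fpregs, sizeof(cpu_fpregs));
}

/* Task is about to run again: restore FROM its context. */
static void fpu_restore(const struct fpu *fpu)
{
	if (fpu->fpstate_active)
		memcpy(cpu_fpregs, fpu->regs, sizeof(cpu_fpregs));
}

int main(void)
{
	struct fpu task_fpu = { .fpstate_active = 1 };

	cpu_fpregs[0] = 3.14;
	fpu_save(&task_fpu);		/* task preempted */
	cpu_fpregs[0] = 0.0;		/* another task clobbers the registers */
	fpu_restore(&task_fpu);		/* task runs again */
	printf("%g\n", cpu_fpregs[0]);	/* prints 3.14 */
	return 0;
}
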
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index d50cc7f61559..0f4add462697 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -385,6 +385,10 @@ struct thread_struct {
unsigned long fs;
#endif
unsigned long gs;
+
+ /* Floating point and extended processor state */
+ struct fpu fpu;
+
/* Save middle states of ptrace breakpoints */
struct perf_event *ptrace_bps[HBP_NUM];
/* Debug status used for traps, single steps, etc... */
@@ -395,8 +399,6 @@ struct thread_struct {
unsigned long cr2;
unsigned long trap_nr;
unsigned long error_code;
- /* floating point and extended processor state */
- struct fpu fpu;
#ifdef CONFIG_X86_32
/* Virtual 86 mode info */
struct vm86_struct __user *vm86_info;
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 779813126f49..9e7f9e7b2cca 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -236,14 +236,17 @@ static void fpu_copy(struct task_struct *dst, struct task_struct *src)
int fpu__copy(struct task_struct *dst, struct task_struct *src)
{
+ struct fpu *dst_fpu = &dst->thread.fpu;
+ struct fpu *src_fpu = &src->thread.fpu;
+
dst->thread.fpu.counter = 0;
dst->thread.fpu.has_fpu = 0;
dst->thread.fpu.state = NULL;
task_disable_lazy_fpu_restore(dst);
- if (src->flags & PF_USED_MATH) {
- int err = fpstate_alloc(&dst->thread.fpu);
+ if (src_fpu->fpstate_active) {
+ int err = fpstate_alloc(dst_fpu);
if (err)
return err;
@@ -260,11 +263,12 @@ int fpu__copy(struct task_struct *dst, struct task_struct *src)
*/
int fpstate_alloc_init(struct task_struct *curr)
{
+ struct fpu *fpu = &curr->thread.fpu;
int ret;
if (WARN_ON_ONCE(curr != current))
return -EINVAL;
- if (WARN_ON_ONCE(curr->flags & PF_USED_MATH))
+ if (WARN_ON_ONCE(fpu->fpstate_active))
return -EINVAL;
/*
@@ -277,7 +281,7 @@ int fpstate_alloc_init(struct task_struct *curr)
fpstate_init(&curr->thread.fpu);
/* Safe to do for the current task: */
- curr->flags |= PF_USED_MATH;
+ fpu->fpstate_active = 1;
return 0;
}
@@ -308,12 +312,13 @@ EXPORT_SYMBOL_GPL(fpstate_alloc_init);
*/
static int fpu__unlazy_stopped(struct task_struct *child)
{
+ struct fpu *child_fpu = &child->thread.fpu;
int ret;
if (WARN_ON_ONCE(child == current))
return -EINVAL;
- if (child->flags & PF_USED_MATH) {
+ if (child_fpu->fpstate_active) {
task_disable_lazy_fpu_restore(child);
return 0;
}
@@ -328,7 +333,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
fpstate_init(&child->thread.fpu);
/* Safe to do for stopped child tasks: */
- child->flags |= PF_USED_MATH;
+ child_fpu->fpstate_active = 1;
return 0;
}
@@ -348,7 +353,7 @@ void fpu__restore(void)
struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu;
- if (!(tsk->flags & PF_USED_MATH)) {
+ if (!fpu->fpstate_active) {
local_irq_enable();
/*
* does a slab alloc which can sleep
@@ -378,6 +383,8 @@ EXPORT_SYMBOL_GPL(fpu__restore);
void fpu__flush_thread(struct task_struct *tsk)
{
+ struct fpu *fpu = &tsk->thread.fpu;
+
WARN_ON(tsk != current);
if (!use_eager_fpu()) {
@@ -385,7 +392,7 @@ void fpu__flush_thread(struct task_struct *tsk)
drop_fpu(tsk);
fpstate_free(&tsk->thread.fpu);
} else {
- if (!(tsk->flags & PF_USED_MATH)) {
+ if (!fpu->fpstate_active) {
/* kthread execs. TODO: cleanup this horror. */
if (WARN_ON(fpstate_alloc_init(tsk)))
force_sig(SIGKILL, tsk);
@@ -402,12 +409,16 @@ void fpu__flush_thread(struct task_struct *tsk)
*/
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
- return (target->flags & PF_USED_MATH) ? regset->n : 0;
+ struct fpu *target_fpu = &target->thread.fpu;
+
+ return target_fpu->fpstate_active ? regset->n : 0;
}
int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
- return (cpu_has_fxsr && (target->flags & PF_USED_MATH)) ? regset->n : 0;
+ struct fpu *target_fpu = &target->thread.fpu;
+
+ return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
@@ -733,16 +744,17 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
* struct user_i387_struct) but is in fact only used for 32-bit
* dumps, so on 64-bit it is really struct user_i387_ia32_struct.
*/
-int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
+int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
struct task_struct *tsk = current;
+ struct fpu *fpu = &tsk->thread.fpu;
int fpvalid;
- fpvalid = !!(tsk->flags & PF_USED_MATH);
+ fpvalid = fpu->fpstate_active;
if (fpvalid)
fpvalid = !fpregs_get(tsk, NULL,
0, sizeof(struct user_i387_ia32_struct),
- fpu, NULL);
+ ufpu, NULL);
return fpvalid;
}
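
Several call sites in this file (fpu__restore(), fpu__unlazy_stopped()) and the math-emu and KVM hunks below share one pattern: allocate and initialize the fpstate on first use, and only then set fpstate_active. A hedged toy of that allocate-then-arm ordering; fpu_activate() is a hypothetical name, the kernel helper being sketched is fpstate_alloc_init():

#include <stdlib.h>

struct fpstate { double st[8]; };

struct fpu {
	unsigned char fpstate_active;
	struct fpstate *state;
};

static int fpu_activate(struct fpu *fpu)
{
	if (fpu->fpstate_active)
		return 0;		/* already active, nothing to do */

	fpu->state = calloc(1, sizeof(*fpu->state));
	if (!fpu->state)
		return -1;		/* the kernel would return -ENOMEM */

	fpu->fpstate_active = 1;	/* flip the flag last, once state is valid */
	return 0;
}

int main(void)
{
	struct fpu fpu = { 0 };
	return fpu_activate(&fpu);
}
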
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index 8cd127049c9b..dc346e19c0df 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -334,6 +334,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
int ia32_fxstate = (buf != buf_fx);
struct task_struct *tsk = current;
+ struct fpu *fpu = &tsk->thread.fpu;
int state_size = xstate_size;
u64 xstate_bv = 0;
int fx_only = 0;
@@ -349,7 +350,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
if (!access_ok(VERIFY_READ, buf, size))
return -EACCES;
- if (!(tsk->flags & PF_USED_MATH) && fpstate_alloc_init(tsk))
+ if (!fpu->fpstate_active && fpstate_alloc_init(tsk))
return -1;
if (!static_cpu_has(X86_FEATURE_FPU))
@@ -384,12 +385,12 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
int err = 0;
/*
- * Drop the current fpu which clears PF_USED_MATH. This ensures
+ * Drop the current fpu which clears fpu->fpstate_active. This ensures
* that any context-switch during the copy of the new state,
* avoids the intermediate state from getting restored/saved.
* Thus avoiding the new restored state from getting corrupted.
* We will be ready to restore/save the state only after
- * PF_USED_MATH is again set.
+ * fpu->fpstate_active is again set.
*/
drop_fpu(tsk);
@@ -401,7 +402,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
}
- tsk->flags |= PF_USED_MATH;
+ fpu->fpstate_active = 1;
if (use_eager_fpu()) {
preempt_disable();
fpu__restore();
@@ -685,7 +686,7 @@ void xsave_init(void)
*/
void __init_refok eager_fpu_init(void)
{
- WARN_ON(current->flags & PF_USED_MATH);
+ WARN_ON(current->thread.fpu.fpstate_active);
current_thread_info()->status = 0;
if (eagerfpu == ENABLE)
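
The comment in __restore_xstate_sig() above is about ordering: clear fpstate_active first, copy the new state in, then set the flag again, so a context switch in the middle neither saves over nor restores from the half-copied buffer. A toy of that sequence, where memcpy from a local buffer stands in for the copy from the user-space sigframe:

#include <string.h>

struct fpstate {
	unsigned char bytes[512];	/* toy: real xstate is larger and variable */
};

struct fpu {
	unsigned char fpstate_active;
	struct fpstate state;
};

static void restore_sig_state(struct fpu *fpu, const struct fpstate *frame)
{
	fpu->fpstate_active = 0;	/* drop_fpu(): switches now ignore this context */
	memcpy(&fpu->state, frame, sizeof(*frame));
	fpu->fpstate_active = 1;	/* ready to be saved/restored again */
}

int main(void)
{
	static struct fpu fpu;
	static struct fpstate frame;

	restore_sig_state(&fpu, &frame);
	return fpu.fpstate_active == 1 ? 0 : 1;
}
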
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 8e2529ebb8c6..20a9d355af59 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -198,6 +198,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
unsigned long sp = regs->sp;
unsigned long buf_fx = 0;
int onsigstack = on_sig_stack(sp);
+ struct fpu *fpu = &current->thread.fpu;
/* redzone */
if (config_enabled(CONFIG_X86_64))
@@ -217,7 +218,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
}
}
- if (current->flags & PF_USED_MATH) {
+ if (fpu->fpstate_active) {
sp = alloc_mathframe(sp, config_enabled(CONFIG_X86_32),
&buf_fx, &math_size);
*fpstate = (void __user *)sp;
@@ -233,7 +234,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
return (void __user *)-1L;
/* save i387 and extended state */
- if ((current->flags & PF_USED_MATH) &&
+ if (fpu->fpstate_active &&
save_xstate_sig(*fpstate, (void __user *)buf_fx, math_size) < 0)
return (void __user *)-1L;
@@ -616,6 +617,7 @@ static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
bool stepping, failed;
+ struct fpu *fpu = &current->thread.fpu;
/* Are we from a system call? */
if (syscall_get_nr(current, regs) >= 0) {
@@ -664,7 +666,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
/*
* Ensure the signal handler starts with the new fpu state.
*/
- if (current->flags & PF_USED_MATH)
+ if (fpu->fpstate_active)
fpu_reset_state(current);
}
signal_setup_done(failed, ksig, stepping);
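
get_sigframe() only grows the signal frame for FPU state when fpstate_active is set. A simplified model of that carve-out; alloc_mathframe() is the real helper, while the 64-byte alignment and the 832-byte size used here are illustrative assumptions for an xsave area:

#include <stdio.h>

static unsigned long alloc_mathframe_toy(unsigned long sp, unsigned long math_size)
{
	sp -= math_size;	/* the stack grows down, so subtract first */
	return sp & ~63UL;	/* then align down for the xsave area */
}

int main(void)
{
	unsigned long sp = 0x7fffffffe000UL;
	int fpstate_active = 1;

	if (fpstate_active)	/* only reserve space when there is state to save */
		sp = alloc_mathframe_toy(sp, 832);
	printf("fpstate at %#lx\n", sp);
	return 0;
}
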
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0635a1fd43ba..bab8afb61dc1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6597,10 +6597,11 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
+ struct fpu *fpu = &current->thread.fpu;
int r;
sigset_t sigsaved;
- if (!(current->flags & PF_USED_MATH) && fpstate_alloc_init(current))
+ if (!fpu->fpstate_active && fpstate_alloc_init(current))
return -ENOMEM;
if (vcpu->sigset_active)
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index bf628804d67c..f1aac55d6a67 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -147,8 +147,9 @@ void math_emulate(struct math_emu_info *info)
unsigned long code_base = 0;
unsigned long code_limit = 0; /* Initialized to stop compiler warnings */
struct desc_struct code_descriptor;
+ struct fpu *fpu = &current->thread.fpu;
- if (!(current->flags & PF_USED_MATH)) {
+ if (!fpu->fpstate_active) {
if (fpstate_alloc_init(current)) {
do_group_exit(SIGKILL);
return;