Diffstat (limited to 'arch/i386/kernel/process.c')
 -rw-r--r--  arch/i386/kernel/process.c | 25 +++++++++++++++++--------
 1 files changed, 17 insertions, 8 deletions
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 23ae198dbbc3..cfae587bf7d2 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -670,14 +670,6 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	load_TLS(next, cpu);
 
 	/*
-	 * Restore %gs if needed (which is common)
-	 */
-	if (prev->gs | next->gs)
-		loadsegment(gs, next->gs);
-
-	write_pda(pcurrent, next_p);
-
-	/*
 	 * Now maybe handle debug registers and/or IO bitmaps
 	 */
 	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
@@ -686,6 +678,15 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 
 	disable_tsc(prev_p, next_p);
 
+	/*
+	 * Leave lazy mode, flushing any hypercalls made here.
+	 * This must be done before restoring TLS segments so
+	 * the GDT and LDT are properly updated, and must be
+	 * done before math_state_restore, so the TS bit is up
+	 * to date.
+	 */
+	arch_leave_lazy_cpu_mode();
+
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
@@ -693,6 +694,14 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	if (next_p->fpu_counter > 5)
 		math_state_restore();
 
+	/*
+	 * Restore %gs if needed (which is common)
+	 */
+	if (prev->gs | next->gs)
+		loadsegment(gs, next->gs);
+
+	write_pda(pcurrent, next_p);
+
 	return prev_p;
 }
 
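
Note: arch_leave_lazy_cpu_mode() comes from the i386 paravirt hypercall-batching interface that this series introduces. The following is a minimal, hedged sketch of how such a hook can be wired up; the CONFIG_PARAVIRT guard, the set_lazy_mode op, and the PARAVIRT_LAZY_* constants are assumptions modeled on the paravirt_ops style of this era, not quoted from the kernel's actual headers.

/*
 * Illustrative sketch only; names and layout are assumptions, not the
 * kernel's actual header definitions.
 */
#ifdef CONFIG_PARAVIRT

#define PARAVIRT_LAZY_NONE	0	/* flush and stop batching */
#define PARAVIRT_LAZY_MMU	1	/* batch MMU (page table) updates */
#define PARAVIRT_LAZY_CPU	2	/* batch CPU state (GDT/LDT/TLS) updates */

struct paravirt_ops {
	/* Hypervisor hook: switch batching mode, flushing pending work. */
	void (*set_lazy_mode)(int mode);
	/* ... other ops elided ... */
};
extern struct paravirt_ops paravirt_ops;

/* Start queueing CPU-state hypercalls instead of issuing them one by one. */
#define arch_enter_lazy_cpu_mode()	paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_CPU)
/* Flush the queue; afterwards the descriptor tables and the TS bit reflect
 * what __switch_to has programmed. */
#define arch_leave_lazy_cpu_mode()	paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)

#else /* !CONFIG_PARAVIRT */

/* On bare metal the hooks compile away to nothing. */
#define arch_enter_lazy_cpu_mode()	do { } while (0)
#define arch_leave_lazy_cpu_mode()	do { } while (0)

#endif

Under this reading, the ordering constraint in the new comment follows directly: until the flush happens, a hypervisor may still hold queued GDT/LDT and control-register updates, so loadsegment(gs, next->gs) and math_state_restore() are moved below arch_leave_lazy_cpu_mode() so they operate on up-to-date state.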