diff options
Diffstat (limited to 'arch/x86/kernel/process.c')
| -rw-r--r-- | arch/x86/kernel/process.c | 21 | 
1 file changed, 14 insertions, 7 deletions
| diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 97fb3e5737f5..832a6acd730f 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -47,7 +47,7 @@   * section. Since TSS's are completely CPU-local, we want them   * on exact cacheline boundaries, to eliminate cacheline ping-pong.   */ -__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { +__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {  	.x86_tss = {  		/*  		 * .sp0 is only used when entering ring 0 from a lower @@ -56,6 +56,16 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {  		 * Poison it.  		 */  		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1, + +#ifdef CONFIG_X86_64 +		/* +		 * .sp1 is cpu_current_top_of_stack.  The init task never +		 * runs user code, but cpu_current_top_of_stack should still +		 * be well defined before the first context switch. +		 */ +		.sp1 = TOP_OF_INIT_STACK, +#endif +  #ifdef CONFIG_X86_32  		.ss0 = __KERNEL_DS,  		.ss1 = __KERNEL_CS, @@ -71,11 +81,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {  	  */  	.io_bitmap		= { [0 ... 
IO_BITMAP_LONGS] = ~0 },  #endif -#ifdef CONFIG_X86_32 -	.SYSENTER_stack_canary	= STACK_END_MAGIC, -#endif  }; -EXPORT_PER_CPU_SYMBOL(cpu_tss); +EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);  DEFINE_PER_CPU(bool, __tss_limit_invalid);  EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid); @@ -104,7 +111,7 @@ void exit_thread(struct task_struct *tsk)  	struct fpu *fpu = &t->fpu;  	if (bp) { -		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu()); +		struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());  		t->io_bitmap_ptr = NULL;  		clear_thread_flag(TIF_IO_BITMAP); @@ -299,7 +306,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,  	}  	if ((tifp ^ tifn) & _TIF_NOTSC) -		cr4_toggle_bits(X86_CR4_TSD); +		cr4_toggle_bits_irqsoff(X86_CR4_TSD);  	if ((tifp ^ tifn) & _TIF_NOCPUID)  		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); | 

