Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/processor.h | 80
-rw-r--r--  arch/x86/include/asm/unwind.h    |  2
-rw-r--r--  arch/x86/include/asm/x86_init.h  | 26
3 files changed, 80 insertions, 28 deletions
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6aa741fbe1df..eaf100508c36 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -602,33 +602,69 @@ static __always_inline void cpu_relax(void)
 	rep_nop();
 }
 
-/* Stop speculative execution and prefetching of modified code. */
+/*
+ * This function forces the icache and prefetched instruction stream to
+ * catch up with reality in two very specific cases:
+ *
+ *  a) Text was modified using one virtual address and is about to be executed
+ *     from the same physical page at a different virtual address.
+ *
+ *  b) Text was modified on a different CPU, may subsequently be
+ *     executed on this CPU, and you want to make sure the new version
+ *     gets executed.  This generally means you're calling this in a IPI.
+ *
+ * If you're calling this for a different reason, you're probably doing
+ * it wrong.
+ */
 static inline void sync_core(void)
 {
-	int tmp;
-
-#ifdef CONFIG_M486
 	/*
-	 * Do a CPUID if available, otherwise do a jump. The jump
-	 * can conveniently enough be the jump around CPUID.
+	 * There are quite a few ways to do this.  IRET-to-self is nice
+	 * because it works on every CPU, at any CPL (so it's compatible
+	 * with paravirtualization), and it never exits to a hypervisor.
+	 * The only down sides are that it's a bit slow (it seems to be
+	 * a bit more than 2x slower than the fastest options) and that
+	 * it unmasks NMIs.  The "push %cs" is needed because, in
+	 * paravirtual environments, __KERNEL_CS may not be a valid CS
+	 * value when we do IRET directly.
+	 *
+	 * In case NMI unmasking or performance ever becomes a problem,
+	 * the next best option appears to be MOV-to-CR2 and an
+	 * unconditional jump.  That sequence also works on all CPUs,
+	 * but it will fault at CPL3 (i.e. Xen PV and lguest).
+	 *
+	 * CPUID is the conventional way, but it's nasty: it doesn't
+	 * exist on some 486-like CPUs, and it usually exits to a
+	 * hypervisor.
+	 *
+	 * Like all of Linux's memory ordering operations, this is a
+	 * compiler barrier as well.
 	 */
-	asm volatile("cmpl %2,%1\n\t"
-		     "jl 1f\n\t"
-		     "cpuid\n"
-		     "1:"
-		     : "=a" (tmp)
-		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
+	register void *__sp asm(_ASM_SP);
+
+#ifdef CONFIG_X86_32
+	asm volatile (
+		"pushfl\n\t"
+		"pushl %%cs\n\t"
+		"pushl $1f\n\t"
+		"iret\n\t"
+		"1:"
+		: "+r" (__sp) : : "memory");
 #else
-	/*
-	 * CPUID is a barrier to speculative execution.
-	 * Prefetched instructions are automatically
-	 * invalidated when modified.
-	 */
-	asm volatile("cpuid"
-		     : "=a" (tmp)
-		     : "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
+	unsigned int tmp;
+
+	asm volatile (
+		"mov %%ss, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq %%rsp\n\t"
+		"addq $8, (%%rsp)\n\t"
+		"pushfq\n\t"
+		"mov %%cs, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq $1f\n\t"
+		"iretq\n\t"
+		"1:"
+		: "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
 #endif
 }
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index c5a7f3a930dd..6fa75b17aec3 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -12,7 +12,7 @@ struct unwind_state {
 	struct task_struct *task;
 	int graph_idx;
 #ifdef CONFIG_FRAME_POINTER
-	unsigned long *bp;
+	unsigned long *bp, *orig_sp;
 	struct pt_regs *regs;
 #else
 	unsigned long *sp;
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 6ba793178441..7ba7e90a9ad6 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -59,7 +59,7 @@ struct x86_init_irqs {
 
 /**
  * struct x86_init_oem - oem platform specific customizing functions
- * @arch_setup: platform specific architecure setup
+ * @arch_setup: platform specific architecture setup
  * @banner: print a platform specific banner
  */
 struct x86_init_oem {
@@ -165,8 +165,25 @@ struct x86_legacy_devices {
 };
 
 /**
+ * enum x86_legacy_i8042_state - i8042 keyboard controller state
+ * @X86_LEGACY_I8042_PLATFORM_ABSENT: the controller is always absent on
+ *	given platform/subarch.
+ * @X86_LEGACY_I8042_FIRMWARE_ABSENT: firmware reports that the controller
+ *	is absent.
+ * @X86_LEGACY_i8042_EXPECTED_PRESENT: the controller is likely to be
+ *	present, the i8042 driver should probe for controller existence.
+ */
+enum x86_legacy_i8042_state {
+	X86_LEGACY_I8042_PLATFORM_ABSENT,
+	X86_LEGACY_I8042_FIRMWARE_ABSENT,
+	X86_LEGACY_I8042_EXPECTED_PRESENT,
+};
+
+/**
  * struct x86_legacy_features - legacy x86 features
  *
+ * @i8042: indicated if we expect the device to have i8042 controller
+ *	present.
  * @rtc: this device has a CMOS real-time clock present
  * @reserve_bios_regions: boot code will search for the EBDA address and the
  *	start of the 640k - 1M BIOS region.  If false, the platform must
@@ -175,6 +192,7 @@ struct x86_legacy_devices {
  *	documentation for further details.
  */
 struct x86_legacy_features {
+	enum x86_legacy_i8042_state i8042;
 	int rtc;
 	int reserve_bios_regions;
 	struct x86_legacy_devices devices;
@@ -188,15 +206,14 @@ struct x86_legacy_features {
  * @set_wallclock:		set time back to HW clock
  * @is_untracked_pat_range	exclude from PAT logic
  * @nmi_init			enable NMI on cpus
- * @i8042_detect		pre-detect if i8042 controller exists
 * @save_sched_clock_state:	save state for sched_clock() on suspend
 * @restore_sched_clock_state:	restore state for sched_clock() on resume
- * @apic_post_init:		adjust apic if neeeded
+ * @apic_post_init:		adjust apic if needed
 * @legacy:			legacy features
 * @set_legacy_features:	override legacy features. Use of this callback
 *				is highly discouraged. You should only need
 *				this if your hardware platform requires further
- *				custom fine tuning far beyong what may be
+ *				custom fine tuning far beyond what may be
 *				possible in x86_early_init_platform_quirks() by
 *				only using the current x86_hardware_subarch
 *				semantics.
@@ -210,7 +227,6 @@ struct x86_platform_ops {
 	bool (*is_untracked_pat_range)(u64 start, u64 end);
 	void (*nmi_init)(void);
 	unsigned char (*get_nmi_reason)(void);
-	int (*i8042_detect)(void);
 	void (*save_sched_clock_state)(void);
 	void (*restore_sched_clock_state)(void);
 	void (*apic_post_init)(void);
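
For context, here is a minimal sketch of case (b) from the new sync_core() comment: after patching kernel text on one CPU, every other CPU runs sync_core() in an IPI before the patched location may execute there. The helpers patch_text_bytes() and do_sync_core() are illustrative names only, not the kernel's actual text-poking API; on_each_cpu() and sync_core() are the real interfaces being exercised.

	/* Illustrative sketch only -- not part of this patch. */
	#include <linux/smp.h>
	#include <linux/string.h>
	#include <linux/types.h>
	#include <asm/processor.h>

	static void do_sync_core(void *info)
	{
		/* Runs on each CPU from IPI context; serializes that CPU. */
		sync_core();
	}

	static void patch_text_bytes(void *addr, const void *opcode, size_t len)
	{
		/* Write the new instruction bytes (mapping/locking omitted). */
		memcpy(addr, opcode, len);

		/*
		 * Send an IPI to all CPUs and wait (third argument = 1) until
		 * each one has executed sync_core(), so no CPU can keep
		 * running a stale, prefetched copy of the old instructions.
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}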
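
Likewise, a hedged sketch of how a consumer might query the new x86_platform.legacy.i8042 state in place of the removed x86_platform.i8042_detect() callback. The function example_i8042_platform_check() is hypothetical; only the enum values and the x86_platform.legacy field come from this series.

	/* Illustrative sketch only -- not part of this patch. */
	#include <linux/errno.h>
	#include <asm/x86_init.h>

	static int example_i8042_platform_check(void)
	{
		/* The platform/subarch says the controller can never exist. */
		if (x86_platform.legacy.i8042 == X86_LEGACY_I8042_PLATFORM_ABSENT)
			return -ENODEV;

		/*
		 * X86_LEGACY_I8042_FIRMWARE_ABSENT and
		 * X86_LEGACY_I8042_EXPECTED_PRESENT both fall through:
		 * the i8042 driver is still expected to probe the
		 * hardware itself.
		 */
		return 0;
	}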