Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/acpi/cstate.c             |  2
 arch/x86/kernel/cpu/bugs.c                |  4
 arch/x86/kernel/cpu/mcheck/mce-internal.h | 15
 arch/x86/kernel/cpu/mcheck/mce.c          | 38
 arch/x86/kernel/kvm.c                     |  5
 arch/x86/kernel/paravirt.c                |  2
 arch/x86/kernel/pci-dma.c                 |  8
 arch/x86/kernel/process_64.c              |  1
 8 files changed, 23 insertions(+), 52 deletions(-)
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index dde437f5d14f..158ad1483c43 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -108,7 +108,7 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
 			 cx->type);
 	}
 	snprintf(cx->desc,
-		 ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
+		 ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
 		 cx->address);
 out:
 	return retval;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index cb4a16292aa7..4c2313d0b9ca 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -702,6 +702,10 @@ static void __init l1tf_select_mitigation(void)
 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
 	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
+			half_pa);
+		pr_info("However, doing so will make a part of your RAM unusable.\n");
+		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
 		return;
 	}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 374d1aa66952..ceb67cd5918f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -113,21 +113,6 @@ static inline void mce_register_injector_chain(struct notifier_block *nb)	{ }
 static inline void mce_unregister_injector_chain(struct notifier_block *nb)	{ }
 #endif
 
-#ifndef CONFIG_X86_64
-/*
- * On 32-bit systems it would be difficult to safely unmap a poison page
- * from the kernel 1:1 map because there are no non-canonical addresses that
- * we can use to refer to the address without risking a speculative access.
- * However, this isn't much of an issue because:
- * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which
- *    are only mapped into the kernel as needed
- * 2) Few people would run a 32-bit kernel on a machine that supports
- *    recoverable errors because they have too much memory to boot 32-bit.
- */
-static inline void mce_unmap_kpfn(unsigned long pfn) {}
-#define mce_unmap_kpfn mce_unmap_kpfn
-#endif
-
 struct mca_config {
 	bool dont_log_ce;
 	bool cmci_disabled;
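Note: the stub removed from mce-internal.h above, together with the mce.c hunks below, replaces the open-coded mce_unmap_kpfn() with a common set_mce_nospec() helper that lives outside this diff (under linux/set_memory.h). A minimal sketch of what such a helper could look like, assuming it keeps the decoy-address trick documented in the deleted mce.c comment; the real helper is not shown by this diff and may, for instance, mark the page uncacheable rather than not-present:

	/*
	 * Sketch only, not the helper merged by this series: one plausible
	 * shape for set_mce_nospec(), reusing the decoy-address trick from
	 * the mce_unmap_kpfn() deleted below.
	 */
	static inline int set_mce_nospec(unsigned long pfn)
	{
		unsigned long decoy_addr;
		int rc;

		/*
		 * Build a non-canonical alias of the poison page's 1:1
		 * address (bit 63 flipped) so the canonical virtual address
		 * never sits in a register where it could seed a
		 * speculative access to the poisoned line.
		 */
		decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

		rc = set_memory_np(decoy_addr, 1);	/* drop from 1:1 map */
		if (rc)
			pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
		return rc;
	}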
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 4b767284b7f5..953b3ce92dcc 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -42,6 +42,7 @@
 #include <linux/irq_work.h>
 #include <linux/export.h>
 #include <linux/jump_label.h>
+#include <linux/set_memory.h>
 
 #include <asm/intel-family.h>
 #include <asm/processor.h>
@@ -50,7 +51,6 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 #include <asm/reboot.h>
-#include <asm/set_memory.h>
 
 #include "mce-internal.h"
 
@@ -108,10 +108,6 @@ static struct irq_work mce_irq_work;
 
 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
 
-#ifndef mce_unmap_kpfn
-static void mce_unmap_kpfn(unsigned long pfn);
-#endif
-
 /*
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
@@ -602,7 +598,7 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
 	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
 		pfn = mce->addr >> PAGE_SHIFT;
 		if (!memory_failure(pfn, 0))
-			mce_unmap_kpfn(pfn);
+			set_mce_nospec(pfn);
 	}
 
 	return NOTIFY_OK;
@@ -1072,38 +1068,10 @@ static int do_memory_failure(struct mce *m)
 	if (ret)
 		pr_err("Memory error not recovered");
 	else
-		mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
+		set_mce_nospec(m->addr >> PAGE_SHIFT);
 	return ret;
 }
 
-#ifndef mce_unmap_kpfn
-static void mce_unmap_kpfn(unsigned long pfn)
-{
-	unsigned long decoy_addr;
-
-	/*
-	 * Unmap this page from the kernel 1:1 mappings to make sure
-	 * we don't log more errors because of speculative access to
-	 * the page.
-	 * We would like to just call:
-	 *	set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
-	 * but doing that would radically increase the odds of a
-	 * speculative access to the poison page because we'd have
-	 * the virtual address of the kernel 1:1 mapping sitting
-	 * around in registers.
-	 *
-	 * Instead we get tricky. We create a non-canonical address
-	 * that looks just like the one we want, but has bit 63 flipped.
-	 * This relies on set_memory_np() not checking whether we passed
-	 * a legal address.
-	 */
-	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
-
-	if (set_memory_np(decoy_addr, 1))
-		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
-}
-#endif
-
 /*
  * Cases where we avoid rendezvous handler timeout:
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 0f471bd93417..d9b71924c23c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -45,6 +45,7 @@
 #include <asm/apic.h>
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
+#include <asm/tlb.h>
 
 static int kvmapf = 1;
 
@@ -636,8 +637,10 @@ static void __init kvm_guest_init(void)
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
 	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
+	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
 		pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
+		pv_mmu_ops.tlb_remove_table = tlb_remove_table;
+	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 		apic_set_eoi_write(kvm_guest_apic_eoi_write);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 930c88341e4e..afdb303285f8 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -41,6 +41,7 @@
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
 #include <asm/special_insns.h>
+#include <asm/tlb.h>
 
 /*
  * nop stub, which must not clobber anything *including the stack* to
@@ -409,6 +410,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
 	.flush_tlb_kernel = native_flush_tlb_global,
 	.flush_tlb_one_user = native_flush_tlb_one_user,
 	.flush_tlb_others = native_flush_tlb_others,
+	.tlb_remove_table = (void (*)(struct mmu_gather *, void *))tlb_remove_page,
 
 	.pgd_alloc = __paravirt_pgd_alloc,
 	.pgd_free = paravirt_nop,
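Note: the kvm.c and paravirt.c hunks above pair PV TLB flushing with RCU-deferred page-table freeing: when the hypervisor flushes remote TLBs without sending IPIs, freed page tables must go through tlb_remove_table() so that concurrent lockless page-table walkers are synchronized via RCU rather than by the flush IPI. The native default reuses tlb_remove_page() through a cast, which works because struct page * and void * are passed identically in the x86 calling convention (strict ISO C would call the mismatched call undefined; the kernel relies on the ABI). A standalone sketch of that cast pattern, with stand-in types (struct mmu_gather and struct page here are dummies, not the kernel's):

	#include <stdio.h>

	struct mmu_gather { int cpu; };		/* stand-in, not the kernel's */
	struct page { unsigned long pfn; };	/* stand-in, not the kernel's */

	/* Native path: free the page table page immediately. */
	static void remove_page(struct mmu_gather *tlb, struct page *page)
	{
		printf("freeing pfn %lu now\n", page->pfn);
	}

	/* Paravirt hook type: the table argument is opaque. */
	typedef void (*remove_table_fn)(struct mmu_gather *, void *);

	int main(void)
	{
		struct mmu_gather tlb = { 0 };
		struct page pg = { 42 };

		/* Same cast as the paravirt.c hunk: safe at the ABI level
		 * because the opaque pointer really is a struct page *. */
		remove_table_fn fn = (remove_table_fn)remove_page;
		fn(&tlb, &pg);
		return 0;
	}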
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index acfd04121da3..7ba73fe0d917 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -40,8 +40,14 @@ int iommu_detected __read_mostly = 0;
  * devices and allow every device to access to whole physical memory. This is
  * useful if a user wants to use an IOMMU only for KVM device assignment to
  * guests and not for driver dma translation.
+ * It is also possible to disable by default in kernel config, and enable with
+ * iommu=nopt at boot time.
  */
+#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
+int iommu_pass_through __read_mostly = 1;
+#else
 int iommu_pass_through __read_mostly;
+#endif
 
 extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
 
@@ -135,6 +141,8 @@ static __init int iommu_setup(char *p)
 #endif
 		if (!strncmp(p, "pt", 2))
 			iommu_pass_through = 1;
+		if (!strncmp(p, "nopt", 4))
+			iommu_pass_through = 0;
 
 		gart_parse_options(p);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 476e3ddf8890..a451bc374b9b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -384,6 +384,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 	start_thread_common(regs, new_ip, new_sp,
 			    __USER_CS, __USER_DS, 0);
 }
+EXPORT_SYMBOL_GPL(start_thread);
 
 #ifdef CONFIG_COMPAT
 void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
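Note: taken together, the two pci-dma.c hunks make the IOMMU passthrough default build-time selectable and give each default a symmetric boot-time override. The resulting behavior, derived from the hunks above (all names come from the diff itself):

	CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y, no option   -> iommu_pass_through = 1 (passthrough)
	CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y, iommu=nopt  -> iommu_pass_through = 0 (DMA translation)
	CONFIG_IOMMU_DEFAULT_PASSTHROUGH=n, no option   -> iommu_pass_through = 0 (DMA translation)
	CONFIG_IOMMU_DEFAULT_PASSTHROUGH=n, iommu=pt    -> iommu_pass_through = 1 (passthrough)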