Diffstat (limited to 'arch')
-rw-r--r--   arch/arm/boot/dts/am3517.dtsi      |  5
-rw-r--r--   arch/arm/boot/dts/armada-38x.dtsi  |  2
-rw-r--r--   arch/arm/kernel/head-nommu.S       |  2
-rw-r--r--   arch/arm/mach-omap2/omap-smp.c     | 41
-rw-r--r--   arch/arm/mach-pxa/irq.c            |  4
-rw-r--r--   arch/arm/mm/init.c                 |  9
-rw-r--r--   arch/arm64/Makefile                |  6
-rw-r--r--   arch/arm64/include/asm/simd.h      | 19
-rw-r--r--   arch/mips/kernel/process.c         | 43
-rw-r--r--   arch/mips/kernel/traps.c           |  1
-rw-r--r--   arch/mips/mm/ioremap.c             | 37
-rw-r--r--   arch/x86/boot/compressed/eboot.c   | 12
-rw-r--r--   arch/x86/kernel/kvmclock.c         | 12
-rw-r--r--   arch/x86/kvm/Kconfig               |  2
-rw-r--r--   arch/x86/kvm/vmx.c                 | 61
-rw-r--r--   arch/x86/kvm/x86.c                 |  4
-rw-r--r--   arch/x86/purgatory/Makefile        |  2
-rw-r--r--   arch/x86/xen/enlighten_pv.c        | 25
-rw-r--r--   arch/x86/xen/irq.c                 |  4
19 files changed, 194 insertions, 97 deletions
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index 4b6062b631b1..23ea381d363f 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -91,6 +91,11 @@
 	};
 };
 
+/* Table 5-79 of the TRM shows 480ab000 is reserved */
+&usb_otg_hs {
+	status = "disabled";
+};
+
 &iva {
 	status = "disabled";
 };
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 18edc9bc7927..929459c42760 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -547,7 +547,7 @@
 
 			thermal: thermal@e8078 {
 				compatible = "marvell,armada380-thermal";
-				reg = <0xe4078 0x4>, <0xe4074 0x4>;
+				reg = <0xe4078 0x4>, <0xe4070 0x8>;
 				status = "okay";
 			};
 
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index dd546d65a383..7a9b86978ee1 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -177,7 +177,7 @@ M_CLASS(streq	r3, [r12, #PMSAv8_MAIR1])
 	bic	r0, r0, #CR_I
 #endif
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
-	isb
+	instr_sync
 #elif defined (CONFIG_CPU_V7M)
 #ifdef CONFIG_ARM_MPU
 	ldreq	r3, [r12, MPU_CTRL]
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 69df3620eca5..1c73694c871a 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void)
 static inline void omap5_erratum_workaround_801819(void) { }
 #endif
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/*
+ * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
+ * ICIALLU) to activate the workaround for secondary Core.
+ * NOTE: it is assumed that the primary core's configuration is done
+ * by the boot loader (kernel will detect a misconfiguration and complain
+ * if this is not done).
+ *
+ * In General Purpose(GP) devices, ACR bit settings can only be done
+ * by ROM code in "secure world" using the smc call and there is no
+ * option to update the "firmware" on such devices. This also works for
+ * High security(HS) devices, as a backup option in case the
+ * "update" is not done in the "security firmware".
+ */
+static void omap5_secondary_harden_predictor(void)
+{
+	u32 acr, acr_mask;
+
+	asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+
+	/*
+	 * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
+	 */
+	acr_mask = BIT(0);
+
+	/* Do we already have it done.. if yes, skip expensive smc */
+	if ((acr & acr_mask) == acr_mask)
+		return;
+
+	acr |= acr_mask;
+	omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+	pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
+		 __func__, smp_processor_id());
+}
+#else
+static inline void omap5_secondary_harden_predictor(void) { }
+#endif
+
 static void omap4_secondary_init(unsigned int cpu)
 {
 	/*
@@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu)
 		set_cntfreq();
 		/* Configure ACR to disable streaming WA for 801819 */
 		omap5_erratum_workaround_801819();
+		/* Enable ACR to allow for ICIALLU workaround */
+		omap5_secondary_harden_predictor();
 	}
 
 	/*
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 9c10248fadcc..4e8c2116808e 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -185,7 +185,7 @@ static int pxa_irq_suspend(void)
 {
 	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+	for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
 		void __iomem *base = irq_base(i);
 
 		saved_icmr[i] = __raw_readl(base + ICMR);
@@ -204,7 +204,7 @@ static void pxa_irq_resume(void)
 {
 	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+	for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
 		void __iomem *base = irq_base(i);
 
 		__raw_writel(saved_icmr[i], base + ICMR);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c186474422f3..0cc8e04295a4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -736,20 +736,29 @@ static int __mark_rodata_ro(void *unused)
 	return 0;
 }
 
+static int kernel_set_to_readonly __read_mostly;
+
 void mark_rodata_ro(void)
 {
+	kernel_set_to_readonly = 1;
 	stop_machine(__mark_rodata_ro, NULL, NULL);
 	debug_checkwx();
 }
 
 void set_kernel_text_rw(void)
 {
+	if (!kernel_set_to_readonly)
+		return;
+
 	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
 				current->active_mm);
 }
 
 void set_kernel_text_ro(void)
 {
+	if (!kernel_set_to_readonly)
+		return;
+
 	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
 				current->active_mm);
 }
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 7976d2d242fa..e7101b19d590 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -60,13 +60,15 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 CHECKFLAGS	+= -D__AARCH64EB__
 AS		+= -EB
-LDFLAGS		+= -EB -maarch64elfb
+# We must use the linux target here, since distributions don't tend to package
+# the ELF linker scripts with binutils, and this results in a build failure.
+LDFLAGS		+= -EB -maarch64linuxb
 UTS_MACHINE	:= aarch64_be
 else
 KBUILD_CPPFLAGS	+= -mlittle-endian
 CHECKFLAGS	+= -D__AARCH64EL__
 AS		+= -EL
-LDFLAGS		+= -EL -maarch64elf
+LDFLAGS		+= -EL -maarch64linux # See comment above
 UTS_MACHINE	:= aarch64
 endif
diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h
index fa8b3fe932e6..6495cc51246f 100644
--- a/arch/arm64/include/asm/simd.h
+++ b/arch/arm64/include/asm/simd.h
@@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
 static __must_check inline bool may_use_simd(void)
 {
 	/*
-	 * The raw_cpu_read() is racy if called with preemption enabled.
-	 * This is not a bug: kernel_neon_busy is only set when
-	 * preemption is disabled, so we cannot migrate to another CPU
-	 * while it is set, nor can we migrate to a CPU where it is set.
-	 * So, if we find it clear on some CPU then we're guaranteed to
-	 * find it clear on any CPU we could migrate to.
-	 *
-	 * If we are in between kernel_neon_begin()...kernel_neon_end(),
-	 * the flag will be set, but preemption is also disabled, so we
-	 * can't migrate to another CPU and spuriously see it become
-	 * false.
+	 * kernel_neon_busy is only set while preemption is disabled,
+	 * and is clear whenever preemption is enabled. Since
+	 * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
+	 * cannot change under our feet -- if it's set we cannot be
+	 * migrated, and if it's clear we cannot be migrated to a CPU
+	 * where it is set.
 	 */
 	return !in_irq() && !irqs_disabled() && !in_nmi() &&
-	       !raw_cpu_read(kernel_neon_busy);
+	       !this_cpu_read(kernel_neon_busy);
 }
 
 #else /* ! CONFIG_KERNEL_MODE_NEON */
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 8d85046adcc8..9670e70139fd 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -29,6 +29,7 @@
 #include <linux/kallsyms.h>
 #include <linux/random.h>
 #include <linux/prctl.h>
+#include <linux/nmi.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp)
 	return sp & ALMASK;
 }
 
-static void arch_dump_stack(void *info)
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
 {
-	struct pt_regs *regs;
+	nmi_cpu_backtrace(get_irq_regs());
+	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
 
-	regs = get_irq_regs();
+static void raise_backtrace(cpumask_t *mask)
+{
+	call_single_data_t *csd;
+	int cpu;
 
-	if (regs)
-		show_regs(regs);
+	for_each_cpu(cpu, mask) {
+		/*
+		 * If we previously sent an IPI to the target CPU & it hasn't
+		 * cleared its bit in the busy cpumask then it didn't handle
+		 * our previous IPI & it's not safe for us to reuse the
+		 * call_single_data_t.
+		 */
+		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+				cpu);
+			continue;
+		}
 
-	dump_stack();
+		csd = &per_cpu(backtrace_csd, cpu);
+		csd->func = handle_backtrace;
+		smp_call_function_single_async(cpu, csd);
+	}
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	long this_cpu = get_cpu();
-
-	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
-		dump_stack();
-
-	smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
-	put_cpu();
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d67fa74622ee..8d505a21396e 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
 void show_regs(struct pt_regs *regs)
 {
 	__show_regs((struct pt_regs *)regs);
+	dump_stack();
 }
 
 void show_registers(struct pt_regs *regs)
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 1986e09fb457..1601d90b087b 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -9,6 +9,7 @@
 #include <linux/export.h>
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
+#include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 	return error;
 }
 
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+			       void *arg)
+{
+	unsigned long i;
+
+	for (i = 0; i < nr_pages; i++) {
+		if (pfn_valid(start_pfn + i) &&
+		    !PageReserved(pfn_to_page(start_pfn + i)))
+			return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Generic mapping function (not visible outside):
  */
@@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
 {
+	unsigned long offset, pfn, last_pfn;
 	struct vm_struct * area;
-	unsigned long offset;
 	phys_addr_t last_addr;
 	void * addr;
 
@@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
 		return (void __iomem *) CKSEG1ADDR(phys_addr);
 
 	/*
-	 * Don't allow anybody to remap normal RAM that we're using..
+	 * Don't allow anybody to remap RAM that may be allocated by the page
+	 * allocator, since that could lead to races & data clobbering.
 	 */
-	if (phys_addr < virt_to_phys(high_memory)) {
-		char *t_addr, *t_end;
-		struct page *page;
-
-		t_addr = __va(phys_addr);
-		t_end = t_addr + (size - 1);
-
-		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-			if(!PageReserved(page))
-				return NULL;
+	pfn = PFN_DOWN(phys_addr);
+	last_pfn = PFN_DOWN(last_addr);
+	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+				  __ioremap_check_ram) == 1) {
+		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+			  &phys_addr, &last_addr);
+		return NULL;
 	}
 
 	/*
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index e57665b4ba1c..e98522ea6f09 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -114,18 +114,12 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
 	struct pci_setup_rom *rom = NULL;
 	efi_status_t status;
 	unsigned long size;
-	uint64_t attributes, romsize;
+	uint64_t romsize;
 	void *romimage;
 
-	status = efi_call_proto(efi_pci_io_protocol, attributes, pci,
-				EfiPciIoAttributeOperationGet, 0ULL,
-				&attributes);
-	if (status != EFI_SUCCESS)
-		return status;
-
 	/*
-	 * Some firmware images contain EFI function pointers at the place where the
-	 * romimage and romsize fields are supposed to be. Typically the EFI
+	 * Some firmware images contain EFI function pointers at the place where
+	 * the romimage and romsize fields are supposed to be. Typically the EFI
 	 * code is mapped at high addresses, translating to an unrealistically
 	 * large romsize. The UEFI spec limits the size of option ROMs to 16
 	 * MiB so we reject any ROMs over 16 MiB in size to catch this.
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index bf8d1eb7fca3..3b8e7c13c614 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -138,6 +138,7 @@ static unsigned long kvm_get_tsc_khz(void)
 	src = &hv_clock[cpu].pvti;
 	tsc_khz = pvclock_tsc_khz(src);
 	put_cpu();
+	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
 	return tsc_khz;
 }
 
@@ -319,6 +320,8 @@ void __init kvmclock_init(void)
 	printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
 		msr_kvm_system_time, msr_kvm_wall_clock);
 
+	pvclock_set_pvti_cpu0_va(hv_clock);
+
 	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
 		pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
 
@@ -366,14 +369,11 @@ int __init kvm_setup_vsyscall_timeinfo(void)
 	vcpu_time = &hv_clock[cpu].pvti;
 	flags = pvclock_read_flags(vcpu_time);
 
-	if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
-		put_cpu();
-		return 1;
-	}
-
-	pvclock_set_pvti_cpu0_va(hv_clock);
 	put_cpu();
 
+	if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+		return 1;
+
 	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
 #endif
 	return 0;
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 92fd433c50b9..1bbec387d289 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -85,7 +85,7 @@ config KVM_AMD_SEV
 	def_bool y
 	bool "AMD Secure Encrypted Virtualization (SEV) support"
 	depends on KVM_AMD && X86_64
-	depends on CRYPTO_DEV_CCP && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP
+	depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
 	---help---
 	Provides support for launching Encrypted VMs on AMD processors.
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1689f433f3a0..e30da9a2430c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2571,6 +2571,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 #ifdef CONFIG_X86_64
 	int cpu = raw_smp_processor_id();
+	unsigned long fs_base, kernel_gs_base;
 #endif
 	int i;
 
@@ -2586,12 +2587,20 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
 
 #ifdef CONFIG_X86_64
-	save_fsgs_for_kvm();
-	vmx->host_state.fs_sel = current->thread.fsindex;
-	vmx->host_state.gs_sel = current->thread.gsindex;
-#else
-	savesegment(fs, vmx->host_state.fs_sel);
-	savesegment(gs, vmx->host_state.gs_sel);
+	if (likely(is_64bit_mm(current->mm))) {
+		save_fsgs_for_kvm();
+		vmx->host_state.fs_sel = current->thread.fsindex;
+		vmx->host_state.gs_sel = current->thread.gsindex;
+		fs_base = current->thread.fsbase;
+		kernel_gs_base = current->thread.gsbase;
+	} else {
+#endif
+		savesegment(fs, vmx->host_state.fs_sel);
+		savesegment(gs, vmx->host_state.gs_sel);
+#ifdef CONFIG_X86_64
+		fs_base = read_msr(MSR_FS_BASE);
+		kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+	}
 #endif
 	if (!(vmx->host_state.fs_sel & 7)) {
 		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
@@ -2611,10 +2620,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 		savesegment(ds, vmx->host_state.ds_sel);
 		savesegment(es, vmx->host_state.es_sel);
 
-	vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
+	vmcs_writel(HOST_FS_BASE, fs_base);
 	vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu));
 
-	vmx->msr_host_kernel_gs_base = current->thread.gsbase;
+	vmx->msr_host_kernel_gs_base = kernel_gs_base;
 	if (is_long_mode(&vmx->vcpu))
 		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #else
@@ -4322,11 +4331,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	vmcs_conf->order = get_order(vmcs_conf->size);
 	vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
 
-	/* KVM supports Enlightened VMCS v1 only */
-	if (static_branch_unlikely(&enable_evmcs))
-		vmcs_conf->revision_id = KVM_EVMCS_VERSION;
-	else
-		vmcs_conf->revision_id = vmx_msr_low;
+	vmcs_conf->revision_id = vmx_msr_low;
 
 	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
 	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
@@ -4396,7 +4401,13 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
 		return NULL;
 	vmcs = page_address(pages);
 	memset(vmcs, 0, vmcs_config.size);
-	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
+
+	/* KVM supports Enlightened VMCS v1 only */
+	if (static_branch_unlikely(&enable_evmcs))
+		vmcs->revision_id = KVM_EVMCS_VERSION;
+	else
+		vmcs->revision_id = vmcs_config.revision_id;
+
 	return vmcs;
 }
 
@@ -4564,6 +4575,19 @@ static __init int alloc_kvm_area(void)
 			return -ENOMEM;
 		}
 
+		/*
+		 * When eVMCS is enabled, alloc_vmcs_cpu() sets
+		 * vmcs->revision_id to KVM_EVMCS_VERSION instead of
+		 * revision_id reported by MSR_IA32_VMX_BASIC.
+		 *
+		 * However, even though not explicitly documented by
+		 * TLFS, VMXArea passed as VMXON argument should
+		 * still be marked with revision_id reported by
+		 * physical CPU.
+		 */
+		if (static_branch_unlikely(&enable_evmcs))
+			vmcs->revision_id = vmcs_config.revision_id;
+
 		per_cpu(vmxarea, cpu) = vmcs;
 	}
 	return 0;
@@ -11753,7 +11777,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-	u32 msr_entry_idx;
 	u32 exit_qual;
 	int r;
 
@@ -11775,10 +11798,10 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
 	nested_get_vmcs12_pages(vcpu, vmcs12);
 
 	r = EXIT_REASON_MSR_LOAD_FAIL;
-	msr_entry_idx = nested_vmx_load_msr(vcpu,
-					    vmcs12->vm_entry_msr_load_addr,
-					    vmcs12->vm_entry_msr_load_count);
-	if (msr_entry_idx)
+	exit_qual = nested_vmx_load_msr(vcpu,
+					vmcs12->vm_entry_msr_load_addr,
+					vmcs12->vm_entry_msr_load_count);
+	if (exit_qual)
 		goto fail;
 
 	/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0046aa70205a..2b812b3c5088 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1097,6 +1097,7 @@ static u32 msr_based_features[] = {
 
 	MSR_F10H_DECFG,
 	MSR_IA32_UCODE_REV,
+	MSR_IA32_ARCH_CAPABILITIES,
 };
 
 static unsigned int num_msr_based_features;
@@ -1105,7 +1106,8 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
 {
 	switch (msr->index) {
 	case MSR_IA32_UCODE_REV:
-		rdmsrl(msr->index, msr->data);
+	case MSR_IA32_ARCH_CAPABILITIES:
+		rdmsrl_safe(msr->index, &msr->data);
 		break;
 	default:
 		if (kvm_x86_ops->get_msr_feature(msr))
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 2e9ee023e6bc..81a8e33115ad 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -6,7 +6,7 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string.o
 targets += $(purgatory-y)
 PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 
-$(obj)/sha256.o: $(srctree)/lib/sha256.c
+$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
 	$(call if_changed_rule,cc_o_c)
 
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 8d4e2e1ae60b..439a94bf89ad 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1207,12 +1207,20 @@ asmlinkage __visible void __init xen_start_kernel(void)
 
 	xen_setup_features();
 
-	xen_setup_machphys_mapping();
-
 	/* Install Xen paravirt ops */
 	pv_info = xen_info;
 	pv_init_ops.patch = paravirt_patch_default;
 	pv_cpu_ops = xen_cpu_ops;
+	xen_init_irq_ops();
+
+	/*
+	 * Setup xen_vcpu early because it is needed for
+	 * local_irq_disable(), irqs_disabled(), e.g. in printk().
+	 *
+	 * Don't do the full vcpu_info placement stuff until we have
+	 * the cpu_possible_mask and a non-dummy shared_info.
+	 */
+	xen_vcpu_info_reset(0);
 
 	x86_platform.get_nmi_reason = xen_get_nmi_reason;
 
@@ -1225,10 +1233,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	 * Set up some pagetable state before starting to set any ptes.
 	 */
 
+	xen_setup_machphys_mapping();
 	xen_init_mmu_ops();
 
 	/* Prevent unwanted bits from being set in PTEs. */
 	__supported_pte_mask &= ~_PAGE_GLOBAL;
+	__default_kernel_pte_mask &= ~_PAGE_GLOBAL;
 
 	/*
 	 * Prevent page tables from being allocated in highmem, even
@@ -1249,20 +1259,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	get_cpu_cap(&boot_cpu_data);
 	x86_configure_nx();
 
-	xen_init_irq_ops();
-
 	/* Let's presume PV guests always boot on vCPU with id 0. */
 	per_cpu(xen_vcpu_id, 0) = 0;
 
-	/*
-	 * Setup xen_vcpu early because idt_setup_early_handler needs it for
-	 * local_irq_disable(), irqs_disabled().
-	 *
-	 * Don't do the full vcpu_info placement stuff until we have
-	 * the cpu_possible_mask and a non-dummy shared_info.
-	 */
-	xen_vcpu_info_reset(0);
-
 	idt_setup_early_handler();
 
 	xen_init_capabilities();
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 74179852e46c..7515a19fd324 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -128,8 +128,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
 
 void __init xen_init_irq_ops(void)
 {
-	/* For PVH we use default pv_irq_ops settings. */
-	if (!xen_feature(XENFEAT_hvm_callback_vector))
-		pv_irq_ops = xen_irq_ops;
+	pv_irq_ops = xen_irq_ops;
 
 	x86_init.irqs.intr_init = xen_init_IRQ;
 }