Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/cpu_entry_area.h    | 12
-rw-r--r--  arch/x86/include/asm/device.h             |  3
-rw-r--r--  arch/x86/include/asm/dma-direct.h         |  9
-rw-r--r--  arch/x86/include/asm/doublefault.h        | 13
-rw-r--r--  arch/x86/include/asm/fpu/internal.h       |  2
-rw-r--r--  arch/x86/include/asm/ftrace.h             | 13
-rw-r--r--  arch/x86/include/asm/io.h                 |  7
-rw-r--r--  arch/x86/include/asm/iommu.h              | 18
-rw-r--r--  arch/x86/include/asm/mmu_context.h        |  4
-rw-r--r--  arch/x86/include/asm/mshyperv.h           |  1
-rw-r--r--  arch/x86/include/asm/pgtable_32_types.h   |  7
-rw-r--r--  arch/x86/include/asm/processor.h          |  2
-rw-r--r--  arch/x86/include/asm/traps.h              |  3
-rw-r--r--  arch/x86/include/asm/unwind_hints.h       |  8
-rw-r--r--  arch/x86/include/uapi/asm/msgbuf.h        |  6
-rw-r--r--  arch/x86/include/uapi/asm/sembuf.h        |  4
-rw-r--r--  arch/x86/include/uapi/asm/shmbuf.h        |  6
17 files changed, 85 insertions, 33 deletions
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index ea866c7bf31d..804734058c77 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -65,6 +65,13 @@ enum exception_stack_ordering {
 
 #endif
 
+#ifdef CONFIG_X86_32
+struct doublefault_stack {
+	unsigned long stack[(PAGE_SIZE - sizeof(struct x86_hw_tss)) / sizeof(unsigned long)];
+	struct x86_hw_tss tss;
+} __aligned(PAGE_SIZE);
+#endif
+
 /*
  * cpu_entry_area is a percpu region that contains things needed by the CPU
  * and early entry/exit code. Real types aren't used for all fields here
@@ -86,6 +93,11 @@ struct cpu_entry_area {
 #endif
 	struct entry_stack_page entry_stack_page;
 
+#ifdef CONFIG_X86_32
+	char guard_doublefault_stack[PAGE_SIZE];
+	struct doublefault_stack doublefault_stack;
+#endif
+
 	/*
 	 * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because
 	 * we need task switches to work, and task switches write to the TSS.
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index a8f6c809d9b1..5e12c63b47aa 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -6,9 +6,6 @@ struct dev_archdata {
 #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
 	void *iommu; /* hook for IOMMU specific extension */
 #endif
-#ifdef CONFIG_STA2X11
-	bool is_sta2x11;
-#endif
 };
 
 #if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
diff --git a/arch/x86/include/asm/dma-direct.h b/arch/x86/include/asm/dma-direct.h
deleted file mode 100644
index 1a19251eaac9..000000000000
--- a/arch/x86/include/asm/dma-direct.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_X86_DMA_DIRECT_H
-#define ASM_X86_DMA_DIRECT_H 1
-
-bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
-
-#endif /* ASM_X86_DMA_DIRECT_H */
diff --git a/arch/x86/include/asm/doublefault.h b/arch/x86/include/asm/doublefault.h
new file mode 100644
index 000000000000..af9a14ac8962
--- /dev/null
+++ b/arch/x86/include/asm/doublefault.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_DOUBLEFAULT_H
+#define _ASM_X86_DOUBLEFAULT_H
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_DOUBLEFAULT)
+extern void doublefault_init_cpu_tss(void);
+#else
+static inline void doublefault_init_cpu_tss(void)
+{
+}
+#endif
+
+#endif /* _ASM_X86_DOUBLEFAULT_H */
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 4c95c365058a..44c48e34d799 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -509,7 +509,7 @@ static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
 
 static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
 {
-	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
+	return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
 }
 
 /*
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index c38a66661576..c2a7458f912c 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -28,6 +28,19 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 	return addr;
 }
 
+/*
+ * When a ftrace registered caller is tracing a function that is
+ * also set by a register_ftrace_direct() call, it needs to be
+ * differentiated in the ftrace_caller trampoline. To do this, we
+ * place the direct caller in the ORIG_AX part of pt_regs. This
+ * tells the ftrace_caller that there's a direct caller.
+ */
+static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
+{
+	/* Emulate a call */
+	regs->orig_ax = addr;
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 struct dyn_arch_ftrace {
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 6bed97ff6db2..9997521fc5cd 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -180,8 +180,6 @@ static inline unsigned int isa_virt_to_bus(volatile void *address)
  * The default ioremap() behavior is non-cached; if you need something
  * else, you probably want one of the following.
  */
-extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
-#define ioremap_nocache ioremap_nocache
 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
 #define ioremap_uc ioremap_uc
 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
@@ -205,10 +203,7 @@ extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long
  * If the area you are trying to map is a PCI BAR you should have a
  * look at pci_iomap().
  */
-static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
-{
-	return ioremap_nocache(offset, size);
-}
+void __iomem *ioremap(resource_size_t offset, unsigned long size);
 #define ioremap ioremap
 
 extern void iounmap(volatile void __iomem *addr);
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index b91623d521d9..bf1ed2ddc74b 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -2,10 +2,28 @@
 #ifndef _ASM_X86_IOMMU_H
 #define _ASM_X86_IOMMU_H
 
+#include <linux/acpi.h>
+
+#include <asm/e820/api.h>
+
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
 
+static inline int __init
+arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
+{
+	u64 start = rmrr->base_address;
+	u64 end = rmrr->end_address + 1;
+
+	if (e820__mapped_all(start, end, E820_TYPE_RESERVED))
+		return 0;
+
+	pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n",
+	       start, end - 1);
+	return -EINVAL;
+}
+
 #endif /* _ASM_X86_IOMMU_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 16ae821483c8..5f33924e200f 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -26,12 +26,14 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 
 #ifdef CONFIG_PERF_EVENTS
 
+DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
 DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
 
 static inline void load_mm_cr4_irqsoff(struct mm_struct *mm)
 {
 	if (static_branch_unlikely(&rdpmc_always_available_key) ||
-	    atomic_read(&mm->context.perf_rdpmc_allowed))
+	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
+	     atomic_read(&mm->context.perf_rdpmc_allowed)))
 		cr4_set_bits_irqsoff(X86_CR4_PCE);
 	else
 		cr4_clear_bits_irqsoff(X86_CR4_PCE);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index f4138aeb4280..6b79515abb82 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -219,6 +219,7 @@ static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
 void __init hyperv_init(void);
 void hyperv_setup_mmu_ops(void);
 void *hv_alloc_hyperv_page(void);
+void *hv_alloc_hyperv_zeroed_page(void);
 void hv_free_hyperv_page(unsigned long addr);
 void hyperv_reenlightenment_intr(struct pt_regs *regs);
 void set_hv_tscchange_cb(void (*cb)(void));
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
index 19f5807260c3..0416d42e5bdd 100644
--- a/arch/x86/include/asm/pgtable_32_types.h
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -41,10 +41,11 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
 #endif
 
 /*
- * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
- * to avoid include recursion hell
+ * This is an upper bound on sizeof(struct cpu_entry_area) / PAGE_SIZE.
+ * Define this here and validate with BUILD_BUG_ON() in cpu_entry_area.c
+ * to avoid include recursion hell.
  */
-#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 41)
+#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 43)
 
 /* The +1 is for the readonly IDT page: */
 #define CPU_ENTRY_AREA_BASE \
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index e51afbb0cbfb..0340aad3f2fc 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -166,7 +166,6 @@ enum cpuid_regs_idx {
 
 extern struct cpuinfo_x86 boot_cpu_data;
 extern struct cpuinfo_x86 new_cpu_data;
-extern struct x86_hw_tss doublefault_tss;
 extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
 extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS];
 
@@ -997,7 +996,6 @@ bool xen_set_default_idle(void);
 #endif
 
 void stop_this_cpu(void *dummy);
-void df_debug(struct pt_regs *regs, long error_code);
 void microcode_check(void);
 
 enum l1tf_mitigations {
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index b25e633033c3..ffa0dc8a535e 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -69,6 +69,9 @@ dotraplinkage void do_overflow(struct pt_regs *regs, long error_code);
 dotraplinkage void do_bounds(struct pt_regs *regs, long error_code);
 dotraplinkage void do_invalid_op(struct pt_regs *regs, long error_code);
 dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code);
+#if defined(CONFIG_X86_64) || defined(CONFIG_DOUBLEFAULT)
+dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long cr2);
+#endif
 dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long error_code);
 dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code);
 dotraplinkage void do_segment_not_present(struct pt_regs *regs, long error_code);
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
index 0bcdb1279361..f5e2eb12cb71 100644
--- a/arch/x86/include/asm/unwind_hints.h
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -86,6 +86,14 @@
 	UNWIND_HINT sp_offset=\sp_offset
 .endm
 
+.macro UNWIND_HINT_SAVE
+	UNWIND_HINT type=UNWIND_HINT_TYPE_SAVE
+.endm
+
+.macro UNWIND_HINT_RESTORE
+	UNWIND_HINT type=UNWIND_HINT_TYPE_RESTORE
+.endm
+
 #else /* !__ASSEMBLY__ */
 
 #define UNWIND_HINT(sp_reg, sp_offset, type, end) \
diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index 90ab9a795b49..7c5bb43ed8af 100644
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
@@ -15,9 +15,9 @@
 struct msqid64_ds {
 	struct ipc64_perm msg_perm;
-	__kernel_time_t msg_stime;	/* last msgsnd time */
-	__kernel_time_t msg_rtime;	/* last msgrcv time */
-	__kernel_time_t msg_ctime;	/* last change time */
+	__kernel_long_t msg_stime;	/* last msgsnd time */
+	__kernel_long_t msg_rtime;	/* last msgrcv time */
+	__kernel_long_t msg_ctime;	/* last change time */
 	__kernel_ulong_t msg_cbytes;	/* current number of bytes on queue */
 	__kernel_ulong_t msg_qnum;	/* number of messages in queue */
 	__kernel_ulong_t msg_qbytes;	/* max number of bytes on queue */
diff --git a/arch/x86/include/uapi/asm/sembuf.h b/arch/x86/include/uapi/asm/sembuf.h
index 89de6cd9f0a7..93030e97269a 100644
--- a/arch/x86/include/uapi/asm/sembuf.h
+++ b/arch/x86/include/uapi/asm/sembuf.h
@@ -21,9 +21,9 @@ struct semid64_ds {
 	unsigned long sem_ctime;	/* last change time */
 	unsigned long sem_ctime_high;
 #else
-	__kernel_time_t sem_otime;	/* last semop time */
+	__kernel_long_t sem_otime;	/* last semop time */
 	__kernel_ulong_t __unused1;
-	__kernel_time_t sem_ctime;	/* last change time */
+	__kernel_long_t sem_ctime;	/* last change time */
 	__kernel_ulong_t __unused2;
 #endif
 	__kernel_ulong_t sem_nsems;	/* no. of semaphores in array */
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index 644421f3823b..f0305dc660c9 100644
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
@@ -16,9 +16,9 @@
 struct shmid64_ds {
 	struct ipc64_perm shm_perm;	/* operation perms */
 	size_t shm_segsz;		/* size of segment (bytes) */
-	__kernel_time_t shm_atime;	/* last attach time */
-	__kernel_time_t shm_dtime;	/* last detach time */
-	__kernel_time_t shm_ctime;	/* last change time */
+	__kernel_long_t shm_atime;	/* last attach time */
+	__kernel_long_t shm_dtime;	/* last detach time */
+	__kernel_long_t shm_ctime;	/* last change time */
 	__kernel_pid_t shm_cpid;	/* pid of creator */
 	__kernel_pid_t shm_lpid;	/* pid of last operator */
 	__kernel_ulong_t shm_nattch;	/* no. of current attaches */