Diffstat (limited to 'arch/x86/include')
25 files changed, 130 insertions, 199 deletions
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 94c18ebfd68c..5391b0ae7cc3 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -145,7 +145,6 @@ static inline void disable_acpi(void) { }
 #define ARCH_HAS_POWER_INIT 1
 
 #ifdef CONFIG_ACPI_NUMA
-extern int acpi_numa;
 extern int x86_acpi_numa_init(void);
 #endif /* CONFIG_ACPI_NUMA */
 
@@ -170,4 +169,6 @@ static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
 }
 #endif
 
+#define ACPI_TABLE_UPGRADE_MAX_PHYS (max_low_pfn_mapped << PAGE_SHIFT)
+
 #endif /* _ASM_X86_ACPI_H */
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 59d34c521d96..9b7fa6313f1a 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -16,6 +16,7 @@ extern void prefill_possible_map(void);
 static inline void prefill_possible_map(void) {}
 
 #define cpu_physical_id(cpu) boot_cpu_physical_apicid
+#define cpu_acpi_id(cpu) 0
 #define safe_smp_processor_id() 0
 
 #endif /* CONFIG_SMP */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 483fb547e3c0..1d2b69fc0ceb 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -49,43 +49,59 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 #define test_cpu_cap(c, bit) \
 	test_bit(bit, (unsigned long *)((c)->x86_capability))
 
-#define REQUIRED_MASK_BIT_SET(bit) \
- ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0 )) || \
- (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1 )) || \
- (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2 )) || \
- (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3 )) || \
- (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4 )) || \
- (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5 )) || \
- (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6 )) || \
- (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7 )) || \
- (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8 )) || \
- (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9 )) || \
- (((bit)>>5)==10 && (1UL<<((bit)&31) & REQUIRED_MASK10)) || \
- (((bit)>>5)==11 && (1UL<<((bit)&31) & REQUIRED_MASK11)) || \
- (((bit)>>5)==12 && (1UL<<((bit)&31) & REQUIRED_MASK12)) || \
- (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK13)) || \
- (((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK14)) || \
- (((bit)>>5)==15 && (1UL<<((bit)&31) & REQUIRED_MASK15)) || \
- (((bit)>>5)==16 && (1UL<<((bit)&31) & REQUIRED_MASK16)) )
-
-#define DISABLED_MASK_BIT_SET(bit) \
- ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0 )) || \
- (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1 )) || \
- (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2 )) || \
- (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3 )) || \
- (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4 )) || \
- (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5 )) || \
- (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6 )) || \
- (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7 )) || \
- (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8 )) || \
- (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9 )) || \
- (((bit)>>5)==10 && (1UL<<((bit)&31) & DISABLED_MASK10)) || \
- (((bit)>>5)==11 && (1UL<<((bit)&31) & DISABLED_MASK11)) || \
- (((bit)>>5)==12 && (1UL<<((bit)&31) & DISABLED_MASK12)) || \
- (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK13)) || \
- (((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK14)) || \
- (((bit)>>5)==15 && (1UL<<((bit)&31) & DISABLED_MASK15)) || \
- (((bit)>>5)==16 && (1UL<<((bit)&31) & DISABLED_MASK16)) )
+/*
+ * There are 32 bits/features in each mask word.  The high bits
+ * (selected with (bit>>5) give us the word number and the low 5
+ * bits give us the bit/feature number inside the word.
+ * (1UL<<((bit)&31) gives us a mask for the feature_bit so we can
+ * see if it is set in the mask word.
+ */
+#define CHECK_BIT_IN_MASK_WORD(maskname, word, bit) \
+	(((bit)>>5)==(word) && (1UL<<((bit)&31) & maskname##word ))
+
+#define REQUIRED_MASK_BIT_SET(feature_bit) \
+	( CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 0, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 1, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 2, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 3, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 4, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 5, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 6, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 7, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 8, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 9, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 10, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 11, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 12, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 13, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 14, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
+	  REQUIRED_MASK_CHECK || \
+	  BUILD_BUG_ON_ZERO(NCAPINTS != 18))
+
+#define DISABLED_MASK_BIT_SET(feature_bit) \
+	( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 1, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 2, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 3, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 4, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 5, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 6, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 7, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 8, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 9, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 10, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 11, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 12, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 13, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 14, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
+	  CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
+	  DISABLED_MASK_CHECK || \
+	  BUILD_BUG_ON_ZERO(NCAPINTS != 18))
 
 #define cpu_has(c, bit) \
 	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index c64b1e9c5d1a..92a8308b96f6 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -225,7 +225,6 @@
 #define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
 #define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
 #define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
 #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
 #define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
 #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
@@ -310,5 +309,5 @@
 #endif
 #define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
 #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
-
+#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 911e9358ceb1..85599ad4d024 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -56,5 +56,7 @@
 #define DISABLED_MASK14 0
 #define DISABLED_MASK15 0
 #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)
+#define DISABLED_MASK17 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
 #endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h
index 74a2e312e8a2..02aff0867211 100644
--- a/arch/x86/include/asm/inat.h
+++ b/arch/x86/include/asm/inat.h
@@ -48,6 +48,7 @@
 /* AVX VEX prefixes */
 #define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */
 #define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */
+#define INAT_PFX_EVEX 15 /* EVEX prefix */
 
 #define INAT_LSTPFX_MAX 3
 #define INAT_LGCPFX_MAX 11
@@ -89,6 +90,7 @@
 #define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4))
 #define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5))
 #define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6))
+#define INAT_EVEXONLY (1 << (INAT_FLAG_OFFS + 7))
 /* Attribute making macros for attribute tables */
 #define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
 #define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
@@ -141,7 +143,13 @@ static inline int inat_last_prefix_id(insn_attr_t attr)
 static inline int inat_is_vex_prefix(insn_attr_t attr)
 {
 	attr &= INAT_PFX_MASK;
-	return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3;
+	return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3 ||
+	       attr == INAT_PFX_EVEX;
+}
+
+static inline int inat_is_evex_prefix(insn_attr_t attr)
+{
+	return (attr & INAT_PFX_MASK) == INAT_PFX_EVEX;
 }
 
 static inline int inat_is_vex3_prefix(insn_attr_t attr)
@@ -216,6 +224,11 @@ static inline int inat_accept_vex(insn_attr_t attr)
 
 static inline int inat_must_vex(insn_attr_t attr)
 {
-	return attr & INAT_VEXONLY;
+	return attr & (INAT_VEXONLY | INAT_EVEXONLY);
+}
+
+static inline int inat_must_evex(insn_attr_t attr)
+{
+	return attr & INAT_EVEXONLY;
 }
 #endif
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index e7814b74caf8..b3e32b010ab1 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -91,6 +91,7 @@ struct insn {
 #define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */
 #define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */
 /* VEX bit fields */
+#define X86_EVEX_M(vex) ((vex) & 0x03) /* EVEX Byte1 */
 #define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */
 #define X86_VEX2_M 1 /* VEX2.M always 1 */
 #define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */
@@ -133,6 +134,13 @@ static inline int insn_is_avx(struct insn *insn)
 	return (insn->vex_prefix.value != 0);
 }
 
+static inline int insn_is_evex(struct insn *insn)
+{
+	if (!insn->prefixes.got)
+		insn_get_prefixes(insn);
+	return (insn->vex_prefix.nbytes == 4);
+}
+
 /* Ensure this instruction is decoded completely */
 static inline int insn_complete(struct insn *insn)
 {
@@ -144,8 +152,10 @@ static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
 {
 	if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
 		return X86_VEX2_M;
-	else
+	else if (insn->vex_prefix.nbytes == 3) /* 3 bytes VEX */
 		return X86_VEX3_M(insn->vex_prefix.bytes[1]);
+	else /* EVEX */
+		return X86_EVEX_M(insn->vex_prefix.bytes[1]);
 }
 
 static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 6999f7d01a0d..627719475457 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -8,7 +8,7 @@
  * "Extreme" ones, like Broadwell-E.
  *
  * Things ending in "2" are usually because we have no better
- * name for them.  There's no processor called "WESTMERE2".
+ * name for them.  There's no processor called "SILVERMONT2".
  */
 
 #define INTEL_FAM6_CORE_YONAH 0x0E
@@ -18,10 +18,10 @@
 #define INTEL_FAM6_CORE2_DUNNINGTON 0x1D
 
 #define INTEL_FAM6_NEHALEM 0x1E
+#define INTEL_FAM6_NEHALEM_G 0x1F /* Auburndale / Havendale */
 #define INTEL_FAM6_NEHALEM_EP 0x1A
 #define INTEL_FAM6_NEHALEM_EX 0x2E
 
 #define INTEL_FAM6_WESTMERE 0x25
-#define INTEL_FAM6_WESTMERE2 0x1F
 #define INTEL_FAM6_WESTMERE_EP 0x2C
 #define INTEL_FAM6_WESTMERE_EX 0x2F
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index a7f9181f63f3..ed80003ce3e2 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -22,7 +22,6 @@
 #define _ASM_X86_LIVEPATCH_H
 
 #include <asm/setup.h>
-#include <linux/module.h>
 #include <linux/ftrace.h>
 
 static inline int klp_check_compiler_support(void)
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 9d3a96c4da78..da0d81fa0b54 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -133,40 +133,14 @@ static inline unsigned int x86_cpuid_family(void)
 
 #ifdef CONFIG_MICROCODE
 extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
-extern int __init save_microcode_in_initrd(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
 #else
 static inline void __init load_ucode_bsp(void) { }
 static inline void load_ucode_ap(void) { }
-static inline int __init save_microcode_in_initrd(void) { return 0; }
 static inline void reload_early_microcode(void) { }
 static inline bool get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
 #endif
 
-static inline unsigned long get_initrd_start(void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
-	return initrd_start;
-#else
-	return 0;
-#endif
-}
-
-static inline unsigned long get_initrd_start_addr(void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
-#ifdef CONFIG_X86_32
-	unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
-
-	return (unsigned long)__pa_nodebug(*initrd_start_p);
-#else
-	return get_initrd_start();
-#endif
-#else /* CONFIG_BLK_DEV_INITRD */
-	return 0;
-#endif
-}
-
 #endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index adfc847a395e..15eb75484cc0 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -62,7 +62,6 @@ extern int apply_microcode_amd(int cpu);
 extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
 
 #define PATCH_MAX_SIZE PAGE_SIZE
-extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
 
 #ifdef CONFIG_MICROCODE_AMD
 extern void __init load_ucode_amd_bsp(unsigned int family);
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 603417f8dd6c..5e69154c9f07 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -70,9 +70,4 @@ static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL;
 static inline void reload_ucode_intel(void) {}
 #endif
 
-#ifdef CONFIG_HOTPLUG_CPU
-extern int save_mc_for_early(u8 *mc);
-#else
-static inline int save_mc_for_early(u8 *mc) { return 0; }
-#endif
 #endif /* _ASM_X86_MICROCODE_INTEL_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 5a73a9c62c39..56f4c6676b29 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -64,8 +64,6 @@
 #define MSR_OFFCORE_RSP_0 0x000001a6
 #define MSR_OFFCORE_RSP_1 0x000001a7
 
-#define MSR_NHM_TURBO_RATIO_LIMIT 0x000001ad
-#define MSR_IVT_TURBO_RATIO_LIMIT 0x000001ae
 #define MSR_TURBO_RATIO_LIMIT 0x000001ad
 #define MSR_TURBO_RATIO_LIMIT1 0x000001ae
 #define MSR_TURBO_RATIO_LIMIT2 0x000001af
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 0deeb2d26df7..f37f2d8a2989 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -97,7 +97,7 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
  */
 static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 {
-	if (!current_set_polling_and_test()) {
+	if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
 		if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
 			mb();
 			clflush((void *)&current_thread_info()->flags);
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index 3a52ee0e726d..3bae4969ac65 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -13,7 +13,8 @@
  * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
  * and CONFIG_HIGHMEM64G options in the kernel configuration.
  */
-#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+#define __PAGE_OFFSET_BASE _AC(CONFIG_PAGE_OFFSET, UL)
+#define __PAGE_OFFSET __PAGE_OFFSET_BASE
 
 #define __START_KERNEL_map __PAGE_OFFSET
 
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index 574c23cf761a..b6d425999f99 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -81,7 +81,11 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	struct page *page;
-	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
+
+	if (mm == &init_mm)
+		gfp &= ~__GFP_ACCOUNT;
+	page = alloc_pages(gfp, 0);
 	if (!page)
 		return NULL;
 	if (!pgtable_pmd_page_ctor(page)) {
@@ -125,7 +129,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pud_t *)get_zeroed_page(GFP_KERNEL);
+	gfp_t gfp = GFP_KERNEL_ACCOUNT;
+
+	if (mm == &init_mm)
+		gfp &= ~__GFP_ACCOUNT;
+	return (pud_t *)get_zeroed_page(gfp);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index fbc5e92e1ecc..643eba42d620 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -26,13 +26,11 @@
  * @n: length of the copy in bytes
  *
  * Copy data to persistent memory media via non-temporal stores so that
- * a subsequent arch_wmb_pmem() can flush cpu and memory controller
- * write buffers to guarantee durability.
+ * a subsequent pmem driver flush operation will drain posted write queues.
  */
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
-		size_t n)
+static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
 {
-	int unwritten;
+	int rem;
 
 	/*
 	 * We are copying between two kernel buffers, if
@@ -40,59 +38,36 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
 	 * fault) we would have already reported a general protection fault
 	 * before the WARN+BUG.
 	 */
-	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
-			(void __user *) src, n);
-	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
-				__func__, dst, src, unwritten))
+	rem = __copy_from_user_inatomic_nocache(dst, (void __user *) src, n);
+	if (WARN(rem, "%s: fault copying %p <- %p unwritten: %d\n",
+				__func__, dst, src, rem))
 		BUG();
 }
 
-static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
-		size_t n)
+static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
 {
 	if (static_cpu_has(X86_FEATURE_MCE_RECOVERY))
-		return memcpy_mcsafe(dst, (void __force *) src, n);
-	memcpy(dst, (void __force *) src, n);
+		return memcpy_mcsafe(dst, src, n);
+	memcpy(dst, src, n);
 	return 0;
 }
 
 /**
- * arch_wmb_pmem - synchronize writes to persistent memory
- *
- * After a series of arch_memcpy_to_pmem() operations this drains data
- * from cpu write buffers and any platform (memory controller) buffers
- * to ensure that written data is durable on persistent memory media.
- */
-static inline void arch_wmb_pmem(void)
-{
-	/*
-	 * wmb() to 'sfence' all previous writes such that they are
-	 * architecturally visible to 'pcommit'.  Note, that we've
-	 * already arranged for pmem writes to avoid the cache via
-	 * arch_memcpy_to_pmem().
-	 */
-	wmb();
-	pcommit_sfence();
-}
-
-/**
  * arch_wb_cache_pmem - write back a cache range with CLWB
  * @vaddr: virtual start address
  * @size: number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
- * instruction.  This function requires explicit ordering with an
- * arch_wmb_pmem() call.
+ * instruction.
  */
-static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
 {
 	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
 	unsigned long clflush_mask = x86_clflush_size - 1;
-	void *vaddr = (void __force *)addr;
-	void *vend = vaddr + size;
+	void *vend = addr + size;
 	void *p;
 
-	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
+	for (p = (void *)((unsigned long)addr & ~clflush_mask);
 	     p < vend; p += x86_clflush_size)
 		clwb(p);
 }
@@ -113,16 +88,14 @@ static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
 * @i: iterator with source data
 *
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
- * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
-static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 		struct iov_iter *i)
 {
-	void *vaddr = (void __force *)addr;
 	size_t len;
 
 	/* TODO: skip the write-back by always using non-temporal stores */
-	len = copy_from_iter_nocache(vaddr, bytes, i);
+	len = copy_from_iter_nocache(addr, bytes, i);
 
 	if (__iter_needs_pmem_wb(i))
 		arch_wb_cache_pmem(addr, bytes);
@@ -136,28 +109,16 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
 * @size: number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
- * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
-static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+static inline void arch_clear_pmem(void *addr, size_t size)
 {
-	void *vaddr = (void __force *)addr;
-
-	memset(vaddr, 0, size);
+	memset(addr, 0, size);
 	arch_wb_cache_pmem(addr, size);
 }
 
-static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
+static inline void arch_invalidate_pmem(void *addr, size_t size)
 {
-	clflush_cache_range((void __force *) addr, size);
-}
-
-static inline bool __arch_has_wmb_pmem(void)
-{
-	/*
-	 * We require that wmb() be an 'sfence', that is only guaranteed on
-	 * 64-bit builds
-	 */
-	return static_cpu_has(X86_FEATURE_PCOMMIT);
+	clflush_cache_range(addr, size);
 }
 #endif /* CONFIG_ARCH_HAS_PMEM_API */
 #endif /* __ASM_X86_PMEM_H__ */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 6271281f947d..2b5d686ea9f3 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -83,12 +83,6 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
 			 int error_code, int si_code);
 
-extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch);
-extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch,
-			unsigned long phase1_result);
-
-extern long syscall_trace_enter(struct pt_regs *);
-
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
 	return regs->ax;
 }
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 4916144e3c42..fac9a5c0abe9 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -99,5 +99,7 @@
 #define REQUIRED_MASK14 0
 #define REQUIRED_MASK15 0
 #define REQUIRED_MASK16 0
+#define REQUIRED_MASK17 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
 #endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 0576b6157f3a..ebd0c164cd4e 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -33,6 +33,7 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
 }
 
 DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
 DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
 DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
@@ -135,6 +136,7 @@ int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_cpu_disable(void);
 int common_cpu_die(unsigned int cpu);
 void native_cpu_die(unsigned int cpu);
+void hlt_play_dead(void);
 void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
@@ -147,6 +149,7 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
 void smp_store_boot_cpu_info(void);
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
+#define cpu_acpi_id(cpu) per_cpu(x86_cpu_to_acpiid, cpu)
 
 #else /* !CONFIG_SMP */
 #define wbinvd_on_cpu(cpu) wbinvd()
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index d96d04377765..587d7914ea4b 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -253,52 +253,6 @@ static inline void clwb(volatile void *__p)
 		: [pax] "a" (p));
 }
 
-/**
- * pcommit_sfence() - persistent commit and fence
- *
- * The PCOMMIT instruction ensures that data that has been flushed from the
- * processor's cache hierarchy with CLWB, CLFLUSHOPT or CLFLUSH is accepted to
- * memory and is durable on the DIMM.  The primary use case for this is
- * persistent memory.
- *
- * This function shows how to properly use CLWB/CLFLUSHOPT/CLFLUSH and PCOMMIT
- * with appropriate fencing.
- *
- * Example:
- * void flush_and_commit_buffer(void *vaddr, unsigned int size)
- * {
- *	unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
- *	void *vend = vaddr + size;
- *	void *p;
- *
- *	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
- *	     p < vend; p += boot_cpu_data.x86_clflush_size)
- *		clwb(p);
- *
- *	// SFENCE to order CLWB/CLFLUSHOPT/CLFLUSH cache flushes
- *	// MFENCE via mb() also works
- *	wmb();
- *
- *	// PCOMMIT and the required SFENCE for ordering
- *	pcommit_sfence();
- * }
- *
- * After this function completes the data pointed to by 'vaddr' has been
- * accepted to memory and will be durable if the 'vaddr' points to persistent
- * memory.
- *
- * PCOMMIT must always be ordered by an MFENCE or SFENCE, so to help simplify
- * things we include both the PCOMMIT and the required SFENCE in the
- * alternatives generated by pcommit_sfence().
- */
-static inline void pcommit_sfence(void)
-{
-	alternative(ASM_NOP7,
-		    ".byte 0x66, 0x0f, 0xae, 0xf8\n\t" /* pcommit */
-		    "sfence",
-		    X86_FEATURE_PCOMMIT);
-}
-
 #define nop() asm volatile ("nop")
 
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 43e87a3dd95c..cf75871d2f81 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -36,6 +36,7 @@
 #include <linux/cpumask.h>
 
 #include <asm/mpspec.h>
+#include <asm/percpu.h>
 
 /* Mappings between logical cpu number and node number */
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 14c63c7e8337..a002b07a7099 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -72,7 +72,6 @@
 #define SECONDARY_EXEC_SHADOW_VMCS 0x00004000
 #define SECONDARY_EXEC_ENABLE_PML 0x00020000
 #define SECONDARY_EXEC_XSAVES 0x00100000
-#define SECONDARY_EXEC_PCOMMIT 0x00200000
 #define SECONDARY_EXEC_TSC_SCALING 0x02000000
 
 #define PIN_BASED_EXT_INTR_MASK 0x00000001
diff --git a/arch/x86/include/asm/xen/cpuid.h b/arch/x86/include/asm/xen/cpuid.h
index 0d809e9fc975..3bdd10d71223 100644
--- a/arch/x86/include/asm/xen/cpuid.h
+++ b/arch/x86/include/asm/xen/cpuid.h
@@ -76,15 +76,18 @@
 /*
  * Leaf 5 (0x40000x04)
  * HVM-specific features
+ * EAX: Features
+ * EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
  */
 
-/* EAX Features */
 /* Virtualized APIC registers */
 #define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0)
 /* Virtualized x2APIC accesses */
 #define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1)
 /* Memory mapped from other domains has valid IOMMU entries */
 #define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
+/* vcpu id is present in EBX */
+#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3)
 
 #define XEN_CPUID_MAX_NUM_LEAVES 4
 
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 5b15d94a33f8..37fee272618f 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -78,7 +78,6 @@
 #define EXIT_REASON_PML_FULL 62
 #define EXIT_REASON_XSAVES 63
 #define EXIT_REASON_XRSTORS 64
-#define EXIT_REASON_PCOMMIT 65
 
 #define VMX_EXIT_REASONS \
 	{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
@@ -127,8 +126,7 @@
 	{ EXIT_REASON_INVVPID, "INVVPID" }, \
 	{ EXIT_REASON_INVPCID, "INVPCID" }, \
 	{ EXIT_REASON_XSAVES, "XSAVES" }, \
-	{ EXIT_REASON_XRSTORS, "XRSTORS" }, \
-	{ EXIT_REASON_PCOMMIT, "PCOMMIT" }
+	{ EXIT_REASON_XRSTORS, "XRSTORS" }
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4