Diffstat (limited to 'tools')
86 files changed, 2675 insertions, 632 deletions
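Among the hunks below, the arm and arm64 uapi kvm.h headers gain register IDs for the physical timer (KVM_REG_ARM_PTIMER_CTL, KVM_REG_ARM_PTIMER_CVAL, KVM_REG_ARM_PTIMER_CNT) next to the existing virtual-timer IDs. A minimal userspace sketch of how such an ID is consumed, assuming vcpu_fd is an already-created KVM vCPU file descriptor; KVM_GET_ONE_REG and struct kvm_one_reg are the standard <linux/kvm.h> interface, the register ID comes from the patched <asm/kvm.h>, everything else is illustrative:

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>   /* struct kvm_one_reg, KVM_GET_ONE_REG; pulls in asm/kvm.h */

  /* Read the guest's physical counter through the new register ID. */
  static int read_ptimer_cnt(int vcpu_fd, uint64_t *cntpct)
  {
          struct kvm_one_reg reg = {
                  .id   = KVM_REG_ARM_PTIMER_CNT,
                  .addr = (uint64_t)(unsigned long)cntpct,
          };

          /* On kernels that predate this patch the ioctl fails
           * (typically with ENOENT for an unknown register ID). */
          return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
  }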
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index 1f57bbe82b6f..6edd177bb1c7 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h @@ -152,6 +152,12 @@ struct kvm_arch_memory_slot {  	(__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)  #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__) +/* PL1 Physical Timer Registers */ +#define KVM_REG_ARM_PTIMER_CTL		ARM_CP15_REG32(0, 14, 2, 1) +#define KVM_REG_ARM_PTIMER_CNT		ARM_CP15_REG64(0, 14) +#define KVM_REG_ARM_PTIMER_CVAL		ARM_CP15_REG64(2, 14) + +/* Virtual Timer Registers */  #define KVM_REG_ARM_TIMER_CTL		ARM_CP15_REG32(0, 14, 3, 1)  #define KVM_REG_ARM_TIMER_CNT		ARM_CP15_REG64(1, 14)  #define KVM_REG_ARM_TIMER_CVAL		ARM_CP15_REG64(3, 14) @@ -216,6 +222,7 @@ struct kvm_arch_memory_slot {  #define   KVM_DEV_ARM_ITS_SAVE_TABLES		1  #define   KVM_DEV_ARM_ITS_RESTORE_TABLES	2  #define   KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES	3 +#define   KVM_DEV_ARM_ITS_CTRL_RESET		4  /* KVM_IRQ_LINE irq field index values */  #define KVM_ARM_IRQ_TYPE_SHIFT		24 diff --git a/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 000000000000..b551b741653d --- /dev/null +++ b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ +#define _UAPI__ASM_BPF_PERF_EVENT_H__ + +#include <asm/ptrace.h> + +typedef struct user_pt_regs bpf_user_pt_regs_t; + +#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */ diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index 51149ec75fe4..9abbf3044654 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -196,6 +196,12 @@ struct kvm_arch_memory_slot {  #define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64) +/* Physical Timer EL0 Registers */ +#define KVM_REG_ARM_PTIMER_CTL		ARM64_SYS_REG(3, 3, 14, 2, 1) +#define KVM_REG_ARM_PTIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 2, 2) +#define KVM_REG_ARM_PTIMER_CNT		ARM64_SYS_REG(3, 3, 14, 0, 1) + +/* EL0 Virtual Timer Registers */  #define KVM_REG_ARM_TIMER_CTL		ARM64_SYS_REG(3, 3, 14, 3, 1)  #define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)  #define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2) @@ -228,6 +234,7 @@ struct kvm_arch_memory_slot {  #define   KVM_DEV_ARM_ITS_SAVE_TABLES           1  #define   KVM_DEV_ARM_ITS_RESTORE_TABLES        2  #define   KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES	3 +#define   KVM_DEV_ARM_ITS_CTRL_RESET		4  /* Device Control API on vcpu fd */  #define KVM_ARM_VCPU_PMU_V3_CTRL	0 diff --git a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 000000000000..0a8e37a519f2 --- /dev/null +++ b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ +#define _UAPI__ASM_BPF_PERF_EVENT_H__ + +#include "ptrace.h" + +typedef user_pt_regs bpf_user_pt_regs_t; + +#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */ diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h index 9ad172dcd912..38535a57fef8 100644 --- a/tools/arch/s390/include/uapi/asm/kvm.h +++ b/tools/arch/s390/include/uapi/asm/kvm.h @@ -6,10 +6,6 @@   *   * Copyright IBM Corp. 
2008   * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License (version 2 only) - * as published by the Free Software Foundation. - *   *    Author(s): Carsten Otte <cotte@de.ibm.com>   *               Christian Borntraeger <borntraeger@de.ibm.com>   */ diff --git a/tools/arch/s390/include/uapi/asm/kvm_perf.h b/tools/arch/s390/include/uapi/asm/kvm_perf.h index c36c97ffdc6f..84606b8cc49e 100644 --- a/tools/arch/s390/include/uapi/asm/kvm_perf.h +++ b/tools/arch/s390/include/uapi/asm/kvm_perf.h @@ -4,10 +4,6 @@   *   * Copyright 2014 IBM Corp.   * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License (version 2 only) - * as published by the Free Software Foundation.   */  #ifndef __LINUX_KVM_PERF_S390_H diff --git a/tools/arch/s390/include/uapi/asm/perf_regs.h b/tools/arch/s390/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000000..d17dd9e5d516 --- /dev/null +++ b/tools/arch/s390/include/uapi/asm/perf_regs.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_S390_PERF_REGS_H +#define _ASM_S390_PERF_REGS_H + +enum perf_event_s390_regs { +	PERF_REG_S390_R0, +	PERF_REG_S390_R1, +	PERF_REG_S390_R2, +	PERF_REG_S390_R3, +	PERF_REG_S390_R4, +	PERF_REG_S390_R5, +	PERF_REG_S390_R6, +	PERF_REG_S390_R7, +	PERF_REG_S390_R8, +	PERF_REG_S390_R9, +	PERF_REG_S390_R10, +	PERF_REG_S390_R11, +	PERF_REG_S390_R12, +	PERF_REG_S390_R13, +	PERF_REG_S390_R14, +	PERF_REG_S390_R15, +	PERF_REG_S390_FP0, +	PERF_REG_S390_FP1, +	PERF_REG_S390_FP2, +	PERF_REG_S390_FP3, +	PERF_REG_S390_FP4, +	PERF_REG_S390_FP5, +	PERF_REG_S390_FP6, +	PERF_REG_S390_FP7, +	PERF_REG_S390_FP8, +	PERF_REG_S390_FP9, +	PERF_REG_S390_FP10, +	PERF_REG_S390_FP11, +	PERF_REG_S390_FP12, +	PERF_REG_S390_FP13, +	PERF_REG_S390_FP14, +	PERF_REG_S390_FP15, +	PERF_REG_S390_MASK, +	PERF_REG_S390_PC, + +	PERF_REG_S390_MAX +}; + +#endif /* _ASM_S390_PERF_REGS_H */ diff --git a/tools/arch/s390/include/uapi/asm/ptrace.h b/tools/arch/s390/include/uapi/asm/ptrace.h new file mode 100644 index 000000000000..543dd70e12c8 --- /dev/null +++ b/tools/arch/s390/include/uapi/asm/ptrace.h @@ -0,0 +1,457 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + *  S390 version + *    Copyright IBM Corp. 1999, 2000 + *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + */ + +#ifndef _UAPI_S390_PTRACE_H +#define _UAPI_S390_PTRACE_H + +/* + * Offsets in the user_regs_struct. 
They are used for the ptrace + * system call and in entry.S + */ +#ifndef __s390x__ + +#define PT_PSWMASK  0x00 +#define PT_PSWADDR  0x04 +#define PT_GPR0     0x08 +#define PT_GPR1     0x0C +#define PT_GPR2     0x10 +#define PT_GPR3     0x14 +#define PT_GPR4     0x18 +#define PT_GPR5     0x1C +#define PT_GPR6     0x20 +#define PT_GPR7     0x24 +#define PT_GPR8     0x28 +#define PT_GPR9     0x2C +#define PT_GPR10    0x30 +#define PT_GPR11    0x34 +#define PT_GPR12    0x38 +#define PT_GPR13    0x3C +#define PT_GPR14    0x40 +#define PT_GPR15    0x44 +#define PT_ACR0     0x48 +#define PT_ACR1     0x4C +#define PT_ACR2     0x50 +#define PT_ACR3     0x54 +#define PT_ACR4	    0x58 +#define PT_ACR5	    0x5C +#define PT_ACR6	    0x60 +#define PT_ACR7	    0x64 +#define PT_ACR8	    0x68 +#define PT_ACR9	    0x6C +#define PT_ACR10    0x70 +#define PT_ACR11    0x74 +#define PT_ACR12    0x78 +#define PT_ACR13    0x7C +#define PT_ACR14    0x80 +#define PT_ACR15    0x84 +#define PT_ORIGGPR2 0x88 +#define PT_FPC	    0x90 +/* + * A nasty fact of life that the ptrace api + * only supports passing of longs. + */ +#define PT_FPR0_HI  0x98 +#define PT_FPR0_LO  0x9C +#define PT_FPR1_HI  0xA0 +#define PT_FPR1_LO  0xA4 +#define PT_FPR2_HI  0xA8 +#define PT_FPR2_LO  0xAC +#define PT_FPR3_HI  0xB0 +#define PT_FPR3_LO  0xB4 +#define PT_FPR4_HI  0xB8 +#define PT_FPR4_LO  0xBC +#define PT_FPR5_HI  0xC0 +#define PT_FPR5_LO  0xC4 +#define PT_FPR6_HI  0xC8 +#define PT_FPR6_LO  0xCC +#define PT_FPR7_HI  0xD0 +#define PT_FPR7_LO  0xD4 +#define PT_FPR8_HI  0xD8 +#define PT_FPR8_LO  0XDC +#define PT_FPR9_HI  0xE0 +#define PT_FPR9_LO  0xE4 +#define PT_FPR10_HI 0xE8 +#define PT_FPR10_LO 0xEC +#define PT_FPR11_HI 0xF0 +#define PT_FPR11_LO 0xF4 +#define PT_FPR12_HI 0xF8 +#define PT_FPR12_LO 0xFC +#define PT_FPR13_HI 0x100 +#define PT_FPR13_LO 0x104 +#define PT_FPR14_HI 0x108 +#define PT_FPR14_LO 0x10C +#define PT_FPR15_HI 0x110 +#define PT_FPR15_LO 0x114 +#define PT_CR_9	    0x118 +#define PT_CR_10    0x11C +#define PT_CR_11    0x120 +#define PT_IEEE_IP  0x13C +#define PT_LASTOFF  PT_IEEE_IP +#define PT_ENDREGS  0x140-1 + +#define GPR_SIZE	4 +#define CR_SIZE		4 + +#define STACK_FRAME_OVERHEAD	96	/* size of minimum stack frame */ + +#else /* __s390x__ */ + +#define PT_PSWMASK  0x00 +#define PT_PSWADDR  0x08 +#define PT_GPR0     0x10 +#define PT_GPR1     0x18 +#define PT_GPR2     0x20 +#define PT_GPR3     0x28 +#define PT_GPR4     0x30 +#define PT_GPR5     0x38 +#define PT_GPR6     0x40 +#define PT_GPR7     0x48 +#define PT_GPR8     0x50 +#define PT_GPR9     0x58 +#define PT_GPR10    0x60 +#define PT_GPR11    0x68 +#define PT_GPR12    0x70 +#define PT_GPR13    0x78 +#define PT_GPR14    0x80 +#define PT_GPR15    0x88 +#define PT_ACR0     0x90 +#define PT_ACR1     0x94 +#define PT_ACR2     0x98 +#define PT_ACR3     0x9C +#define PT_ACR4	    0xA0 +#define PT_ACR5	    0xA4 +#define PT_ACR6	    0xA8 +#define PT_ACR7	    0xAC +#define PT_ACR8	    0xB0 +#define PT_ACR9	    0xB4 +#define PT_ACR10    0xB8 +#define PT_ACR11    0xBC +#define PT_ACR12    0xC0 +#define PT_ACR13    0xC4 +#define PT_ACR14    0xC8 +#define PT_ACR15    0xCC +#define PT_ORIGGPR2 0xD0 +#define PT_FPC	    0xD8 +#define PT_FPR0     0xE0 +#define PT_FPR1     0xE8 +#define PT_FPR2     0xF0 +#define PT_FPR3     0xF8 +#define PT_FPR4     0x100 +#define PT_FPR5     0x108 +#define PT_FPR6     0x110 +#define PT_FPR7     0x118 +#define PT_FPR8     0x120 +#define PT_FPR9     0x128 +#define PT_FPR10    0x130 +#define PT_FPR11    0x138 +#define PT_FPR12    0x140 +#define 
PT_FPR13    0x148 +#define PT_FPR14    0x150 +#define PT_FPR15    0x158 +#define PT_CR_9     0x160 +#define PT_CR_10    0x168 +#define PT_CR_11    0x170 +#define PT_IEEE_IP  0x1A8 +#define PT_LASTOFF  PT_IEEE_IP +#define PT_ENDREGS  0x1B0-1 + +#define GPR_SIZE	8 +#define CR_SIZE		8 + +#define STACK_FRAME_OVERHEAD	160	 /* size of minimum stack frame */ + +#endif /* __s390x__ */ + +#define NUM_GPRS	16 +#define NUM_FPRS	16 +#define NUM_CRS		16 +#define NUM_ACRS	16 + +#define NUM_CR_WORDS	3 + +#define FPR_SIZE	8 +#define FPC_SIZE	4 +#define FPC_PAD_SIZE	4 /* gcc insists on aligning the fpregs */ +#define ACR_SIZE	4 + + +#define PTRACE_OLDSETOPTIONS	     21 + +#ifndef __ASSEMBLY__ +#include <linux/stddef.h> +#include <linux/types.h> + +typedef union { +	float	f; +	double	d; +	__u64	ui; +	struct +	{ +		__u32 hi; +		__u32 lo; +	} fp; +} freg_t; + +typedef struct { +	__u32	fpc; +	__u32	pad; +	freg_t	fprs[NUM_FPRS]; +} s390_fp_regs; + +#define FPC_EXCEPTION_MASK	0xF8000000 +#define FPC_FLAGS_MASK		0x00F80000 +#define FPC_DXC_MASK		0x0000FF00 +#define FPC_RM_MASK		0x00000003 + +/* this typedef defines how a Program Status Word looks like */ +typedef struct { +	unsigned long mask; +	unsigned long addr; +} __attribute__ ((aligned(8))) psw_t; + +#ifndef __s390x__ + +#define PSW_MASK_PER		0x40000000UL +#define PSW_MASK_DAT		0x04000000UL +#define PSW_MASK_IO		0x02000000UL +#define PSW_MASK_EXT		0x01000000UL +#define PSW_MASK_KEY		0x00F00000UL +#define PSW_MASK_BASE		0x00080000UL	/* always one */ +#define PSW_MASK_MCHECK		0x00040000UL +#define PSW_MASK_WAIT		0x00020000UL +#define PSW_MASK_PSTATE		0x00010000UL +#define PSW_MASK_ASC		0x0000C000UL +#define PSW_MASK_CC		0x00003000UL +#define PSW_MASK_PM		0x00000F00UL +#define PSW_MASK_RI		0x00000000UL +#define PSW_MASK_EA		0x00000000UL +#define PSW_MASK_BA		0x00000000UL + +#define PSW_MASK_USER		0x0000FF00UL + +#define PSW_ADDR_AMODE		0x80000000UL +#define PSW_ADDR_INSN		0x7FFFFFFFUL + +#define PSW_DEFAULT_KEY		(((unsigned long) PAGE_DEFAULT_ACC) << 20) + +#define PSW_ASC_PRIMARY		0x00000000UL +#define PSW_ASC_ACCREG		0x00004000UL +#define PSW_ASC_SECONDARY	0x00008000UL +#define PSW_ASC_HOME		0x0000C000UL + +#else /* __s390x__ */ + +#define PSW_MASK_PER		0x4000000000000000UL +#define PSW_MASK_DAT		0x0400000000000000UL +#define PSW_MASK_IO		0x0200000000000000UL +#define PSW_MASK_EXT		0x0100000000000000UL +#define PSW_MASK_BASE		0x0000000000000000UL +#define PSW_MASK_KEY		0x00F0000000000000UL +#define PSW_MASK_MCHECK		0x0004000000000000UL +#define PSW_MASK_WAIT		0x0002000000000000UL +#define PSW_MASK_PSTATE		0x0001000000000000UL +#define PSW_MASK_ASC		0x0000C00000000000UL +#define PSW_MASK_CC		0x0000300000000000UL +#define PSW_MASK_PM		0x00000F0000000000UL +#define PSW_MASK_RI		0x0000008000000000UL +#define PSW_MASK_EA		0x0000000100000000UL +#define PSW_MASK_BA		0x0000000080000000UL + +#define PSW_MASK_USER		0x0000FF0180000000UL + +#define PSW_ADDR_AMODE		0x0000000000000000UL +#define PSW_ADDR_INSN		0xFFFFFFFFFFFFFFFFUL + +#define PSW_DEFAULT_KEY		(((unsigned long) PAGE_DEFAULT_ACC) << 52) + +#define PSW_ASC_PRIMARY		0x0000000000000000UL +#define PSW_ASC_ACCREG		0x0000400000000000UL +#define PSW_ASC_SECONDARY	0x0000800000000000UL +#define PSW_ASC_HOME		0x0000C00000000000UL + +#endif /* __s390x__ */ + + +/* + * The s390_regs structure is used to define the elf_gregset_t. 
+ */ +typedef struct { +	psw_t psw; +	unsigned long gprs[NUM_GPRS]; +	unsigned int  acrs[NUM_ACRS]; +	unsigned long orig_gpr2; +} s390_regs; + +/* + * The user_pt_regs structure exports the beginning of + * the in-kernel pt_regs structure to user space. + */ +typedef struct { +	unsigned long args[1]; +	psw_t psw; +	unsigned long gprs[NUM_GPRS]; +} user_pt_regs; + +/* + * Now for the user space program event recording (trace) definitions. + * The following structures are used only for the ptrace interface, don't + * touch or even look at it if you don't want to modify the user-space + * ptrace interface. In particular stay away from it for in-kernel PER. + */ +typedef struct { +	unsigned long cr[NUM_CR_WORDS]; +} per_cr_words; + +#define PER_EM_MASK 0xE8000000UL + +typedef struct { +#ifdef __s390x__ +	unsigned		       : 32; +#endif /* __s390x__ */ +	unsigned em_branching	       : 1; +	unsigned em_instruction_fetch  : 1; +	/* +	 * Switching on storage alteration automatically fixes +	 * the storage alteration event bit in the users std. +	 */ +	unsigned em_storage_alteration : 1; +	unsigned em_gpr_alt_unused     : 1; +	unsigned em_store_real_address : 1; +	unsigned		       : 3; +	unsigned branch_addr_ctl       : 1; +	unsigned		       : 1; +	unsigned storage_alt_space_ctl : 1; +	unsigned		       : 21; +	unsigned long starting_addr; +	unsigned long ending_addr; +} per_cr_bits; + +typedef struct { +	unsigned short perc_atmid; +	unsigned long address; +	unsigned char access_id; +} per_lowcore_words; + +typedef struct { +	unsigned perc_branching		 : 1; +	unsigned perc_instruction_fetch  : 1; +	unsigned perc_storage_alteration : 1; +	unsigned perc_gpr_alt_unused	 : 1; +	unsigned perc_store_real_address : 1; +	unsigned			 : 3; +	unsigned atmid_psw_bit_31	 : 1; +	unsigned atmid_validity_bit	 : 1; +	unsigned atmid_psw_bit_32	 : 1; +	unsigned atmid_psw_bit_5	 : 1; +	unsigned atmid_psw_bit_16	 : 1; +	unsigned atmid_psw_bit_17	 : 1; +	unsigned si			 : 2; +	unsigned long address; +	unsigned			 : 4; +	unsigned access_id		 : 4; +} per_lowcore_bits; + +typedef struct { +	union { +		per_cr_words   words; +		per_cr_bits    bits; +	} control_regs; +	/* +	 * The single_step and instruction_fetch bits are obsolete, +	 * the kernel always sets them to zero. To enable single +	 * stepping use ptrace(PTRACE_SINGLESTEP) instead. +	 */ +	unsigned  single_step	    : 1; +	unsigned  instruction_fetch : 1; +	unsigned		    : 30; +	/* +	 * These addresses are copied into cr10 & cr11 if single +	 * stepping is switched off +	 */ +	unsigned long starting_addr; +	unsigned long ending_addr; +	union { +		per_lowcore_words words; +		per_lowcore_bits  bits; +	} lowcore; +} per_struct; + +typedef struct { +	unsigned int  len; +	unsigned long kernel_addr; +	unsigned long process_addr; +} ptrace_area; + +/* + * S/390 specific non posix ptrace requests. I chose unusual values so + * they are unlikely to clash with future ptrace definitions. 
+ */ +#define PTRACE_PEEKUSR_AREA	      0x5000 +#define PTRACE_POKEUSR_AREA	      0x5001 +#define PTRACE_PEEKTEXT_AREA	      0x5002 +#define PTRACE_PEEKDATA_AREA	      0x5003 +#define PTRACE_POKETEXT_AREA	      0x5004 +#define PTRACE_POKEDATA_AREA	      0x5005 +#define PTRACE_GET_LAST_BREAK	      0x5006 +#define PTRACE_PEEK_SYSTEM_CALL       0x5007 +#define PTRACE_POKE_SYSTEM_CALL	      0x5008 +#define PTRACE_ENABLE_TE	      0x5009 +#define PTRACE_DISABLE_TE	      0x5010 +#define PTRACE_TE_ABORT_RAND	      0x5011 + +/* + * The numbers chosen here are somewhat arbitrary but absolutely MUST + * not overlap with any of the number assigned in <linux/ptrace.h>. + */ +#define PTRACE_SINGLEBLOCK	12	/* resume execution until next branch */ + +/* + * PT_PROT definition is loosely based on hppa bsd definition in + * gdb/hppab-nat.c + */ +#define PTRACE_PROT			  21 + +typedef enum { +	ptprot_set_access_watchpoint, +	ptprot_set_write_watchpoint, +	ptprot_disable_watchpoint +} ptprot_flags; + +typedef struct { +	unsigned long lowaddr; +	unsigned long hiaddr; +	ptprot_flags prot; +} ptprot_area; + +/* Sequence of bytes for breakpoint illegal instruction.  */ +#define S390_BREAKPOINT     {0x0,0x1} +#define S390_BREAKPOINT_U16 ((__u16)0x0001) +#define S390_SYSCALL_OPCODE ((__u16)0x0a00) +#define S390_SYSCALL_SIZE   2 + +/* + * The user_regs_struct defines the way the user registers are + * store on the stack for signal handling. + */ +struct user_regs_struct { +	psw_t psw; +	unsigned long gprs[NUM_GPRS]; +	unsigned int  acrs[NUM_ACRS]; +	unsigned long orig_gpr2; +	s390_fp_regs fp_regs; +	/* +	 * These per registers are in here so that gdb can modify them +	 * itself as there is no "official" ptrace interface for hardware +	 * watchpoints. This is the way intel does it. +	 */ +	per_struct per_info; +	unsigned long ieee_instruction_pointer;	/* obsolete, always 0 */ +}; + +#endif /* __ASSEMBLY__ */ + +#endif /* _UAPI_S390_PTRACE_H */ diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 793690fbda36..800104c8a3ed 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -13,173 +13,176 @@  /*   * Defines x86 CPU feature bits   */ -#define NCAPINTS	18	/* N 32-bit words worth of info */ -#define NBUGINTS	1	/* N 32-bit bug flags */ +#define NCAPINTS			18	   /* N 32-bit words worth of info */ +#define NBUGINTS			1	   /* N 32-bit bug flags */  /*   * Note: If the comment begins with a quoted string, that string is used   * in /proc/cpuinfo instead of the macro name.  If the string is "",   * this feature bit is not displayed in /proc/cpuinfo at all. + * + * When adding new features here that depend on other features, + * please update the table in kernel/cpu/cpuid-deps.c as well.   
*/ -/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ -#define X86_FEATURE_FPU		( 0*32+ 0) /* Onboard FPU */ -#define X86_FEATURE_VME		( 0*32+ 1) /* Virtual Mode Extensions */ -#define X86_FEATURE_DE		( 0*32+ 2) /* Debugging Extensions */ -#define X86_FEATURE_PSE		( 0*32+ 3) /* Page Size Extensions */ -#define X86_FEATURE_TSC		( 0*32+ 4) /* Time Stamp Counter */ -#define X86_FEATURE_MSR		( 0*32+ 5) /* Model-Specific Registers */ -#define X86_FEATURE_PAE		( 0*32+ 6) /* Physical Address Extensions */ -#define X86_FEATURE_MCE		( 0*32+ 7) /* Machine Check Exception */ -#define X86_FEATURE_CX8		( 0*32+ 8) /* CMPXCHG8 instruction */ -#define X86_FEATURE_APIC	( 0*32+ 9) /* Onboard APIC */ -#define X86_FEATURE_SEP		( 0*32+11) /* SYSENTER/SYSEXIT */ -#define X86_FEATURE_MTRR	( 0*32+12) /* Memory Type Range Registers */ -#define X86_FEATURE_PGE		( 0*32+13) /* Page Global Enable */ -#define X86_FEATURE_MCA		( 0*32+14) /* Machine Check Architecture */ -#define X86_FEATURE_CMOV	( 0*32+15) /* CMOV instructions */ -					  /* (plus FCMOVcc, FCOMI with FPU) */ -#define X86_FEATURE_PAT		( 0*32+16) /* Page Attribute Table */ -#define X86_FEATURE_PSE36	( 0*32+17) /* 36-bit PSEs */ -#define X86_FEATURE_PN		( 0*32+18) /* Processor serial number */ -#define X86_FEATURE_CLFLUSH	( 0*32+19) /* CLFLUSH instruction */ -#define X86_FEATURE_DS		( 0*32+21) /* "dts" Debug Store */ -#define X86_FEATURE_ACPI	( 0*32+22) /* ACPI via MSR */ -#define X86_FEATURE_MMX		( 0*32+23) /* Multimedia Extensions */ -#define X86_FEATURE_FXSR	( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ -#define X86_FEATURE_XMM		( 0*32+25) /* "sse" */ -#define X86_FEATURE_XMM2	( 0*32+26) /* "sse2" */ -#define X86_FEATURE_SELFSNOOP	( 0*32+27) /* "ss" CPU self snoop */ -#define X86_FEATURE_HT		( 0*32+28) /* Hyper-Threading */ -#define X86_FEATURE_ACC		( 0*32+29) /* "tm" Automatic clock control */ -#define X86_FEATURE_IA64	( 0*32+30) /* IA-64 processor */ -#define X86_FEATURE_PBE		( 0*32+31) /* Pending Break Enable */ +/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */ +#define X86_FEATURE_FPU			( 0*32+ 0) /* Onboard FPU */ +#define X86_FEATURE_VME			( 0*32+ 1) /* Virtual Mode Extensions */ +#define X86_FEATURE_DE			( 0*32+ 2) /* Debugging Extensions */ +#define X86_FEATURE_PSE			( 0*32+ 3) /* Page Size Extensions */ +#define X86_FEATURE_TSC			( 0*32+ 4) /* Time Stamp Counter */ +#define X86_FEATURE_MSR			( 0*32+ 5) /* Model-Specific Registers */ +#define X86_FEATURE_PAE			( 0*32+ 6) /* Physical Address Extensions */ +#define X86_FEATURE_MCE			( 0*32+ 7) /* Machine Check Exception */ +#define X86_FEATURE_CX8			( 0*32+ 8) /* CMPXCHG8 instruction */ +#define X86_FEATURE_APIC		( 0*32+ 9) /* Onboard APIC */ +#define X86_FEATURE_SEP			( 0*32+11) /* SYSENTER/SYSEXIT */ +#define X86_FEATURE_MTRR		( 0*32+12) /* Memory Type Range Registers */ +#define X86_FEATURE_PGE			( 0*32+13) /* Page Global Enable */ +#define X86_FEATURE_MCA			( 0*32+14) /* Machine Check Architecture */ +#define X86_FEATURE_CMOV		( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */ +#define X86_FEATURE_PAT			( 0*32+16) /* Page Attribute Table */ +#define X86_FEATURE_PSE36		( 0*32+17) /* 36-bit PSEs */ +#define X86_FEATURE_PN			( 0*32+18) /* Processor serial number */ +#define X86_FEATURE_CLFLUSH		( 0*32+19) /* CLFLUSH instruction */ +#define X86_FEATURE_DS			( 0*32+21) /* "dts" Debug Store */ +#define X86_FEATURE_ACPI		( 0*32+22) /* ACPI via MSR */ +#define X86_FEATURE_MMX			( 0*32+23) /* Multimedia Extensions */ +#define X86_FEATURE_FXSR		( 
0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ +#define X86_FEATURE_XMM			( 0*32+25) /* "sse" */ +#define X86_FEATURE_XMM2		( 0*32+26) /* "sse2" */ +#define X86_FEATURE_SELFSNOOP		( 0*32+27) /* "ss" CPU self snoop */ +#define X86_FEATURE_HT			( 0*32+28) /* Hyper-Threading */ +#define X86_FEATURE_ACC			( 0*32+29) /* "tm" Automatic clock control */ +#define X86_FEATURE_IA64		( 0*32+30) /* IA-64 processor */ +#define X86_FEATURE_PBE			( 0*32+31) /* Pending Break Enable */  /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */  /* Don't duplicate feature flags which are redundant with Intel! */ -#define X86_FEATURE_SYSCALL	( 1*32+11) /* SYSCALL/SYSRET */ -#define X86_FEATURE_MP		( 1*32+19) /* MP Capable. */ -#define X86_FEATURE_NX		( 1*32+20) /* Execute Disable */ -#define X86_FEATURE_MMXEXT	( 1*32+22) /* AMD MMX extensions */ -#define X86_FEATURE_FXSR_OPT	( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ -#define X86_FEATURE_GBPAGES	( 1*32+26) /* "pdpe1gb" GB pages */ -#define X86_FEATURE_RDTSCP	( 1*32+27) /* RDTSCP */ -#define X86_FEATURE_LM		( 1*32+29) /* Long Mode (x86-64) */ -#define X86_FEATURE_3DNOWEXT	( 1*32+30) /* AMD 3DNow! extensions */ -#define X86_FEATURE_3DNOW	( 1*32+31) /* 3DNow! */ +#define X86_FEATURE_SYSCALL		( 1*32+11) /* SYSCALL/SYSRET */ +#define X86_FEATURE_MP			( 1*32+19) /* MP Capable */ +#define X86_FEATURE_NX			( 1*32+20) /* Execute Disable */ +#define X86_FEATURE_MMXEXT		( 1*32+22) /* AMD MMX extensions */ +#define X86_FEATURE_FXSR_OPT		( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ +#define X86_FEATURE_GBPAGES		( 1*32+26) /* "pdpe1gb" GB pages */ +#define X86_FEATURE_RDTSCP		( 1*32+27) /* RDTSCP */ +#define X86_FEATURE_LM			( 1*32+29) /* Long Mode (x86-64, 64-bit support) */ +#define X86_FEATURE_3DNOWEXT		( 1*32+30) /* AMD 3DNow extensions */ +#define X86_FEATURE_3DNOW		( 1*32+31) /* 3DNow */  /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ -#define X86_FEATURE_RECOVERY	( 2*32+ 0) /* CPU in recovery mode */ -#define X86_FEATURE_LONGRUN	( 2*32+ 1) /* Longrun power control */ -#define X86_FEATURE_LRTI	( 2*32+ 3) /* LongRun table interface */ +#define X86_FEATURE_RECOVERY		( 2*32+ 0) /* CPU in recovery mode */ +#define X86_FEATURE_LONGRUN		( 2*32+ 1) /* Longrun power control */ +#define X86_FEATURE_LRTI		( 2*32+ 3) /* LongRun table interface */  /* Other features, Linux-defined mapping, word 3 */  /* This range is used for feature bits which conflict or are synthesized */ -#define X86_FEATURE_CXMMX	( 3*32+ 0) /* Cyrix MMX extensions */ -#define X86_FEATURE_K6_MTRR	( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ -#define X86_FEATURE_CYRIX_ARR	( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ -#define X86_FEATURE_CENTAUR_MCR	( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ -/* cpu types for specific tunings: */ -#define X86_FEATURE_K8		( 3*32+ 4) /* "" Opteron, Athlon64 */ -#define X86_FEATURE_K7		( 3*32+ 5) /* "" Athlon */ -#define X86_FEATURE_P3		( 3*32+ 6) /* "" P3 */ -#define X86_FEATURE_P4		( 3*32+ 7) /* "" P4 */ -#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ -#define X86_FEATURE_UP		( 3*32+ 9) /* smp kernel running on up */ -#define X86_FEATURE_ART		( 3*32+10) /* Platform has always running timer (ART) */ -#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ -#define X86_FEATURE_PEBS	( 3*32+12) /* Precise-Event Based Sampling */ -#define X86_FEATURE_BTS		( 3*32+13) /* Branch Trace Store */ -#define X86_FEATURE_SYSCALL32	( 3*32+14) /* "" syscall in ia32 userspace */ -#define X86_FEATURE_SYSENTER32	( 3*32+15) /* "" 
sysenter in ia32 userspace */ -#define X86_FEATURE_REP_GOOD	( 3*32+16) /* rep microcode works well */ -#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ -#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ -#define X86_FEATURE_ACC_POWER	( 3*32+19) /* AMD Accumulated Power Mechanism */ -#define X86_FEATURE_NOPL	( 3*32+20) /* The NOPL (0F 1F) instructions */ -#define X86_FEATURE_ALWAYS	( 3*32+21) /* "" Always-present feature */ -#define X86_FEATURE_XTOPOLOGY	( 3*32+22) /* cpu topology enum extensions */ -#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ -#define X86_FEATURE_NONSTOP_TSC	( 3*32+24) /* TSC does not stop in C states */ -#define X86_FEATURE_CPUID	( 3*32+25) /* CPU has CPUID instruction itself */ -#define X86_FEATURE_EXTD_APICID	( 3*32+26) /* has extended APICID (8 bits) */ -#define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */ -#define X86_FEATURE_APERFMPERF	( 3*32+28) /* APERFMPERF */ -#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ -#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ +#define X86_FEATURE_CXMMX		( 3*32+ 0) /* Cyrix MMX extensions */ +#define X86_FEATURE_K6_MTRR		( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ +#define X86_FEATURE_CYRIX_ARR		( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ +#define X86_FEATURE_CENTAUR_MCR		( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ + +/* CPU types for specific tunings: */ +#define X86_FEATURE_K8			( 3*32+ 4) /* "" Opteron, Athlon64 */ +#define X86_FEATURE_K7			( 3*32+ 5) /* "" Athlon */ +#define X86_FEATURE_P3			( 3*32+ 6) /* "" P3 */ +#define X86_FEATURE_P4			( 3*32+ 7) /* "" P4 */ +#define X86_FEATURE_CONSTANT_TSC	( 3*32+ 8) /* TSC ticks at a constant rate */ +#define X86_FEATURE_UP			( 3*32+ 9) /* SMP kernel running on UP */ +#define X86_FEATURE_ART			( 3*32+10) /* Always running timer (ART) */ +#define X86_FEATURE_ARCH_PERFMON	( 3*32+11) /* Intel Architectural PerfMon */ +#define X86_FEATURE_PEBS		( 3*32+12) /* Precise-Event Based Sampling */ +#define X86_FEATURE_BTS			( 3*32+13) /* Branch Trace Store */ +#define X86_FEATURE_SYSCALL32		( 3*32+14) /* "" syscall in IA32 userspace */ +#define X86_FEATURE_SYSENTER32		( 3*32+15) /* "" sysenter in IA32 userspace */ +#define X86_FEATURE_REP_GOOD		( 3*32+16) /* REP microcode works well */ +#define X86_FEATURE_MFENCE_RDTSC	( 3*32+17) /* "" MFENCE synchronizes RDTSC */ +#define X86_FEATURE_LFENCE_RDTSC	( 3*32+18) /* "" LFENCE synchronizes RDTSC */ +#define X86_FEATURE_ACC_POWER		( 3*32+19) /* AMD Accumulated Power Mechanism */ +#define X86_FEATURE_NOPL		( 3*32+20) /* The NOPL (0F 1F) instructions */ +#define X86_FEATURE_ALWAYS		( 3*32+21) /* "" Always-present feature */ +#define X86_FEATURE_XTOPOLOGY		( 3*32+22) /* CPU topology enum extensions */ +#define X86_FEATURE_TSC_RELIABLE	( 3*32+23) /* TSC is known to be reliable */ +#define X86_FEATURE_NONSTOP_TSC		( 3*32+24) /* TSC does not stop in C states */ +#define X86_FEATURE_CPUID		( 3*32+25) /* CPU has CPUID instruction itself */ +#define X86_FEATURE_EXTD_APICID		( 3*32+26) /* Extended APICID (8 bits) */ +#define X86_FEATURE_AMD_DCM		( 3*32+27) /* AMD multi-node processor */ +#define X86_FEATURE_APERFMPERF		( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */ +#define X86_FEATURE_NONSTOP_TSC_S3	( 3*32+30) /* TSC doesn't stop in S3 state */ +#define X86_FEATURE_TSC_KNOWN_FREQ	( 3*32+31) /* TSC has known frequency */ -/* Intel-defined CPU features, CPUID level 0x00000001 
(ecx), word 4 */ -#define X86_FEATURE_XMM3	( 4*32+ 0) /* "pni" SSE-3 */ -#define X86_FEATURE_PCLMULQDQ	( 4*32+ 1) /* PCLMULQDQ instruction */ -#define X86_FEATURE_DTES64	( 4*32+ 2) /* 64-bit Debug Store */ -#define X86_FEATURE_MWAIT	( 4*32+ 3) /* "monitor" Monitor/Mwait support */ -#define X86_FEATURE_DSCPL	( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ -#define X86_FEATURE_VMX		( 4*32+ 5) /* Hardware virtualization */ -#define X86_FEATURE_SMX		( 4*32+ 6) /* Safer mode */ -#define X86_FEATURE_EST		( 4*32+ 7) /* Enhanced SpeedStep */ -#define X86_FEATURE_TM2		( 4*32+ 8) /* Thermal Monitor 2 */ -#define X86_FEATURE_SSSE3	( 4*32+ 9) /* Supplemental SSE-3 */ -#define X86_FEATURE_CID		( 4*32+10) /* Context ID */ -#define X86_FEATURE_SDBG	( 4*32+11) /* Silicon Debug */ -#define X86_FEATURE_FMA		( 4*32+12) /* Fused multiply-add */ -#define X86_FEATURE_CX16	( 4*32+13) /* CMPXCHG16B */ -#define X86_FEATURE_XTPR	( 4*32+14) /* Send Task Priority Messages */ -#define X86_FEATURE_PDCM	( 4*32+15) /* Performance Capabilities */ -#define X86_FEATURE_PCID	( 4*32+17) /* Process Context Identifiers */ -#define X86_FEATURE_DCA		( 4*32+18) /* Direct Cache Access */ -#define X86_FEATURE_XMM4_1	( 4*32+19) /* "sse4_1" SSE-4.1 */ -#define X86_FEATURE_XMM4_2	( 4*32+20) /* "sse4_2" SSE-4.2 */ -#define X86_FEATURE_X2APIC	( 4*32+21) /* x2APIC */ -#define X86_FEATURE_MOVBE	( 4*32+22) /* MOVBE instruction */ -#define X86_FEATURE_POPCNT      ( 4*32+23) /* POPCNT instruction */ -#define X86_FEATURE_TSC_DEADLINE_TIMER	( 4*32+24) /* Tsc deadline timer */ -#define X86_FEATURE_AES		( 4*32+25) /* AES instructions */ -#define X86_FEATURE_XSAVE	( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ -#define X86_FEATURE_OSXSAVE	( 4*32+27) /* "" XSAVE enabled in the OS */ -#define X86_FEATURE_AVX		( 4*32+28) /* Advanced Vector Extensions */ -#define X86_FEATURE_F16C	( 4*32+29) /* 16-bit fp conversions */ -#define X86_FEATURE_RDRAND	( 4*32+30) /* The RDRAND instruction */ -#define X86_FEATURE_HYPERVISOR	( 4*32+31) /* Running on a hypervisor */ +/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */ +#define X86_FEATURE_XMM3		( 4*32+ 0) /* "pni" SSE-3 */ +#define X86_FEATURE_PCLMULQDQ		( 4*32+ 1) /* PCLMULQDQ instruction */ +#define X86_FEATURE_DTES64		( 4*32+ 2) /* 64-bit Debug Store */ +#define X86_FEATURE_MWAIT		( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */ +#define X86_FEATURE_DSCPL		( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */ +#define X86_FEATURE_VMX			( 4*32+ 5) /* Hardware virtualization */ +#define X86_FEATURE_SMX			( 4*32+ 6) /* Safer Mode eXtensions */ +#define X86_FEATURE_EST			( 4*32+ 7) /* Enhanced SpeedStep */ +#define X86_FEATURE_TM2			( 4*32+ 8) /* Thermal Monitor 2 */ +#define X86_FEATURE_SSSE3		( 4*32+ 9) /* Supplemental SSE-3 */ +#define X86_FEATURE_CID			( 4*32+10) /* Context ID */ +#define X86_FEATURE_SDBG		( 4*32+11) /* Silicon Debug */ +#define X86_FEATURE_FMA			( 4*32+12) /* Fused multiply-add */ +#define X86_FEATURE_CX16		( 4*32+13) /* CMPXCHG16B instruction */ +#define X86_FEATURE_XTPR		( 4*32+14) /* Send Task Priority Messages */ +#define X86_FEATURE_PDCM		( 4*32+15) /* Perf/Debug Capabilities MSR */ +#define X86_FEATURE_PCID		( 4*32+17) /* Process Context Identifiers */ +#define X86_FEATURE_DCA			( 4*32+18) /* Direct Cache Access */ +#define X86_FEATURE_XMM4_1		( 4*32+19) /* "sse4_1" SSE-4.1 */ +#define X86_FEATURE_XMM4_2		( 4*32+20) /* "sse4_2" SSE-4.2 */ +#define X86_FEATURE_X2APIC		( 4*32+21) /* X2APIC */ +#define X86_FEATURE_MOVBE		( 4*32+22) /* MOVBE instruction */ 
+#define X86_FEATURE_POPCNT		( 4*32+23) /* POPCNT instruction */ +#define X86_FEATURE_TSC_DEADLINE_TIMER	( 4*32+24) /* TSC deadline timer */ +#define X86_FEATURE_AES			( 4*32+25) /* AES instructions */ +#define X86_FEATURE_XSAVE		( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */ +#define X86_FEATURE_OSXSAVE		( 4*32+27) /* "" XSAVE instruction enabled in the OS */ +#define X86_FEATURE_AVX			( 4*32+28) /* Advanced Vector Extensions */ +#define X86_FEATURE_F16C		( 4*32+29) /* 16-bit FP conversions */ +#define X86_FEATURE_RDRAND		( 4*32+30) /* RDRAND instruction */ +#define X86_FEATURE_HYPERVISOR		( 4*32+31) /* Running on a hypervisor */  /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ -#define X86_FEATURE_XSTORE	( 5*32+ 2) /* "rng" RNG present (xstore) */ -#define X86_FEATURE_XSTORE_EN	( 5*32+ 3) /* "rng_en" RNG enabled */ -#define X86_FEATURE_XCRYPT	( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ -#define X86_FEATURE_XCRYPT_EN	( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ -#define X86_FEATURE_ACE2	( 5*32+ 8) /* Advanced Cryptography Engine v2 */ -#define X86_FEATURE_ACE2_EN	( 5*32+ 9) /* ACE v2 enabled */ -#define X86_FEATURE_PHE		( 5*32+10) /* PadLock Hash Engine */ -#define X86_FEATURE_PHE_EN	( 5*32+11) /* PHE enabled */ -#define X86_FEATURE_PMM		( 5*32+12) /* PadLock Montgomery Multiplier */ -#define X86_FEATURE_PMM_EN	( 5*32+13) /* PMM enabled */ +#define X86_FEATURE_XSTORE		( 5*32+ 2) /* "rng" RNG present (xstore) */ +#define X86_FEATURE_XSTORE_EN		( 5*32+ 3) /* "rng_en" RNG enabled */ +#define X86_FEATURE_XCRYPT		( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ +#define X86_FEATURE_XCRYPT_EN		( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ +#define X86_FEATURE_ACE2		( 5*32+ 8) /* Advanced Cryptography Engine v2 */ +#define X86_FEATURE_ACE2_EN		( 5*32+ 9) /* ACE v2 enabled */ +#define X86_FEATURE_PHE			( 5*32+10) /* PadLock Hash Engine */ +#define X86_FEATURE_PHE_EN		( 5*32+11) /* PHE enabled */ +#define X86_FEATURE_PMM			( 5*32+12) /* PadLock Montgomery Multiplier */ +#define X86_FEATURE_PMM_EN		( 5*32+13) /* PMM enabled */ -/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ -#define X86_FEATURE_LAHF_LM	( 6*32+ 0) /* LAHF/SAHF in long mode */ -#define X86_FEATURE_CMP_LEGACY	( 6*32+ 1) /* If yes HyperThreading not valid */ -#define X86_FEATURE_SVM		( 6*32+ 2) /* Secure virtual machine */ -#define X86_FEATURE_EXTAPIC	( 6*32+ 3) /* Extended APIC space */ -#define X86_FEATURE_CR8_LEGACY	( 6*32+ 4) /* CR8 in 32-bit mode */ -#define X86_FEATURE_ABM		( 6*32+ 5) /* Advanced bit manipulation */ -#define X86_FEATURE_SSE4A	( 6*32+ 6) /* SSE-4A */ -#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ -#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ -#define X86_FEATURE_OSVW	( 6*32+ 9) /* OS Visible Workaround */ -#define X86_FEATURE_IBS		( 6*32+10) /* Instruction Based Sampling */ -#define X86_FEATURE_XOP		( 6*32+11) /* extended AVX instructions */ -#define X86_FEATURE_SKINIT	( 6*32+12) /* SKINIT/STGI instructions */ -#define X86_FEATURE_WDT		( 6*32+13) /* Watchdog timer */ -#define X86_FEATURE_LWP		( 6*32+15) /* Light Weight Profiling */ -#define X86_FEATURE_FMA4	( 6*32+16) /* 4 operands MAC instructions */ -#define X86_FEATURE_TCE		( 6*32+17) /* translation cache extension */ -#define X86_FEATURE_NODEID_MSR	( 6*32+19) /* NodeId MSR */ -#define X86_FEATURE_TBM		( 6*32+21) /* trailing bit manipulations */ -#define X86_FEATURE_TOPOEXT	( 6*32+22) /* topology extensions CPUID leafs */ -#define 
X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ -#define X86_FEATURE_PERFCTR_NB  ( 6*32+24) /* NB performance counter extensions */ -#define X86_FEATURE_BPEXT	(6*32+26) /* data breakpoint extension */ -#define X86_FEATURE_PTSC	( 6*32+27) /* performance time-stamp counter */ -#define X86_FEATURE_PERFCTR_LLC	( 6*32+28) /* Last Level Cache performance counter extensions */ -#define X86_FEATURE_MWAITX	( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ +/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */ +#define X86_FEATURE_LAHF_LM		( 6*32+ 0) /* LAHF/SAHF in long mode */ +#define X86_FEATURE_CMP_LEGACY		( 6*32+ 1) /* If yes HyperThreading not valid */ +#define X86_FEATURE_SVM			( 6*32+ 2) /* Secure Virtual Machine */ +#define X86_FEATURE_EXTAPIC		( 6*32+ 3) /* Extended APIC space */ +#define X86_FEATURE_CR8_LEGACY		( 6*32+ 4) /* CR8 in 32-bit mode */ +#define X86_FEATURE_ABM			( 6*32+ 5) /* Advanced bit manipulation */ +#define X86_FEATURE_SSE4A		( 6*32+ 6) /* SSE-4A */ +#define X86_FEATURE_MISALIGNSSE		( 6*32+ 7) /* Misaligned SSE mode */ +#define X86_FEATURE_3DNOWPREFETCH	( 6*32+ 8) /* 3DNow prefetch instructions */ +#define X86_FEATURE_OSVW		( 6*32+ 9) /* OS Visible Workaround */ +#define X86_FEATURE_IBS			( 6*32+10) /* Instruction Based Sampling */ +#define X86_FEATURE_XOP			( 6*32+11) /* extended AVX instructions */ +#define X86_FEATURE_SKINIT		( 6*32+12) /* SKINIT/STGI instructions */ +#define X86_FEATURE_WDT			( 6*32+13) /* Watchdog timer */ +#define X86_FEATURE_LWP			( 6*32+15) /* Light Weight Profiling */ +#define X86_FEATURE_FMA4		( 6*32+16) /* 4 operands MAC instructions */ +#define X86_FEATURE_TCE			( 6*32+17) /* Translation Cache Extension */ +#define X86_FEATURE_NODEID_MSR		( 6*32+19) /* NodeId MSR */ +#define X86_FEATURE_TBM			( 6*32+21) /* Trailing Bit Manipulations */ +#define X86_FEATURE_TOPOEXT		( 6*32+22) /* Topology extensions CPUID leafs */ +#define X86_FEATURE_PERFCTR_CORE	( 6*32+23) /* Core performance counter extensions */ +#define X86_FEATURE_PERFCTR_NB		( 6*32+24) /* NB performance counter extensions */ +#define X86_FEATURE_BPEXT		( 6*32+26) /* Data breakpoint extension */ +#define X86_FEATURE_PTSC		( 6*32+27) /* Performance time-stamp counter */ +#define X86_FEATURE_PERFCTR_LLC		( 6*32+28) /* Last Level Cache performance counter extensions */ +#define X86_FEATURE_MWAITX		( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */  /*   * Auxiliary flags: Linux defined - For features scattered in various @@ -187,146 +190,155 @@   *   * Reuse free bits when adding new feature flags!   
*/ -#define X86_FEATURE_RING3MWAIT	( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */ -#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ -#define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */ -#define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ -#define X86_FEATURE_CAT_L3	( 7*32+ 4) /* Cache Allocation Technology L3 */ -#define X86_FEATURE_CAT_L2	( 7*32+ 5) /* Cache Allocation Technology L2 */ -#define X86_FEATURE_CDP_L3	( 7*32+ 6) /* Code and Data Prioritization L3 */ +#define X86_FEATURE_RING3MWAIT		( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */ +#define X86_FEATURE_CPUID_FAULT		( 7*32+ 1) /* Intel CPUID faulting */ +#define X86_FEATURE_CPB			( 7*32+ 2) /* AMD Core Performance Boost */ +#define X86_FEATURE_EPB			( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ +#define X86_FEATURE_CAT_L3		( 7*32+ 4) /* Cache Allocation Technology L3 */ +#define X86_FEATURE_CAT_L2		( 7*32+ 5) /* Cache Allocation Technology L2 */ +#define X86_FEATURE_CDP_L3		( 7*32+ 6) /* Code and Data Prioritization L3 */ -#define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */ -#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ -#define X86_FEATURE_SME		( 7*32+10) /* AMD Secure Memory Encryption */ +#define X86_FEATURE_HW_PSTATE		( 7*32+ 8) /* AMD HW-PState */ +#define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */ +#define X86_FEATURE_SME			( 7*32+10) /* AMD Secure Memory Encryption */ -#define X86_FEATURE_INTEL_PPIN	( 7*32+14) /* Intel Processor Inventory Number */ -#define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */ -#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ -#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */ +#define X86_FEATURE_INTEL_PT		( 7*32+15) /* Intel Processor Trace */ +#define X86_FEATURE_AVX512_4VNNIW	( 7*32+16) /* AVX-512 Neural Network Instructions */ +#define X86_FEATURE_AVX512_4FMAPS	( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */ -#define X86_FEATURE_MBA         ( 7*32+18) /* Memory Bandwidth Allocation */ +#define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */  /* Virtualization flags: Linux defined, word 8 */ -#define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */ -#define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */ -#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ -#define X86_FEATURE_EPT         ( 8*32+ 3) /* Intel Extended Page Table */ -#define X86_FEATURE_VPID        ( 8*32+ 4) /* Intel Virtual Processor ID */ +#define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */ +#define X86_FEATURE_VNMI		( 8*32+ 1) /* Intel Virtual NMI */ +#define X86_FEATURE_FLEXPRIORITY	( 8*32+ 2) /* Intel FlexPriority */ +#define X86_FEATURE_EPT			( 8*32+ 3) /* Intel Extended Page Table */ +#define X86_FEATURE_VPID		( 8*32+ 4) /* Intel Virtual Processor ID */ -#define X86_FEATURE_VMMCALL     ( 8*32+15) /* Prefer vmmcall to vmcall */ -#define X86_FEATURE_XENPV       ( 8*32+16) /* "" Xen paravirtual guest */ +#define X86_FEATURE_VMMCALL		( 8*32+15) /* Prefer VMMCALL to VMCALL */ +#define X86_FEATURE_XENPV		( 8*32+16) /* "" Xen paravirtual guest */ -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ -#define X86_FEATURE_FSGSBASE	( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ -#define X86_FEATURE_TSC_ADJUST	( 9*32+ 1) /* TSC adjustment 
MSR 0x3b */ -#define X86_FEATURE_BMI1	( 9*32+ 3) /* 1st group bit manipulation extensions */ -#define X86_FEATURE_HLE		( 9*32+ 4) /* Hardware Lock Elision */ -#define X86_FEATURE_AVX2	( 9*32+ 5) /* AVX2 instructions */ -#define X86_FEATURE_SMEP	( 9*32+ 7) /* Supervisor Mode Execution Protection */ -#define X86_FEATURE_BMI2	( 9*32+ 8) /* 2nd group bit manipulation extensions */ -#define X86_FEATURE_ERMS	( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ -#define X86_FEATURE_INVPCID	( 9*32+10) /* Invalidate Processor Context ID */ -#define X86_FEATURE_RTM		( 9*32+11) /* Restricted Transactional Memory */ -#define X86_FEATURE_CQM		( 9*32+12) /* Cache QoS Monitoring */ -#define X86_FEATURE_MPX		( 9*32+14) /* Memory Protection Extension */ -#define X86_FEATURE_RDT_A	( 9*32+15) /* Resource Director Technology Allocation */ -#define X86_FEATURE_AVX512F	( 9*32+16) /* AVX-512 Foundation */ -#define X86_FEATURE_AVX512DQ	( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ -#define X86_FEATURE_RDSEED	( 9*32+18) /* The RDSEED instruction */ -#define X86_FEATURE_ADX		( 9*32+19) /* The ADCX and ADOX instructions */ -#define X86_FEATURE_SMAP	( 9*32+20) /* Supervisor Mode Access Prevention */ -#define X86_FEATURE_AVX512IFMA  ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ -#define X86_FEATURE_CLFLUSHOPT	( 9*32+23) /* CLFLUSHOPT instruction */ -#define X86_FEATURE_CLWB	( 9*32+24) /* CLWB instruction */ -#define X86_FEATURE_AVX512PF	( 9*32+26) /* AVX-512 Prefetch */ -#define X86_FEATURE_AVX512ER	( 9*32+27) /* AVX-512 Exponential and Reciprocal */ -#define X86_FEATURE_AVX512CD	( 9*32+28) /* AVX-512 Conflict Detection */ -#define X86_FEATURE_SHA_NI	( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ -#define X86_FEATURE_AVX512BW	( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ -#define X86_FEATURE_AVX512VL	( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ +/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */ +#define X86_FEATURE_FSGSBASE		( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ +#define X86_FEATURE_TSC_ADJUST		( 9*32+ 1) /* TSC adjustment MSR 0x3B */ +#define X86_FEATURE_BMI1		( 9*32+ 3) /* 1st group bit manipulation extensions */ +#define X86_FEATURE_HLE			( 9*32+ 4) /* Hardware Lock Elision */ +#define X86_FEATURE_AVX2		( 9*32+ 5) /* AVX2 instructions */ +#define X86_FEATURE_SMEP		( 9*32+ 7) /* Supervisor Mode Execution Protection */ +#define X86_FEATURE_BMI2		( 9*32+ 8) /* 2nd group bit manipulation extensions */ +#define X86_FEATURE_ERMS		( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */ +#define X86_FEATURE_INVPCID		( 9*32+10) /* Invalidate Processor Context ID */ +#define X86_FEATURE_RTM			( 9*32+11) /* Restricted Transactional Memory */ +#define X86_FEATURE_CQM			( 9*32+12) /* Cache QoS Monitoring */ +#define X86_FEATURE_MPX			( 9*32+14) /* Memory Protection Extension */ +#define X86_FEATURE_RDT_A		( 9*32+15) /* Resource Director Technology Allocation */ +#define X86_FEATURE_AVX512F		( 9*32+16) /* AVX-512 Foundation */ +#define X86_FEATURE_AVX512DQ		( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ +#define X86_FEATURE_RDSEED		( 9*32+18) /* RDSEED instruction */ +#define X86_FEATURE_ADX			( 9*32+19) /* ADCX and ADOX instructions */ +#define X86_FEATURE_SMAP		( 9*32+20) /* Supervisor Mode Access Prevention */ +#define X86_FEATURE_AVX512IFMA		( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ +#define X86_FEATURE_CLFLUSHOPT		( 9*32+23) /* CLFLUSHOPT instruction */ +#define 
X86_FEATURE_CLWB		( 9*32+24) /* CLWB instruction */ +#define X86_FEATURE_AVX512PF		( 9*32+26) /* AVX-512 Prefetch */ +#define X86_FEATURE_AVX512ER		( 9*32+27) /* AVX-512 Exponential and Reciprocal */ +#define X86_FEATURE_AVX512CD		( 9*32+28) /* AVX-512 Conflict Detection */ +#define X86_FEATURE_SHA_NI		( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ +#define X86_FEATURE_AVX512BW		( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ +#define X86_FEATURE_AVX512VL		( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ -/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ -#define X86_FEATURE_XSAVEOPT	(10*32+ 0) /* XSAVEOPT */ -#define X86_FEATURE_XSAVEC	(10*32+ 1) /* XSAVEC */ -#define X86_FEATURE_XGETBV1	(10*32+ 2) /* XGETBV with ECX = 1 */ -#define X86_FEATURE_XSAVES	(10*32+ 3) /* XSAVES/XRSTORS */ +/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */ +#define X86_FEATURE_XSAVEOPT		(10*32+ 0) /* XSAVEOPT instruction */ +#define X86_FEATURE_XSAVEC		(10*32+ 1) /* XSAVEC instruction */ +#define X86_FEATURE_XGETBV1		(10*32+ 2) /* XGETBV with ECX = 1 instruction */ +#define X86_FEATURE_XSAVES		(10*32+ 3) /* XSAVES/XRSTORS instructions */ -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ -#define X86_FEATURE_CQM_LLC	(11*32+ 1) /* LLC QoS if 1 */ +/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */ +#define X86_FEATURE_CQM_LLC		(11*32+ 1) /* LLC QoS if 1 */ -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ -#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ -#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ -#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ +/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */ +#define X86_FEATURE_CQM_OCCUP_LLC	(12*32+ 0) /* LLC occupancy monitoring */ +#define X86_FEATURE_CQM_MBM_TOTAL	(12*32+ 1) /* LLC Total MBM monitoring */ +#define X86_FEATURE_CQM_MBM_LOCAL	(12*32+ 2) /* LLC Local MBM monitoring */ -/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ -#define X86_FEATURE_CLZERO	(13*32+0) /* CLZERO instruction */ -#define X86_FEATURE_IRPERF	(13*32+1) /* Instructions Retired Count */ +/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ +#define X86_FEATURE_CLZERO		(13*32+ 0) /* CLZERO instruction */ +#define X86_FEATURE_IRPERF		(13*32+ 1) /* Instructions Retired Count */ +#define X86_FEATURE_XSAVEERPTR		(13*32+ 2) /* Always save/restore FP error pointers */ -/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ -#define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */ -#define X86_FEATURE_IDA		(14*32+ 1) /* Intel Dynamic Acceleration */ -#define X86_FEATURE_ARAT	(14*32+ 2) /* Always Running APIC Timer */ -#define X86_FEATURE_PLN		(14*32+ 4) /* Intel Power Limit Notification */ -#define X86_FEATURE_PTS		(14*32+ 6) /* Intel Package Thermal Status */ -#define X86_FEATURE_HWP		(14*32+ 7) /* Intel Hardware P-states */ -#define X86_FEATURE_HWP_NOTIFY	(14*32+ 8) /* HWP Notification */ -#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ -#define X86_FEATURE_HWP_EPP	(14*32+10) /* HWP Energy Perf. 
Preference */ -#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ +/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ +#define X86_FEATURE_DTHERM		(14*32+ 0) /* Digital Thermal Sensor */ +#define X86_FEATURE_IDA			(14*32+ 1) /* Intel Dynamic Acceleration */ +#define X86_FEATURE_ARAT		(14*32+ 2) /* Always Running APIC Timer */ +#define X86_FEATURE_PLN			(14*32+ 4) /* Intel Power Limit Notification */ +#define X86_FEATURE_PTS			(14*32+ 6) /* Intel Package Thermal Status */ +#define X86_FEATURE_HWP			(14*32+ 7) /* Intel Hardware P-states */ +#define X86_FEATURE_HWP_NOTIFY		(14*32+ 8) /* HWP Notification */ +#define X86_FEATURE_HWP_ACT_WINDOW	(14*32+ 9) /* HWP Activity Window */ +#define X86_FEATURE_HWP_EPP		(14*32+10) /* HWP Energy Perf. Preference */ +#define X86_FEATURE_HWP_PKG_REQ		(14*32+11) /* HWP Package Level Request */ -/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ -#define X86_FEATURE_NPT		(15*32+ 0) /* Nested Page Table support */ -#define X86_FEATURE_LBRV	(15*32+ 1) /* LBR Virtualization support */ -#define X86_FEATURE_SVML	(15*32+ 2) /* "svm_lock" SVM locking MSR */ -#define X86_FEATURE_NRIPS	(15*32+ 3) /* "nrip_save" SVM next_rip save */ -#define X86_FEATURE_TSCRATEMSR  (15*32+ 4) /* "tsc_scale" TSC scaling support */ -#define X86_FEATURE_VMCBCLEAN   (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ -#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ -#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ -#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ -#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ -#define X86_FEATURE_AVIC	(15*32+13) /* Virtual Interrupt Controller */ -#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ -#define X86_FEATURE_VGIF	(15*32+16) /* Virtual GIF */ +/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */ +#define X86_FEATURE_NPT			(15*32+ 0) /* Nested Page Table support */ +#define X86_FEATURE_LBRV		(15*32+ 1) /* LBR Virtualization support */ +#define X86_FEATURE_SVML		(15*32+ 2) /* "svm_lock" SVM locking MSR */ +#define X86_FEATURE_NRIPS		(15*32+ 3) /* "nrip_save" SVM next_rip save */ +#define X86_FEATURE_TSCRATEMSR		(15*32+ 4) /* "tsc_scale" TSC scaling support */ +#define X86_FEATURE_VMCBCLEAN		(15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ +#define X86_FEATURE_FLUSHBYASID		(15*32+ 6) /* flush-by-ASID support */ +#define X86_FEATURE_DECODEASSISTS	(15*32+ 7) /* Decode Assists support */ +#define X86_FEATURE_PAUSEFILTER		(15*32+10) /* filtered pause intercept */ +#define X86_FEATURE_PFTHRESHOLD		(15*32+12) /* pause filter threshold */ +#define X86_FEATURE_AVIC		(15*32+13) /* Virtual Interrupt Controller */ +#define X86_FEATURE_V_VMSAVE_VMLOAD	(15*32+15) /* Virtual VMSAVE VMLOAD */ +#define X86_FEATURE_VGIF		(15*32+16) /* Virtual GIF */ -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ -#define X86_FEATURE_AVX512VBMI  (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ -#define X86_FEATURE_PKU		(16*32+ 3) /* Protection Keys for Userspace */ -#define X86_FEATURE_OSPKE	(16*32+ 4) /* OS Protection Keys Enable */ -#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ -#define X86_FEATURE_LA57	(16*32+16) /* 5-level page tables */ -#define X86_FEATURE_RDPID	(16*32+22) /* RDPID instruction */ +/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */ +#define 
X86_FEATURE_AVX512VBMI		(16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ +#define X86_FEATURE_UMIP		(16*32+ 2) /* User Mode Instruction Protection */ +#define X86_FEATURE_PKU			(16*32+ 3) /* Protection Keys for Userspace */ +#define X86_FEATURE_OSPKE		(16*32+ 4) /* OS Protection Keys Enable */ +#define X86_FEATURE_AVX512_VBMI2	(16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */ +#define X86_FEATURE_GFNI		(16*32+ 8) /* Galois Field New Instructions */ +#define X86_FEATURE_VAES		(16*32+ 9) /* Vector AES */ +#define X86_FEATURE_VPCLMULQDQ		(16*32+10) /* Carry-Less Multiplication Double Quadword */ +#define X86_FEATURE_AVX512_VNNI		(16*32+11) /* Vector Neural Network Instructions */ +#define X86_FEATURE_AVX512_BITALG	(16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */ +#define X86_FEATURE_AVX512_VPOPCNTDQ	(16*32+14) /* POPCNT for vectors of DW/QW */ +#define X86_FEATURE_LA57		(16*32+16) /* 5-level page tables */ +#define X86_FEATURE_RDPID		(16*32+22) /* RDPID instruction */ -/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ -#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ -#define X86_FEATURE_SUCCOR	(17*32+1) /* Uncorrectable error containment and recovery */ -#define X86_FEATURE_SMCA	(17*32+3) /* Scalable MCA */ +/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ +#define X86_FEATURE_OVERFLOW_RECOV	(17*32+ 0) /* MCA overflow recovery support */ +#define X86_FEATURE_SUCCOR		(17*32+ 1) /* Uncorrectable error containment and recovery */ +#define X86_FEATURE_SMCA		(17*32+ 3) /* Scalable MCA */  /*   * BUG word(s)   */ -#define X86_BUG(x)		(NCAPINTS*32 + (x)) +#define X86_BUG(x)			(NCAPINTS*32 + (x)) -#define X86_BUG_F00F		X86_BUG(0) /* Intel F00F */ -#define X86_BUG_FDIV		X86_BUG(1) /* FPU FDIV */ -#define X86_BUG_COMA		X86_BUG(2) /* Cyrix 6x86 coma */ -#define X86_BUG_AMD_TLB_MMATCH	X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ -#define X86_BUG_AMD_APIC_C1E	X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ -#define X86_BUG_11AP		X86_BUG(5) /* Bad local APIC aka 11AP */ -#define X86_BUG_FXSAVE_LEAK	X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ -#define X86_BUG_CLFLUSH_MONITOR	X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ -#define X86_BUG_SYSRET_SS_ATTRS	X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ +#define X86_BUG_F00F			X86_BUG(0) /* Intel F00F */ +#define X86_BUG_FDIV			X86_BUG(1) /* FPU FDIV */ +#define X86_BUG_COMA			X86_BUG(2) /* Cyrix 6x86 coma */ +#define X86_BUG_AMD_TLB_MMATCH		X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ +#define X86_BUG_AMD_APIC_C1E		X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ +#define X86_BUG_11AP			X86_BUG(5) /* Bad local APIC aka 11AP */ +#define X86_BUG_FXSAVE_LEAK		X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ +#define X86_BUG_CLFLUSH_MONITOR		X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ +#define X86_BUG_SYSRET_SS_ATTRS		X86_BUG(8) /* SYSRET doesn't fix up SS attrs */  #ifdef CONFIG_X86_32  /*   * 64-bit kernels don't use X86_BUG_ESPFIX.  Make the define conditional   * to avoid confusion.   
*/ -#define X86_BUG_ESPFIX		X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ +#define X86_BUG_ESPFIX			X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */  #endif -#define X86_BUG_NULL_SEG	X86_BUG(10) /* Nulling a selector preserves the base */ -#define X86_BUG_SWAPGS_FENCE	X86_BUG(11) /* SWAPGS without input dep on GS */ -#define X86_BUG_MONITOR		X86_BUG(12) /* IPI required to wake up remote CPU */ -#define X86_BUG_AMD_E400	X86_BUG(13) /* CPU is among the affected by Erratum 400 */ +#define X86_BUG_NULL_SEG		X86_BUG(10) /* Nulling a selector preserves the base */ +#define X86_BUG_SWAPGS_FENCE		X86_BUG(11) /* SWAPGS without input dep on GS */ +#define X86_BUG_MONITOR			X86_BUG(12) /* IPI required to wake up remote CPU */ +#define X86_BUG_AMD_E400		X86_BUG(13) /* CPU is among the affected by Erratum 400 */ +  #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index c10c9128f54e..14d6d5007314 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h @@ -16,6 +16,12 @@  # define DISABLE_MPX	(1<<(X86_FEATURE_MPX & 31))  #endif +#ifdef CONFIG_X86_INTEL_UMIP +# define DISABLE_UMIP	0 +#else +# define DISABLE_UMIP	(1<<(X86_FEATURE_UMIP & 31)) +#endif +  #ifdef CONFIG_X86_64  # define DISABLE_VME		(1<<(X86_FEATURE_VME & 31))  # define DISABLE_K6_MTRR	(1<<(X86_FEATURE_K6_MTRR & 31)) @@ -63,7 +69,7 @@  #define DISABLED_MASK13	0  #define DISABLED_MASK14	0  #define DISABLED_MASK15	0 -#define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57) +#define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)  #define DISABLED_MASK17	0  #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile index bde77d7c4390..37292bb5ce60 100644 --- a/tools/bpf/bpftool/Documentation/Makefile +++ b/tools/bpf/bpftool/Documentation/Makefile @@ -6,7 +6,7 @@ RM ?= rm -f  # Make the path relative to DESTDIR, not prefix  ifndef DESTDIR -prefix?=$(HOME) +prefix ?= /usr/local  endif  mandir ?= $(prefix)/share/man  man8dir = $(mandir)/man8 diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 813826c50936..ec3052c0b004 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -45,8 +45,8 @@ $(LIBBPF)-clean:  	$(call QUIET_CLEAN, libbpf)  	$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null -prefix = /usr -bash_compdir ?= $(prefix)/share/bash-completion/completions +prefix = /usr/local +bash_compdir ?= /usr/share/bash-completion/completions  CC = gcc @@ -76,6 +76,7 @@ clean: $(LIBBPF)-clean  	$(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d  install: +	install -m 0755 -d $(prefix)/sbin  	install $(OUTPUT)bpftool $(prefix)/sbin/bpftool  	install -m 0755 -d $(bash_compdir)  	install -m 0644 bash-completion/bpftool $(bash_compdir) @@ -88,5 +89,5 @@ doc-install:  FORCE: -.PHONY: all clean FORCE +.PHONY: all clean FORCE install doc doc-install  .DEFAULT_GOAL := all diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index d6e4762170a4..d294bc8168be 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -58,11 +58,19 @@ bool show_pinned;  struct pinned_obj_table prog_table;  struct pinned_obj_table map_table; +static void __noreturn clean_and_exit(int i) +{ +	if (json_output) +		jsonw_destroy(&json_wtr); + +	exit(i); +} +  void usage(void)  {  	
last_do_help(last_argc - 1, last_argv + 1); -	exit(-1); +	clean_and_exit(-1);  }  static int do_help(int argc, char **argv) @@ -280,6 +288,7 @@ int main(int argc, char **argv)  	hash_init(prog_table.table);  	hash_init(map_table.table); +	opterr = 0;  	while ((opt = getopt_long(argc, argv, "Vhpjf",  				  options, NULL)) >= 0) {  		switch (opt) { @@ -291,13 +300,25 @@ int main(int argc, char **argv)  			pretty_output = true;  			/* fall through */  		case 'j': -			json_output = true; +			if (!json_output) { +				json_wtr = jsonw_new(stdout); +				if (!json_wtr) { +					p_err("failed to create JSON writer"); +					return -1; +				} +				json_output = true; +			} +			jsonw_pretty(json_wtr, pretty_output);  			break;  		case 'f':  			show_pinned = true;  			break;  		default: -			usage(); +			p_err("unrecognized option '%s'", argv[optind - 1]); +			if (json_output) +				clean_and_exit(-1); +			else +				usage();  		}  	} @@ -306,15 +327,6 @@ int main(int argc, char **argv)  	if (argc < 0)  		usage(); -	if (json_output) { -		json_wtr = jsonw_new(stdout); -		if (!json_wtr) { -			p_err("failed to create JSON writer"); -			return -1; -		} -		jsonw_pretty(json_wtr, pretty_output); -	} -  	bfd_init();  	ret = cmd_select(cmds, argc, argv, do_help); diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 9c191e222d6f..bff330b49791 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -41,6 +41,7 @@  #include <stdbool.h>  #include <stdio.h>  #include <linux/bpf.h> +#include <linux/compiler.h>  #include <linux/kernel.h>  #include <linux/hashtable.h> @@ -50,7 +51,7 @@  #define NEXT_ARG()	({ argc--; argv++; if (argc < 0) usage(); })  #define NEXT_ARGP()	({ (*argc)--; (*argv)++; if (*argc < 0) usage(); }) -#define BAD_ARG()	({ p_err("what is '%s'?\n", *argv); -1; }) +#define BAD_ARG()	({ p_err("what is '%s'?", *argv); -1; })  #define ERR_MAX_LEN	1024 @@ -80,7 +81,7 @@ void p_info(const char *fmt, ...);  bool is_prefix(const char *pfx, const char *str);  void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep); -void usage(void) __attribute__((noreturn)); +void usage(void) __noreturn;  struct pinned_obj_table {  	DECLARE_HASHTABLE(table, 16); diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index e2450c8e88e6..a8c3a33dd185 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -523,21 +523,23 @@ static int do_show(int argc, char **argv)  				break;  			p_err("can't get next map: %s%s", strerror(errno),  			      errno == EINVAL ? " -- kernel too old?" 
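The main.c hunks above move JSON writer creation from after argument parsing into the option loop itself, so option errors can also be reported through the JSON path, and route every exit through clean_and_exit() to destroy the writer. The resulting lifecycle, as a sketch (assumes bpftool's internal json_writer.h is on the include path):

    #include "json_writer.h"    /* bpftool-internal API, an assumption of this sketch */

    int emit(void)
    {
        json_writer_t *wtr = jsonw_new(stdout);  /* created on the first -j/-p */
        if (!wtr)
            return -1;
        jsonw_pretty(wtr, true);                 /* -p is -j plus pretty-printing */

        /* ... commands and p_err() now write through wtr ... */

        jsonw_destroy(&wtr);                     /* clean_and_exit() runs this on every path */
        return 0;
    }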
: ""); -			return -1; +			break;  		}  		fd = bpf_map_get_fd_by_id(id);  		if (fd < 0) { +			if (errno == ENOENT) +				continue;  			p_err("can't get map by id (%u): %s",  			      id, strerror(errno)); -			return -1; +			break;  		}  		err = bpf_obj_get_info_by_fd(fd, &info, &len);  		if (err) {  			p_err("can't get map info: %s", strerror(errno));  			close(fd); -			return -1; +			break;  		}  		if (json_output) diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index ad619b96c276..dded77345bfb 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -382,6 +382,8 @@ static int do_show(int argc, char **argv)  		fd = bpf_prog_get_fd_by_id(id);  		if (fd < 0) { +			if (errno == ENOENT) +				continue;  			p_err("can't get prog by id (%u): %s",  			      id, strerror(errno));  			err = -1; diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index eaa3bec273c8..4c99c57736ce 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c @@ -193,11 +193,14 @@ static void kvp_update_mem_state(int pool)  	for (;;) {  		readp = &record[records_read];  		records_read += fread(readp, sizeof(struct kvp_record), -					ENTRIES_PER_BLOCK * num_blocks, -					filep); +				ENTRIES_PER_BLOCK * num_blocks - records_read, +				filep);  		if (ferror(filep)) { -			syslog(LOG_ERR, "Failed to read file, pool: %d", pool); +			syslog(LOG_ERR, +				"Failed to read file, pool: %d; error: %d %s", +				 pool, errno, strerror(errno)); +			kvp_release_lock(pool);  			exit(EXIT_FAILURE);  		} @@ -210,6 +213,7 @@ static void kvp_update_mem_state(int pool)  			if (record == NULL) {  				syslog(LOG_ERR, "malloc failed"); +				kvp_release_lock(pool);  				exit(EXIT_FAILURE);  			}  			continue; @@ -224,15 +228,11 @@ static void kvp_update_mem_state(int pool)  	fclose(filep);  	kvp_release_lock(pool);  } +  static int kvp_file_init(void)  {  	int  fd; -	FILE *filep; -	size_t records_read;  	char *fname; -	struct kvp_record *record; -	struct kvp_record *readp; -	int num_blocks;  	int i;  	int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK; @@ -246,61 +246,19 @@ static int kvp_file_init(void)  	for (i = 0; i < KVP_POOL_COUNT; i++) {  		fname = kvp_file_info[i].fname; -		records_read = 0; -		num_blocks = 1;  		sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);  		fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);  		if (fd == -1)  			return 1; - -		filep = fopen(fname, "re"); -		if (!filep) { -			close(fd); -			return 1; -		} - -		record = malloc(alloc_unit * num_blocks); -		if (record == NULL) { -			fclose(filep); -			close(fd); -			return 1; -		} -		for (;;) { -			readp = &record[records_read]; -			records_read += fread(readp, sizeof(struct kvp_record), -					ENTRIES_PER_BLOCK, -					filep); - -			if (ferror(filep)) { -				syslog(LOG_ERR, "Failed to read file, pool: %d", -				       i); -				exit(EXIT_FAILURE); -			} - -			if (!feof(filep)) { -				/* -				 * We have more data to read. 
-				 */ -				num_blocks++; -				record = realloc(record, alloc_unit * -						num_blocks); -				if (record == NULL) { -					fclose(filep); -					close(fd); -					return 1; -				} -				continue; -			} -			break; -		}  		kvp_file_info[i].fd = fd; -		kvp_file_info[i].num_blocks = num_blocks; -		kvp_file_info[i].records = record; -		kvp_file_info[i].num_records = records_read; -		fclose(filep); - +		kvp_file_info[i].num_blocks = 1; +		kvp_file_info[i].records = malloc(alloc_unit); +		if (kvp_file_info[i].records == NULL) +			return 1; +		kvp_file_info[i].num_records = 0; +		kvp_update_mem_state(i);  	}  	return 0; diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index 07fd03c74a77..04e32f965ad7 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h @@ -84,8 +84,6 @@  #define uninitialized_var(x) x = *(&(x)) -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) -  #include <linux/types.h>  /* @@ -135,20 +133,19 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s  /*   * Prevent the compiler from merging or refetching reads or writes. The   * compiler is also forbidden from reordering successive instances of - * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the - * compiler is aware of some particular ordering.  One way to make the - * compiler aware of ordering is to put the two invocations of READ_ONCE, - * WRITE_ONCE or ACCESS_ONCE() in different C statements. + * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some + * particular ordering. One way to make the compiler aware of ordering is to + * put the two invocations of READ_ONCE or WRITE_ONCE in different C + * statements.   * - * In contrast to ACCESS_ONCE these two macros will also work on aggregate - * data types like structs or unions. If the size of the accessed data - * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) - * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a - * compile-time warning. + * These two macros will also work on aggregate data types like structs or + * unions. If the size of the accessed data type exceeds the word size of + * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will + * fall back to memcpy and print a compile-time warning.   *   * Their two major use cases are: (1) Mediating communication between   * process-level code and irq/NMI handlers, all running on the same CPU, - * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise   * mutilate accesses that either do not require ordering or that interact   * with an explicit memory barrier or atomic instruction that provides the   * required ordering. diff --git a/tools/include/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h deleted file mode 100644 index ea32a7d3cf1b..000000000000 --- a/tools/include/linux/kmemcheck.h +++ /dev/null @@ -1 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h index 940c1b075659..6b0c36a58fcb 100644 --- a/tools/include/linux/lockdep.h +++ b/tools/include/linux/lockdep.h @@ -48,6 +48,7 @@ static inline int debug_locks_off(void)  #define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)  #define pr_err(format, ...) 
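The rewritten compiler.h comment above is easiest to see in use case (1) it mentions: a flag shared between normal code and an interrupt-style handler on the same CPU. A minimal sketch with the tools copy of the macros:

    #include <linux/compiler.h>   /* the tools/include copy patched above */

    static int done;

    void handler(void)            /* runs asynchronously */
    {
        WRITE_ONCE(done, 1);      /* a single, non-torn store the poller cannot miss */
    }

    void wait_for_done(void)
    {
        while (!READ_ONCE(done))  /* forces a fresh, non-hoisted load per iteration */
            ;
    }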
fprintf (stderr, format, ## __VA_ARGS__)  #define pr_warn pr_err +#define pr_cont pr_err  #define list_del_rcu list_del diff --git a/tools/include/uapi/asm-generic/bpf_perf_event.h b/tools/include/uapi/asm-generic/bpf_perf_event.h new file mode 100644 index 000000000000..53815d2cd047 --- /dev/null +++ b/tools/include/uapi/asm-generic/bpf_perf_event.h @@ -0,0 +1,9 @@ +#ifndef _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ +#define _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ + +#include <linux/ptrace.h> + +/* Export kernel pt_regs structure */ +typedef struct pt_regs bpf_user_pt_regs_t; + +#endif /* _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ */ diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h index 2dffcbf705b3..653687d9771b 100644 --- a/tools/include/uapi/asm-generic/mman.h +++ b/tools/include/uapi/asm-generic/mman.h @@ -13,6 +13,7 @@  #define MAP_NONBLOCK	0x10000		/* do not block on IO */  #define MAP_STACK	0x20000		/* give out an address that is best suited for process/thread stacks */  #define MAP_HUGETLB	0x40000		/* create a huge page mapping */ +#define MAP_SYNC	0x80000		/* perform synchronous page faults for the mapping */  /* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */ diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 000000000000..13a58531e6fa --- /dev/null +++ b/tools/include/uapi/asm/bpf_perf_event.h @@ -0,0 +1,7 @@ +#if defined(__aarch64__) +#include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h" +#elif defined(__s390__) +#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h" +#else +#include <uapi/asm-generic/bpf_perf_event.h> +#endif diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h index 97677cd6964d..6fdff5945c8a 100644 --- a/tools/include/uapi/drm/drm.h +++ b/tools/include/uapi/drm/drm.h @@ -737,6 +737,28 @@ struct drm_syncobj_array {  	__u32 pad;  }; +/* Query current scanout sequence number */ +struct drm_crtc_get_sequence { +	__u32 crtc_id;		/* requested crtc_id */ +	__u32 active;		/* return: crtc output is active */ +	__u64 sequence;		/* return: most recent vblank sequence */ +	__s64 sequence_ns;	/* return: most recent time of first pixel out */ +}; + +/* Queue event to be delivered at specified sequence. Time stamp marks + * when the first pixel of the refresh cycle leaves the display engine + * for the display + */ +#define DRM_CRTC_SEQUENCE_RELATIVE		0x00000001	/* sequence is relative to current */ +#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS		0x00000002	/* Use next sequence if we've missed */ + +struct drm_crtc_queue_sequence { +	__u32 crtc_id; +	__u32 flags; +	__u64 sequence;		/* on input, target sequence. 
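MAP_SYNC, added to asm-generic/mman.h above, requests synchronous page faults: by the time a write to the mapping completes, the filesystem metadata needed to reach that block is durable, which only makes sense for DAX-backed files. A hedged usage sketch; the pmem path is hypothetical, and pairing with MAP_SHARED_VALIDATE (so unsupported setups fail loudly) follows the kernel convention for new mmap flags:

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/mnt/pmem/data", O_RDWR);        /* hypothetical DAX file */
        if (fd < 0)
            return 1;

        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
        if (p == MAP_FAILED) {
            close(fd);
            return 1;                                   /* fall back to msync()/fsync() */
        }

        /* CPU stores to p are durable once flushed from caches; no fsync() needed */
        munmap(p, 4096);
        close(fd);
        return 0;
    }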
on output, actual sequence */ +	__u64 user_data;	/* user data passed to event */ +}; +  #if defined(__cplusplus)  }  #endif @@ -819,6 +841,9 @@ extern "C" {  #define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank) +#define DRM_IOCTL_CRTC_GET_SEQUENCE	DRM_IOWR(0x3b, struct drm_crtc_get_sequence) +#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE	DRM_IOWR(0x3c, struct drm_crtc_queue_sequence) +  #define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)  #define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res) @@ -863,6 +888,11 @@ extern "C" {  #define DRM_IOCTL_SYNCOBJ_RESET		DRM_IOWR(0xC4, struct drm_syncobj_array)  #define DRM_IOCTL_SYNCOBJ_SIGNAL	DRM_IOWR(0xC5, struct drm_syncobj_array) +#define DRM_IOCTL_MODE_CREATE_LEASE	DRM_IOWR(0xC6, struct drm_mode_create_lease) +#define DRM_IOCTL_MODE_LIST_LESSEES	DRM_IOWR(0xC7, struct drm_mode_list_lessees) +#define DRM_IOCTL_MODE_GET_LEASE	DRM_IOWR(0xC8, struct drm_mode_get_lease) +#define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease) +  /**   * Device specific ioctls should only be in their respective headers   * The device specific ioctl range is from 0x40 to 0x9f. @@ -893,6 +923,7 @@ struct drm_event {  #define DRM_EVENT_VBLANK 0x01  #define DRM_EVENT_FLIP_COMPLETE 0x02 +#define DRM_EVENT_CRTC_SEQUENCE	0x03  struct drm_event_vblank {  	struct drm_event base; @@ -903,6 +934,16 @@ struct drm_event_vblank {  	__u32 crtc_id; /* 0 on older kernels that do not support this */  }; +/* Event delivered at sequence. Time stamp marks when the first pixel + * of the refresh cycle leaves the display engine for the display + */ +struct drm_event_crtc_sequence { +	struct drm_event	base; +	__u64			user_data; +	__s64			time_ns; +	__u64			sequence; +}; +  /* typedef area */  #ifndef __KERNEL__  typedef struct drm_clip_rect drm_clip_rect_t; diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h index 9816590d3ad2..ac3c6503ca27 100644 --- a/tools/include/uapi/drm/i915_drm.h +++ b/tools/include/uapi/drm/i915_drm.h @@ -397,10 +397,20 @@ typedef struct drm_i915_irq_wait {  #define I915_PARAM_MIN_EU_IN_POOL	 39  #define I915_PARAM_MMAP_GTT_VERSION	 40 -/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution +/* + * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution   * priorities and the driver will attempt to execute batches in priority order. + * The param returns a capability bitmask, nonzero implies that the scheduler + * is enabled, with different features present according to the mask. + * + * The initial priority for each batch is supplied by the context and is + * controlled via I915_CONTEXT_PARAM_PRIORITY.   */  #define I915_PARAM_HAS_SCHEDULER	 41 +#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0) +#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1) +#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2) +  #define I915_PARAM_HUC_STATUS		 42  /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of @@ -1309,14 +1319,16 @@ struct drm_i915_reg_read {  	 * be specified  	 */  	__u64 offset; +#define I915_REG_READ_8B_WA (1ul << 0) +  	__u64 val; /* Return value */  };  /* Known registers:   *   * Render engine timestamp - 0x2358 + 64bit - gen7+   * - Note this register returns an invalid value if using the default - *   single instruction 8byte read, in order to workaround that use - *   offset (0x2538 | 1) instead. 
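The new CRTC sequence API above generalizes vblank waits to 64-bit sequence numbers: queue a target with DRM_IOCTL_CRTC_QUEUE_SEQUENCE and a drm_event_crtc_sequence later arrives on the device fd. A sketch using only the structs defined above; fd and crtc_id are assumed valid, and the read assumes no other events are pending:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <drm/drm.h>    /* the header patched above */

    int wait_one_frame(int fd, __u32 crtc_id)
    {
        struct drm_crtc_queue_sequence q;
        struct drm_event_crtc_sequence ev;

        memset(&q, 0, sizeof(q));
        q.crtc_id = crtc_id;
        q.flags = DRM_CRTC_SEQUENCE_RELATIVE;   /* "one refresh from now" */
        q.sequence = 1;
        q.user_data = 0x1234;                   /* echoed back in the event */
        if (ioctl(fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &q))
            return -1;

        if (read(fd, &ev, sizeof(ev)) != sizeof(ev) ||
            ev.base.type != DRM_EVENT_CRTC_SEQUENCE)
            return -1;

        return 0;   /* ev.sequence and ev.time_ns time-stamp first pixel out */
    }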
+ *   single instruction 8byte read, in order to workaround that pass + *   flag I915_REG_READ_8B_WA in offset field.   *   */ @@ -1359,6 +1371,10 @@ struct drm_i915_gem_context_param {  #define I915_CONTEXT_PARAM_GTT_SIZE	0x3  #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4  #define I915_CONTEXT_PARAM_BANNABLE	0x5 +#define I915_CONTEXT_PARAM_PRIORITY	0x6 +#define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */ +#define   I915_CONTEXT_DEFAULT_PRIORITY		0 +#define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */  	__u64 value;  }; @@ -1510,9 +1526,14 @@ struct drm_i915_perf_oa_config {  	__u32 n_boolean_regs;  	__u32 n_flex_regs; -	__u64 __user mux_regs_ptr; -	__u64 __user boolean_regs_ptr; -	__u64 __user flex_regs_ptr; +	/* +	 * These fields are pointers to tuples of u32 values (register +	 * address, value). For example the expected length of the buffer +	 * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs). +	 */ +	__u64 mux_regs_ptr; +	__u64 boolean_regs_ptr; +	__u64 flex_regs_ptr;  };  #if defined(__cplusplus) diff --git a/tools/include/uapi/linux/bpf_perf_event.h b/tools/include/uapi/linux/bpf_perf_event.h index 067427259820..8f95303f9d80 100644 --- a/tools/include/uapi/linux/bpf_perf_event.h +++ b/tools/include/uapi/linux/bpf_perf_event.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */  /* Copyright (c) 2016 Facebook   *   * This program is free software; you can redistribute it and/or @@ -7,11 +8,10 @@  #ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__  #define _UAPI__LINUX_BPF_PERF_EVENT_H__ -#include <linux/types.h> -#include <linux/ptrace.h> +#include <asm/bpf_perf_event.h>  struct bpf_perf_event_data { -	struct pt_regs regs; +	bpf_user_pt_regs_t regs;  	__u64 sample_period;  }; diff --git a/tools/include/uapi/linux/kcmp.h b/tools/include/uapi/linux/kcmp.h index 481e103da78e..ef1305010925 100644 --- a/tools/include/uapi/linux/kcmp.h +++ b/tools/include/uapi/linux/kcmp.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */  #ifndef _UAPI_LINUX_KCMP_H  #define _UAPI_LINUX_KCMP_H diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 7e99999d6236..496e59a2738b 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -630,9 +630,9 @@ struct kvm_s390_irq {  struct kvm_s390_irq_state {  	__u64 buf; -	__u32 flags; +	__u32 flags;        /* will stay unused for compatibility reasons */  	__u32 len; -	__u32 reserved[4]; +	__u32 reserved[4];  /* will stay unused for compatibility reasons */  };  /* for KVM_SET_GUEST_DEBUG */ @@ -931,6 +931,7 @@ struct kvm_ppc_resize_hpt {  #define KVM_CAP_PPC_SMT_POSSIBLE 147  #define KVM_CAP_HYPERV_SYNIC2 148  #define KVM_CAP_HYPERV_VP_INDEX 149 +#define KVM_CAP_S390_AIS_MIGRATION 150  #ifdef KVM_CAP_IRQ_ROUTING diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 362493a2f950..b9a4953018ed 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -942,6 +942,7 @@ enum perf_callchain_context {  #define PERF_AUX_FLAG_TRUNCATED		0x01	/* record was truncated to fit */  #define PERF_AUX_FLAG_OVERWRITE		0x02	/* snapshot from overwrite mode */  #define PERF_AUX_FLAG_PARTIAL		0x04	/* record contains gaps */ +#define PERF_AUX_FLAG_COLLISION		0x08	/* sample collided with another */  #define PERF_FLAG_FD_NO_GROUP		(1UL << 0)  #define PERF_FLAG_FD_OUTPUT		(1UL << 1) diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h index 
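The bpf_perf_event.h rework above, together with the new arch headers earlier in the diff, makes bpf_perf_event_data carry bpf_user_pt_regs_t instead of the kernel's struct pt_regs, so arm64 and s390 can expose their user-visible register layout. From a BPF program's point of view only the regs type changes; a hedged sketch, with the section names following the kernel samples' conventions of this era and clang -target bpf assumed as the compiler:

    #include <linux/bpf_perf_event.h>

    __attribute__((section("perf_event"), used))
    int on_sample(struct bpf_perf_event_data *ctx)
    {
        /* ctx->regs is bpf_user_pt_regs_t: user_pt_regs on arm64/s390,
         * the generic kernel pt_regs layout everywhere else */
        return ctx->sample_period > 1000000 ? 1 : 0;
    }

    __attribute__((section("license"), used))
    char _license[] = "GPL";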
a8d0759a9e40..af5f8c2df87a 100644 --- a/tools/include/uapi/linux/prctl.h +++ b/tools/include/uapi/linux/prctl.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */  #ifndef _LINUX_PRCTL_H  #define _LINUX_PRCTL_H @@ -197,4 +198,13 @@ struct prctl_mm_map {  # define PR_CAP_AMBIENT_LOWER		3  # define PR_CAP_AMBIENT_CLEAR_ALL	4 +/* arm64 Scalable Vector Extension controls */ +/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */ +#define PR_SVE_SET_VL			50	/* set task vector length */ +# define PR_SVE_SET_VL_ONEXEC		(1 << 18) /* defer effect until exec */ +#define PR_SVE_GET_VL			51	/* get task vector length */ +/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */ +# define PR_SVE_VL_LEN_MASK		0xffff +# define PR_SVE_VL_INHERIT		(1 << 17) /* inherit across exec */ +  #endif /* _LINUX_PRCTL_H */ diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index 217cf6f95c36..a5684d0968b4 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat @@ -478,7 +478,7 @@ class Provider(object):      @staticmethod      def is_field_wanted(fields_filter, field):          """Indicate whether field is valid according to fields_filter.""" -        if not fields_filter or fields_filter == "help": +        if not fields_filter:              return True          return re.match(fields_filter, field) is not None @@ -549,8 +549,8 @@ class TracepointProvider(Provider):      def update_fields(self, fields_filter):          """Refresh fields, applying fields_filter""" -        self._fields = [field for field in self.get_available_fields() -                        if self.is_field_wanted(fields_filter, field)] +        self.fields = [field for field in self.get_available_fields() +                       if self.is_field_wanted(fields_filter, field)]      @staticmethod      def get_online_cpus(): @@ -950,7 +950,8 @@ class Tui(object):              curses.nocbreak()              curses.endwin() -    def get_all_gnames(self): +    @staticmethod +    def get_all_gnames():          """Returns a list of (pid, gname) tuples of all running guests"""          res = []          try: @@ -963,7 +964,7 @@ class Tui(object):              # perform a sanity check before calling the more expensive              # function to possibly extract the guest name              if ' -name ' in line[1]: -                res.append((line[0], self.get_gname_from_pid(line[0]))) +                res.append((line[0], Tui.get_gname_from_pid(line[0])))          child.stdout.close()          return res @@ -984,7 +985,8 @@ class Tui(object):          except Exception:              self.screen.addstr(row + 1, 2, 'Not available') -    def get_pid_from_gname(self, gname): +    @staticmethod +    def get_pid_from_gname(gname):          """Fuzzy function to convert guest name to QEMU process pid.          Returns a list of potential pids, can be empty if no match found. @@ -992,7 +994,7 @@ class Tui(object):          """          pids = [] -        for line in self.get_all_gnames(): +        for line in Tui.get_all_gnames():              if gname == line[1]:                  pids.append(int(line[0])) @@ -1090,15 +1092,16 @@ class Tui(object):              # sort by totals              return (0, -stats[x][0])          total = 0. 
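The PR_SVE_* additions above are the userspace half of arm64 Scalable Vector Extension length control. The value passed to PR_SVE_SET_VL packs the vector length in bytes together with the flag bits, and PR_SVE_GET_VL returns the same packing, hence the shared PR_SVE_VL_LEN_MASK. A usage sketch (fails with EINVAL on kernels or CPUs without SVE):

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>    /* the header patched above */

    int main(void)
    {
        /* ask for 256-byte vectors and keep the setting across exec */
        if (prctl(PR_SVE_SET_VL, 256 | PR_SVE_VL_INHERIT) < 0)
            return 1;

        int vl = prctl(PR_SVE_GET_VL);
        if (vl < 0)
            return 1;
        printf("vector length: %d bytes\n", vl & PR_SVE_VL_LEN_MASK);
        return 0;
    }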
-        for val in stats.values(): -            total += val[0] +        for key in stats.keys(): +            if key.find('(') == -1: +                total += stats[key][0]          if self._sorting == SORT_DEFAULT:              sortkey = sortCurAvg          else:              sortkey = sortTotal +        tavg = 0          for key in sorted(stats.keys(), key=sortkey): - -            if row >= self.screen.getmaxyx()[0]: +            if row >= self.screen.getmaxyx()[0] - 1:                  break              values = stats[key]              if not values[0] and not values[1]: @@ -1110,9 +1113,15 @@ class Tui(object):                  self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %                                     (key, values[0], values[0] * 100 / total,                                      cur)) +                if cur != '' and key.find('(') == -1: +                    tavg += cur              row += 1          if row == 3:              self.screen.addstr(4, 1, 'No matching events reported yet') +        else: +            self.screen.addstr(row, 1, '%-40s %10d        %8s' % +                               ('Total', total, tavg if tavg else ''), +                               curses.A_BOLD)          self.screen.refresh()      def show_msg(self, text): @@ -1358,7 +1367,7 @@ class Tui(object):                  if char == 'x':                      self.update_drilldown()                      # prevents display of current values on next refresh -                    self.stats.get() +                    self.stats.get(self._display_guests)              except KeyboardInterrupt:                  break              except curses.error: @@ -1451,16 +1460,13 @@ Press any other key to refresh statistics immediately.          try:              pids = Tui.get_pid_from_gname(val)          except: -            raise optparse.OptionValueError('Error while searching for guest ' -                                            '"{}", use "-p" to specify a pid ' -                                            'instead'.format(val)) +            sys.exit('Error while searching for guest "{}". Use "-p" to ' +                     'specify a pid instead.'.format(val))          if len(pids) == 0: -            raise optparse.OptionValueError('No guest by the name "{}" ' -                                            'found'.format(val)) +            sys.exit('Error: No guest by the name "{}" found'.format(val))          if len(pids) > 1: -            raise optparse.OptionValueError('Multiple processes found (pids: ' -                                            '{}) - use "-p" to specify a pid ' -                                            'instead'.format(" ".join(pids))) +            sys.exit('Error: Multiple processes found (pids: {}). Use "-p" ' +                     'to specify the desired pid'.format(" ".join(pids)))          parser.values.pid = pids[0]      optparser = optparse.OptionParser(description=description_text,
help='restrict statistics to guest by name',                           callback=cb_guest_to_pid,                           ) -    (options, _) = optparser.parse_args(sys.argv) +    options, unkn = optparser.parse_args(sys.argv) +    if len(unkn) != 1: +        sys.exit('Error: Extra argument(s): ' + ' '.join(unkn[1:])) +    try: +        # verify that we were passed a valid regex up front +        re.compile(options.fields) +    except re.error: +        sys.exit('Error: "' + options.fields + '" is not a valid regular ' +                 'expression') +      return options @@ -1564,16 +1579,13 @@ def main():      stats = Stats(options) -    if options.fields == "help": -        event_list = "\n" -        s = stats.get() -        for key in s.keys(): -            if key.find('(') != -1: -                key = key[0:key.find('(')] -            if event_list.find('\n' + key + '\n') == -1: -                event_list += key + '\n' -        sys.stdout.write(event_list) -        return "" +    if options.fields == 'help': +        stats.fields_filter = None +        event_list = [] +        for key in stats.get().keys(): +            event_list.append(key.split('(', 1)[0]) +        sys.stdout.write('  ' + '\n  '.join(sorted(set(event_list))) + '\n') +        sys.exit(0)      if options.log:          log(stats) diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt index e5cf836be8a1..b5b3810c9e94 100644 --- a/tools/kvm/kvm_stat/kvm_stat.txt +++ b/tools/kvm/kvm_stat/kvm_stat.txt @@ -50,6 +50,8 @@ INTERACTIVE COMMANDS  *s*::   set update interval  *x*::	toggle reporting of stats for child trace events + ::     *Note*: The stats for the parents summarize the respective child trace +                events  Press any other key to refresh statistics immediately. 
@@ -86,7 +88,7 @@ OPTIONS  -f<fields>::  --fields=<fields>:: -	fields to display (regex) +	fields to display (regex), "-f help" for a list of available events  -h::  --help:: diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 0f94af3ccaaa..e6acc281dd37 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -7,9 +7,11 @@ ARCH := x86  endif  # always use the host compiler -CC = gcc -LD = ld -AR = ar +HOSTCC	?= gcc +HOSTLD	?= ld +CC	 = $(HOSTCC) +LD	 = $(HOSTLD) +AR	 = ar  ifeq ($(srctree),)  srctree := $(patsubst %/,%,$(dir $(CURDIR))) @@ -44,7 +46,7 @@ $(OBJTOOL_IN): fixdep FORCE  	@$(MAKE) $(build)=objtool  $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN) -	@./sync-check.sh +	@$(CONFIG_SHELL) ./sync-check.sh  	$(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@ diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 8acfc47af70e..540a209b78ab 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -138,7 +138,7 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,  			*type = INSN_STACK;  			op->src.type = OP_SRC_ADD;  			op->src.reg = op_to_cfi_reg[modrm_reg][rex_r]; -			op->dest.type = OP_SRC_REG; +			op->dest.type = OP_DEST_REG;  			op->dest.reg = CFI_SP;  		}  		break; diff --git a/tools/objtool/arch/x86/lib/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt index 12e377184ee4..e0b85930dd77 100644 --- a/tools/objtool/arch/x86/lib/x86-opcode-map.txt +++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt @@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)  fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)  fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)  fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) -ff: +ff: UD0  EndTable  Table: 3-byte opcode 1 (0x0f 0x38) @@ -717,7 +717,7 @@ AVXcode: 2  7e: vpermt2d/q Vx,Hx,Wx (66),(ev)  7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)  80: INVEPT Gy,Mdq (66) -81: INVPID Gy,Mdq (66) +81: INVVPID Gy,Mdq (66)  82: INVPCID Gy,Mdq (66)  83: vpmultishiftqb Vx,Hx,Wx (66),(ev)  88: vexpandps/d Vpd,Wpd (66),(ev) @@ -896,7 +896,7 @@ EndTable  GrpTable: Grp3_1  0: TEST Eb,Ib -1: +1: TEST Eb,Ib  2: NOT Eb  3: NEG Eb  4: MUL AL,Eb @@ -970,6 +970,15 @@ GrpTable: Grp9  EndTable  GrpTable: Grp10 +# all are UD1 +0: UD1 +1: UD1 +2: UD1 +3: UD1 +4: UD1 +5: UD1 +6: UD1 +7: UD1  EndTable  # Grp11A and Grp11B are expressed as Grp11 in Intel SDM diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c index 4c6b5c9ef073..91e8e19ff5e0 100644 --- a/tools/objtool/builtin-orc.c +++ b/tools/objtool/builtin-orc.c @@ -44,6 +44,9 @@ int cmd_orc(int argc, const char **argv)  	const char *objname;  	argc--; argv++; +	if (argc <= 0) +		usage_with_options(orc_usage, check_options); +  	if (!strncmp(argv[0], "gen", 3)) {  		argc = parse_options(argc, argv, check_options, orc_usage, 0);  		if (argc != 1) @@ -52,7 +55,6 @@ int cmd_orc(int argc, const char **argv)  		objname = argv[0];  		return check(objname, no_fp, no_unreachable, true); -  	}  	if (!strcmp(argv[0], "dump")) { diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 9b341584eb1b..f40d46e24bcc 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -428,6 +428,40 @@ static void add_ignores(struct objtool_file *file)  }  /* + * FIXME: For now, just ignore any alternatives which add retpolines.  This is + * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline. + * But it at least allows objtool to understand the control flow *around* the + * retpoline. 
+ */ +static int add_nospec_ignores(struct objtool_file *file) +{ +	struct section *sec; +	struct rela *rela; +	struct instruction *insn; + +	sec = find_section_by_name(file->elf, ".rela.discard.nospec"); +	if (!sec) +		return 0; + +	list_for_each_entry(rela, &sec->rela_list, list) { +		if (rela->sym->type != STT_SECTION) { +			WARN("unexpected relocation symbol type in %s", sec->name); +			return -1; +		} + +		insn = find_insn(file, rela->sym->sec, rela->addend); +		if (!insn) { +			WARN("bad .discard.nospec entry"); +			return -1; +		} + +		insn->ignore_alts = true; +	} + +	return 0; +} + +/*   * Find the destination instructions for all jumps.   */  static int add_jump_destinations(struct objtool_file *file) @@ -456,6 +490,13 @@ static int add_jump_destinations(struct objtool_file *file)  		} else if (rela->sym->sec->idx) {  			dest_sec = rela->sym->sec;  			dest_off = rela->sym->sym.st_value + rela->addend + 4; +		} else if (strstr(rela->sym->name, "_indirect_thunk_")) { +			/* +			 * Retpoline jumps are really dynamic jumps in +			 * disguise, so convert them accordingly. +			 */ +			insn->type = INSN_JUMP_DYNAMIC; +			continue;  		} else {  			/* sibling call */  			insn->jump_dest = 0; @@ -502,11 +543,18 @@ static int add_call_destinations(struct objtool_file *file)  			dest_off = insn->offset + insn->len + insn->immediate;  			insn->call_dest = find_symbol_by_offset(insn->sec,  								dest_off); +			/* +			 * FIXME: Thanks to retpolines, it's now considered +			 * normal for a function to call within itself.  So +			 * disable this warning for now. +			 */ +#if 0  			if (!insn->call_dest) {  				WARN_FUNC("can't find call dest symbol at offset 0x%lx",  					  insn->sec, insn->offset, dest_off);  				return -1;  			} +#endif  		} else if (rela->sym->type == STT_SECTION) {  			insn->call_dest = find_symbol_by_offset(rela->sym->sec,  								rela->addend+4); @@ -671,12 +719,6 @@ static int add_special_section_alts(struct objtool_file *file)  		return ret;  	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { -		alt = malloc(sizeof(*alt)); -		if (!alt) { -			WARN("malloc failed"); -			ret = -1; -			goto out; -		}  		orig_insn = find_insn(file, special_alt->orig_sec,  				      special_alt->orig_off); @@ -687,6 +729,10 @@ static int add_special_section_alts(struct objtool_file *file)  			goto out;  		} +		/* Ignore retpoline alternatives. 
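add_nospec_ignores() above is the consumer side: each relocation in .rela.discard.nospec points back at an instruction whose alternatives objtool should not follow. The producer is the retpoline series in the kernel proper, which emits the entries from inline asm; roughly the following shape (reconstructed from memory of the contemporaneous patches, so treat the macro as illustrative rather than authoritative):

    /* Kernel-side sketch: a label on the annotated site, plus one
     * PC-relative entry in .discard.nospec that objtool maps back to it.
     */
    #define ANNOTATE_NOSPEC_ALTERNATIVE            \
        "999:\n\t"                                 \
        ".pushsection .discard.nospec\n\t"         \
        ".long 999b - .\n\t"                       \
        ".popsection\n\t"

objtool then resolves the section-symbol relocation, finds the instruction at the recorded offset, and sets ignore_alts on it, which add_special_section_alts() honors further down.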
*/ +		if (orig_insn->ignore_alts) +			continue; +  		new_insn = NULL;  		if (!special_alt->group || special_alt->new_len) {  			new_insn = find_insn(file, special_alt->new_sec, @@ -712,6 +758,13 @@ static int add_special_section_alts(struct objtool_file *file)  				goto out;  		} +		alt = malloc(sizeof(*alt)); +		if (!alt) { +			WARN("malloc failed"); +			ret = -1; +			goto out; +		} +  		alt->insn = new_insn;  		list_add_tail(&alt->list, &orig_insn->alts); @@ -1028,6 +1081,10 @@ static int decode_sections(struct objtool_file *file)  	add_ignores(file); +	ret = add_nospec_ignores(file); +	if (ret) +		return ret; +  	ret = add_jump_destinations(file);  	if (ret)  		return ret; diff --git a/tools/objtool/check.h b/tools/objtool/check.h index 47d9ea70a83d..dbadb304a410 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h @@ -44,7 +44,7 @@ struct instruction {  	unsigned int len;  	unsigned char type;  	unsigned long immediate; -	bool alt_group, visited, dead_end, ignore, hint, save, restore; +	bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;  	struct symbol *call_dest;  	struct instruction *jump_dest;  	struct list_head alts; diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c index 36c5bf6a2675..c3343820916a 100644 --- a/tools/objtool/orc_dump.c +++ b/tools/objtool/orc_dump.c @@ -76,7 +76,8 @@ int orc_dump(const char *_objname)  	int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;  	struct orc_entry *orc = NULL;  	char *name; -	unsigned long nr_sections, orc_ip_addr = 0; +	size_t nr_sections; +	Elf64_Addr orc_ip_addr = 0;  	size_t shstrtab_idx;  	Elf *elf;  	Elf_Scn *scn; @@ -187,10 +188,10 @@ int orc_dump(const char *_objname)  				return -1;  			} -			printf("%s+%lx:", name, rela.r_addend); +			printf("%s+%llx:", name, (unsigned long long)rela.r_addend);  		} else { -			printf("%lx:", orc_ip_addr + (i * sizeof(int)) + orc_ip[i]); +			printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));  		} diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index e5ca31429c9b..e61fe703197b 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -165,6 +165,8 @@ int create_orc_sections(struct objtool_file *file)  	/* create .orc_unwind_ip and .rela.orc_unwind_ip sections */  	sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx); +	if (!sec) +		return -1;  	ip_relasec = elf_create_rela_section(file->elf, sec);  	if (!ip_relasec) diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index ed65e82f034e..0294bfb6c5f8 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -188,9 +188,7 @@ ifdef PYTHON_CONFIG    PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))    PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil    PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) -  ifeq ($(CC_NO_CLANG), 1) -    PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS)) -  endif +  PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))    FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)  endif @@ -576,14 +574,15 @@ ifndef NO_GTK2    endif  endif -  ifdef NO_LIBPERL    CFLAGS += -DNO_LIBPERL  else    PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)    PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))    PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS)) -  PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null` + 
 PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null) +  PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS)) +  PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))    FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)    ifneq ($(feature-libperl), 1) diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile index 21322e0385b8..09ba923debe8 100644 --- a/tools/perf/arch/s390/Makefile +++ b/tools/perf/arch/s390/Makefile @@ -2,3 +2,4 @@ ifndef NO_DWARF  PERF_HAVE_DWARF_REGS := 1  endif  HAVE_KVM_STAT_SUPPORT := 1 +PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1 diff --git a/tools/perf/arch/s390/include/perf_regs.h b/tools/perf/arch/s390/include/perf_regs.h index d2df54a6bc5a..bcfbaed78cc2 100644 --- a/tools/perf/arch/s390/include/perf_regs.h +++ b/tools/perf/arch/s390/include/perf_regs.h @@ -3,7 +3,7 @@  #include <stdlib.h>  #include <linux/types.h> -#include <../../../../arch/s390/include/uapi/asm/perf_regs.h> +#include <asm/perf_regs.h>  void perf_regs_load(u64 *regs); diff --git a/tools/perf/arch/s390/util/dwarf-regs.c b/tools/perf/arch/s390/util/dwarf-regs.c index f47576ce13ea..a8ace5cc6301 100644 --- a/tools/perf/arch/s390/util/dwarf-regs.c +++ b/tools/perf/arch/s390/util/dwarf-regs.c @@ -2,17 +2,43 @@  /*   * Mapping of DWARF debug register numbers into register names.   * - *    Copyright IBM Corp. 2010 - *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, + * Copyright IBM Corp. 2010, 2017 + * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, + *	      Hendrik Brueckner <brueckner@linux.vnet.ibm.com>   *   */ +#include <errno.h>  #include <stddef.h> -#include <dwarf-regs.h> +#include <stdlib.h>  #include <linux/kernel.h> +#include <asm/ptrace.h> +#include <string.h> +#include <dwarf-regs.h>  #include "dwarf-regs-table.h"  const char *get_arch_regstr(unsigned int n)  {  	return (n >= ARRAY_SIZE(s390_dwarf_regs)) ? NULL : s390_dwarf_regs[n];  } + +/* + * Convert the register name into an offset to struct pt_regs (kernel). + * This is required by the BPF prologue generator.  The BPF + * program is called in the BPF overflow handler in the perf + * core. + */ +int regs_query_register_offset(const char *name) +{ +	unsigned long gpr; + +	if (!name || strncmp(name, "%r", 2)) +		return -EINVAL; + +	errno = 0; +	gpr = strtoul(name + 2, NULL, 10); +	if (errno || gpr >= 16) +		return -EINVAL; + +	return offsetof(user_pt_regs, gprs) + 8 * gpr; +} diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index d95fdcc26f4b..944070e98a2c 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c @@ -216,6 +216,47 @@ static const char * const numa_usage[] = {  	NULL  }; +/* + * To get number of numa nodes present. + */ +static int nr_numa_nodes(void) +{ +	int i, nr_nodes = 0; + +	for (i = 0; i < g->p.nr_nodes; i++) { +		if (numa_bitmask_isbitset(numa_nodes_ptr, i)) +			nr_nodes++; +	} + +	return nr_nodes; +} + +/* + * To check if given numa node is present. + */ +static int is_node_present(int node) +{ +	return numa_bitmask_isbitset(numa_nodes_ptr, node); +} + +/* + * To check given numa node has cpus. 
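regs_query_register_offset() above feeds the BPF prologue generator, which needs byte offsets into the user-visible register frame rather than register names. Exercising the helper (illustrative values; the concrete result follows from the s390 user_pt_regs layout):

    #include <stdio.h>

    int regs_query_register_offset(const char *name);   /* from dwarf-regs.c above */

    int main(void)
    {
        /* "%r5" -> offsetof(user_pt_regs, gprs) + 8 * 5; anything that is
         * not %r0..%r15 ("r5", "%r16", NULL) yields -EINVAL */
        printf("%%r5 -> %d\n", regs_query_register_offset("%r5"));
        return 0;
    }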
+ */ +static bool node_has_cpus(int node) +{ +	struct bitmask *cpu = numa_allocate_cpumask(); +	unsigned int i; + +	if (cpu && !numa_node_to_cpus(node, cpu)) { +		for (i = 0; i < cpu->size; i++) { +			if (numa_bitmask_isbitset(cpu, i)) +				return true; +		} +	} + +	return false; /* lets fall back to nocpus safely */ +} +  static cpu_set_t bind_to_cpu(int target_cpu)  {  	cpu_set_t orig_mask, mask; @@ -244,12 +285,12 @@ static cpu_set_t bind_to_cpu(int target_cpu)  static cpu_set_t bind_to_node(int target_node)  { -	int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes; +	int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();  	cpu_set_t orig_mask, mask;  	int cpu;  	int ret; -	BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus); +	BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);  	BUG_ON(!cpus_per_node);  	ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask); @@ -649,7 +690,7 @@ static int parse_setup_node_list(void)  			int i;  			for (i = 0; i < mul; i++) { -				if (t >= g->p.nr_tasks) { +				if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {  					printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);  					goto out;  				} @@ -964,6 +1005,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)  	sum = 0;  	for (node = 0; node < g->p.nr_nodes; node++) { +		if (!is_node_present(node)) +			continue;  		nr = nodes[node];  		nr_min = min(nr, nr_min);  		nr_max = max(nr, nr_max); @@ -984,8 +1027,11 @@ static void calc_convergence(double runtime_ns_max, double *convergence)  	process_groups = 0;  	for (node = 0; node < g->p.nr_nodes; node++) { -		int processes = count_node_processes(node); +		int processes; +		if (!is_node_present(node)) +			continue; +		processes = count_node_processes(node);  		nr = nodes[node];  		tprintf(" %2d/%-2d", nr, processes); @@ -1291,7 +1337,7 @@ static void print_summary(void)  	printf("\n ###\n");  	printf(" # %d %s will execute (on %d nodes, %d CPUs):\n", -		g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus); +		g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);  	printf(" #      %5dx %5ldMB global  shared mem operations\n",  			g->p.nr_loops, g->p.bytes_global/1024/1024);  	printf(" #      %5dx %5ldMB process shared mem operations\n", diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c index bd1fedef3d1c..a0f7ed2b869b 100644 --- a/tools/perf/builtin-help.c +++ b/tools/perf/builtin-help.c @@ -284,7 +284,7 @@ static int perf_help_config(const char *var, const char *value, void *cb)  		add_man_viewer(value);  		return 0;  	} -	if (!strstarts(var, "man.")) +	if (strstarts(var, "man."))  		return add_man_viewer_info(var, value);  	return 0; @@ -314,7 +314,7 @@ static const char *cmd_to_page(const char *perf_cmd)  	if (!perf_cmd)  		return "perf"; -	else if (!strstarts(perf_cmd, "perf")) +	else if (strstarts(perf_cmd, "perf"))  		return perf_cmd;  	return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s; diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 3d7f33e19df2..003255910c05 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -339,6 +339,22 @@ static int record__open(struct record *rec)  	struct perf_evsel_config_term *err_term;  	int rc = 0; +	/* +	 * For initial_delay we need to add a dummy event so that we can track +	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the +	 * real events, the ones asked by the user. 
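The three numa.c helpers above make the bench survive sparse node numbering and memory-only nodes, where the old nr_cpus/nr_nodes arithmetic broke. The same libnuma calls in a standalone walk (link with -lnuma); note that this sketch frees the cpumask, which the in-tree node_has_cpus() skips:

    #include <numa.h>
    #include <stdio.h>

    int main(void)
    {
        if (numa_available() < 0)
            return 1;

        /* numa_max_node() is the highest id, not a count: ids can be sparse */
        for (int n = 0; n <= numa_max_node(); n++) {
            if (!numa_bitmask_isbitset(numa_nodes_ptr, n))
                continue;                   /* hole in the numbering */

            struct bitmask *cpus = numa_allocate_cpumask();
            int has_cpus = cpus && !numa_node_to_cpus(n, cpus) &&
                           numa_bitmask_weight(cpus) > 0;

            printf("node %d: %s\n", n, has_cpus ? "has cpus" : "memory only");
            if (cpus)
                numa_free_cpumask(cpus);
        }
        return 0;
    }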
+	 */ +	if (opts->initial_delay) { +		if (perf_evlist__add_dummy(evlist)) +			return -ENOMEM; + +		pos = perf_evlist__first(evlist); +		pos->tracking = 0; +		pos = perf_evlist__last(evlist); +		pos->tracking = 1; +		pos->attr.enable_on_exec = 1; +	} +  	perf_evlist__config(evlist, opts, &callchain_param);  	evlist__for_each_entry(evlist, pos) { @@ -749,17 +765,19 @@ static int record__synthesize(struct record *rec, bool tail)  			goto out;  	} -	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, -						 machine); -	WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n" -			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" -			   "Check /proc/kallsyms permission or run as root.\n"); - -	err = perf_event__synthesize_modules(tool, process_synthesized_event, -					     machine); -	WARN_ONCE(err < 0, "Couldn't record kernel module information.\n" -			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" -			   "Check /proc/modules permission or run as root.\n"); +	if (!perf_evlist__exclude_kernel(rec->evlist)) { +		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, +							 machine); +		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n" +				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" +				   "Check /proc/kallsyms permission or run as root.\n"); + +		err = perf_event__synthesize_modules(tool, process_synthesized_event, +						     machine); +		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n" +				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" +				   "Check /proc/modules permission or run as root.\n"); +	}  	if (perf_guest) {  		machines__process_guests(&session->machines, @@ -1693,7 +1711,7 @@ int cmd_record(int argc, const char **argv)  	err = -ENOMEM; -	if (symbol_conf.kptr_restrict) +	if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))  		pr_warning(  "WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"  "check /proc/sys/kernel/kptr_restrict.\n\n" diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 1394cd8d96f7..af5dd038195e 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -441,6 +441,9 @@ static void report__warn_kptr_restrict(const struct report *rep)  	struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);  	struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL; +	if (perf_evlist__exclude_kernel(rep->session->evlist)) +		return; +  	if (kernel_map == NULL ||  	    (kernel_map->dso->hit &&  	     (kernel_kmap->ref_reloc_sym == NULL || diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 68f36dc0344f..9b43bda45a41 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -1955,6 +1955,16 @@ static int perf_script__fopen_per_event_dump(struct perf_script *script)  	struct perf_evsel *evsel;  	evlist__for_each_entry(script->session->evlist, evsel) { +		/* +		 * Already set up? I.e. we may be called twice in cases like +		 * Intel PT, one for the intel_pt// and dummy events, then +		 * for the evsels synthesized from the auxtrace info. +		 * +		 * See perf_script__process_auxtrace_info.
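The record__open() hunk above exists because --delay keeps the real events disabled until after the workload starts, during which time nothing would collect PERF_RECORD_MMAP and friends; an always-on dummy event takes over that side-band duty. In perf_event_attr terms the split looks roughly like this (a sketch, not the tool's exact configuration):

    #include <string.h>
    #include <linux/perf_event.h>

    void split_tracking(struct perf_event_attr *dummy, struct perf_event_attr *real)
    {
        memset(dummy, 0, sizeof(*dummy));
        dummy->size = sizeof(*dummy);
        dummy->type = PERF_TYPE_SOFTWARE;
        dummy->config = PERF_COUNT_SW_DUMMY;  /* counts nothing... */
        dummy->mmap = 1;                      /* ...but still emits mmap/comm records */
        dummy->comm = 1;

        memset(real, 0, sizeof(*real));
        real->size = sizeof(*real);
        real->type = PERF_TYPE_HARDWARE;
        real->config = PERF_COUNT_HW_CPU_CYCLES;
        real->disabled = 1;
        real->enable_on_exec = 1;             /* armed only when the workload execs */
    }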
+		 */ +		if (evsel->priv != NULL) +			continue; +  		evsel->priv = perf_evsel_script__new(evsel, script->session->data);  		if (evsel->priv == NULL)  			goto out_err_fclose; @@ -2838,6 +2848,25 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,  	return set_maps(script);  } +#ifdef HAVE_AUXTRACE_SUPPORT +static int perf_script__process_auxtrace_info(struct perf_tool *tool, +					      union perf_event *event, +					      struct perf_session *session) +{ +	int ret = perf_event__process_auxtrace_info(tool, event, session); + +	if (ret == 0) { +		struct perf_script *script = container_of(tool, struct perf_script, tool); + +		ret = perf_script__setup_per_event_dump(script); +	} + +	return ret; +} +#else +#define perf_script__process_auxtrace_info 0 +#endif +  int cmd_script(int argc, const char **argv)  {  	bool show_full_info = false; @@ -2866,7 +2895,7 @@ int cmd_script(int argc, const char **argv)  			.feature	 = perf_event__process_feature,  			.build_id	 = perf_event__process_build_id,  			.id_index	 = perf_event__process_id_index, -			.auxtrace_info	 = perf_event__process_auxtrace_info, +			.auxtrace_info	 = perf_script__process_auxtrace_info,  			.auxtrace	 = perf_event__process_auxtrace,  			.auxtrace_error	 = perf_event__process_auxtrace_error,  			.stat		 = perf_event__process_stat_event, diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 477a8699f0b5..9e0d2645ae13 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -77,6 +77,7 @@  #include "sane_ctype.h"  static volatile int done; +static volatile int resize;  #define HEADER_LINE_NR  5 @@ -85,11 +86,13 @@ static void perf_top__update_print_entries(struct perf_top *top)  	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;  } -static void perf_top__sig_winch(int sig __maybe_unused, -				siginfo_t *info __maybe_unused, void *arg) +static void winch_sig(int sig __maybe_unused)  { -	struct perf_top *top = arg; +	resize = 1; +} +static void perf_top__resize(struct perf_top *top) +{  	get_term_dimensions(&top->winsize);  	perf_top__update_print_entries(top);  } @@ -473,12 +476,8 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)  		case 'e':  			prompt_integer(&top->print_entries, "Enter display entries (lines)");  			if (top->print_entries == 0) { -				struct sigaction act = { -					.sa_sigaction = perf_top__sig_winch, -					.sa_flags     = SA_SIGINFO, -				}; -				perf_top__sig_winch(SIGWINCH, NULL, top); -				sigaction(SIGWINCH, &act, NULL); +				perf_top__resize(top); +				signal(SIGWINCH, winch_sig);  			} else {  				signal(SIGWINCH, SIG_DFL);  			} @@ -732,14 +731,16 @@ static void perf_event__process_sample(struct perf_tool *tool,  	if (!machine->kptr_restrict_warned &&  	    symbol_conf.kptr_restrict &&  	    al.cpumode == PERF_RECORD_MISC_KERNEL) { -		ui__warning( +		if (!perf_evlist__exclude_kernel(top->session->evlist)) { +			ui__warning(  "Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"  "Check /proc/sys/kernel/kptr_restrict.\n\n"  "Kernel%s samples will not be resolved.\n",  			  al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?  			  
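The builtin-top changes above replace resizing work done inside the SIGWINCH handler with a volatile flag polled from the main loop, since the terminal and curses calls involved are not async-signal-safe. The canonical shape of that pattern:

    #include <signal.h>

    static volatile sig_atomic_t resize;    /* the only state the handler touches */

    static void winch_sig(int sig)
    {
        (void)sig;
        resize = 1;                         /* async-signal-safe: set a flag, return */
    }

    void ui_loop(void)
    {
        signal(SIGWINCH, winch_sig);
        for (;;) {
            /* ... poll for input/events with a timeout ... */
            if (resize) {
                resize = 0;
                /* safe here: re-read the window size and redraw */
            }
        }
    }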
" modules" : ""); -		if (use_browser <= 0) -			sleep(5); +			if (use_browser <= 0) +				sleep(5); +		}  		machine->kptr_restrict_warned = true;  	} @@ -1030,6 +1031,11 @@ static int __cmd_top(struct perf_top *top)  		if (hits == top->samples)  			ret = perf_evlist__poll(top->evlist, 100); + +		if (resize) { +			perf_top__resize(top); +			resize = 0; +		}  	}  	ret = 0; @@ -1352,12 +1358,8 @@ int cmd_top(int argc, const char **argv)  	get_term_dimensions(&top.winsize);  	if (top.print_entries == 0) { -		struct sigaction act = { -			.sa_sigaction = perf_top__sig_winch, -			.sa_flags     = SA_SIGINFO, -		};  		perf_top__update_print_entries(&top); -		sigaction(SIGWINCH, &act, NULL); +		signal(SIGWINCH, winch_sig);  	}  	status = __cmd_top(&top); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index f2757d38c7d7..84debdbad327 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -1152,12 +1152,14 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)  	if (trace->host == NULL)  		return -ENOMEM; -	if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0) -		return -errno; +	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr); +	if (err < 0) +		goto out;  	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,  					    evlist->threads, trace__tool_process, false,  					    trace->opts.proc_map_timeout, 1); +out:  	if (err)  		symbol__exit(); diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index 77406d25e521..3e64f10b6d66 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -21,6 +21,7 @@ arch/x86/include/asm/cpufeatures.h  arch/arm/include/uapi/asm/perf_regs.h  arch/arm64/include/uapi/asm/perf_regs.h  arch/powerpc/include/uapi/asm/perf_regs.h +arch/s390/include/uapi/asm/perf_regs.h  arch/x86/include/uapi/asm/perf_regs.h  arch/x86/include/uapi/asm/kvm.h  arch/x86/include/uapi/asm/kvm_perf.h @@ -30,6 +31,7 @@ arch/x86/include/uapi/asm/vmx.h  arch/powerpc/include/uapi/asm/kvm.h  arch/s390/include/uapi/asm/kvm.h  arch/s390/include/uapi/asm/kvm_perf.h +arch/s390/include/uapi/asm/ptrace.h  arch/s390/include/uapi/asm/sie.h  arch/arm/include/uapi/asm/kvm.h  arch/arm64/include/uapi/asm/kvm.h diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c index cf36de7ea255..0c6d1002b524 100644 --- a/tools/perf/jvmti/jvmti_agent.c +++ b/tools/perf/jvmti/jvmti_agent.c @@ -384,13 +384,13 @@ jvmti_write_code(void *agent, char const *sym,  }  int -jvmti_write_debug_info(void *agent, uint64_t code, const char *file, -		       jvmti_line_info_t *li, int nr_lines) +jvmti_write_debug_info(void *agent, uint64_t code, +    int nr_lines, jvmti_line_info_t *li, +    const char * const * file_names)  {  	struct jr_code_debug_info rec; -	size_t sret, len, size, flen; +	size_t sret, len, size, flen = 0;  	uint64_t addr; -	const char *fn = file;  	FILE *fp = agent;  	int i; @@ -405,7 +405,9 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,  		return -1;  	} -	flen = strlen(file) + 1; +	for (i = 0; i < nr_lines; ++i) { +	    flen += strlen(file_names[i]) + 1; +	}  	rec.p.id        = JIT_CODE_DEBUG_INFO;  	size            = sizeof(rec); @@ -421,7 +423,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,  	 * file[]   : source file name  	 */  	size += nr_lines * sizeof(struct debug_entry); -	size += flen * nr_lines; +	size += flen;  	rec.p.total_size = 
size;  	/* @@ -452,7 +454,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,  		if (sret != 1)  			goto error; -		sret = fwrite_unlocked(fn, flen, 1, fp); +		sret = fwrite_unlocked(file_names[i], strlen(file_names[i]) + 1, 1, fp);  		if (sret != 1)  			goto error;  	} diff --git a/tools/perf/jvmti/jvmti_agent.h b/tools/perf/jvmti/jvmti_agent.h index fe32d8344a82..6ed82f6c06dd 100644 --- a/tools/perf/jvmti/jvmti_agent.h +++ b/tools/perf/jvmti/jvmti_agent.h @@ -14,6 +14,7 @@ typedef struct {  	unsigned long	pc;  	int		line_number;  	int		discrim; /* discriminator -- 0 for now */ +	jmethodID	methodID;  } jvmti_line_info_t;  void *jvmti_open(void); @@ -22,11 +23,9 @@ int   jvmti_write_code(void *agent, char const *symbol_name,  		       uint64_t vma, void const *code,  		       const unsigned int code_size); -int   jvmti_write_debug_info(void *agent, -		             uint64_t code, -			     const char *file, +int   jvmti_write_debug_info(void *agent, uint64_t code, int nr_lines,  			     jvmti_line_info_t *li, -			     int nr_lines); +			     const char * const * file_names);  #if defined(__cplusplus)  } diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c index c62c9fc9a525..6add3e982614 100644 --- a/tools/perf/jvmti/libjvmti.c +++ b/tools/perf/jvmti/libjvmti.c @@ -47,6 +47,7 @@ do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,  			tab[lines].pc = (unsigned long)pc;  			tab[lines].line_number = loc_tab[i].line_number;  			tab[lines].discrim = 0; /* not yet used */ +			tab[lines].methodID = m;  			lines++;  		} else {  			break; @@ -125,6 +126,99 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **  	return JVMTI_ERROR_NONE;  } +static void +copy_class_filename(const char * class_sign, const char * file_name, char * result, size_t max_length) +{ +	/* +	* Assume path name is class hierarchy, this is a common practice with Java programs +	*/ +	if (*class_sign == 'L') { +		int j, i = 0; +		char *p = strrchr(class_sign, '/'); +		if (p) { +			/* drop the 'L' prefix and copy up to the final '/' */ +			for (i = 0; i < (p - class_sign); i++) +				result[i] = class_sign[i+1]; +		} +		/* +		* append file name, we use loops and not string ops to avoid modifying +		* class_sign which is used later for the symbol name +		*/ +		for (j = 0; i < (max_length - 1) && file_name && j < strlen(file_name); j++, i++) +			result[i] = file_name[j]; + +		result[i] = '\0'; +	} else { +		/* fallback case */ +		size_t file_name_len = strlen(file_name); +		strncpy(result, file_name, file_name_len < max_length ? 
file_name_len : max_length); +	} +} + +static jvmtiError +get_source_filename(jvmtiEnv *jvmti, jmethodID methodID, char ** buffer) +{ +	jvmtiError ret; +	jclass decl_class; +	char *file_name = NULL; +	char *class_sign = NULL; +	char fn[PATH_MAX]; +	size_t len; + +	ret = (*jvmti)->GetMethodDeclaringClass(jvmti, methodID, &decl_class); +	if (ret != JVMTI_ERROR_NONE) { +		print_error(jvmti, "GetMethodDeclaringClass", ret); +		return ret; +	} + +	ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name); +	if (ret != JVMTI_ERROR_NONE) { +		print_error(jvmti, "GetSourceFileName", ret); +		return ret; +	} + +	ret = (*jvmti)->GetClassSignature(jvmti, decl_class, &class_sign, NULL); +	if (ret != JVMTI_ERROR_NONE) { +		print_error(jvmti, "GetClassSignature", ret); +		goto free_file_name_error; +	} + +	copy_class_filename(class_sign, file_name, fn, PATH_MAX); +	len = strlen(fn); +	*buffer = malloc((len + 1) * sizeof(char)); +	if (!*buffer) { +		print_error(jvmti, "GetClassSignature", ret); +		ret = JVMTI_ERROR_OUT_OF_MEMORY; +		goto free_class_sign_error; +	} +	strcpy(*buffer, fn); +	ret = JVMTI_ERROR_NONE; + +free_class_sign_error: +	(*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign); +free_file_name_error: +	(*jvmti)->Deallocate(jvmti, (unsigned char *)file_name); + +	return ret; +} + +static jvmtiError +fill_source_filenames(jvmtiEnv *jvmti, int nr_lines, +		      const jvmti_line_info_t * line_tab, +		      char ** file_names) +{ +	int index; +	jvmtiError ret; + +	for (index = 0; index < nr_lines; ++index) { +		ret = get_source_filename(jvmti, line_tab[index].methodID, &(file_names[index])); +		if (ret != JVMTI_ERROR_NONE) +			return ret; +	} + +	return JVMTI_ERROR_NONE; +} +  static void JNICALL  compiled_method_load_cb(jvmtiEnv *jvmti,  			jmethodID method, @@ -135,16 +229,18 @@ compiled_method_load_cb(jvmtiEnv *jvmti,  			const void *compile_info)  {  	jvmti_line_info_t *line_tab = NULL; +	char ** line_file_names = NULL;  	jclass decl_class;  	char *class_sign = NULL;  	char *func_name = NULL;  	char *func_sign = NULL; -	char *file_name= NULL; +	char *file_name = NULL;  	char fn[PATH_MAX];  	uint64_t addr = (uint64_t)(uintptr_t)code_addr;  	jvmtiError ret;  	int nr_lines = 0; /* in line_tab[] */  	size_t len; +	int output_debug_info = 0;  	ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method,  						&decl_class); @@ -158,6 +254,19 @@ compiled_method_load_cb(jvmtiEnv *jvmti,  		if (ret != JVMTI_ERROR_NONE) {  			warnx("jvmti: cannot get line table for method");  			nr_lines = 0; +		} else if (nr_lines > 0) { +			line_file_names = malloc(sizeof(char*) * nr_lines); +			if (!line_file_names) { +				warnx("jvmti: cannot allocate space for line table method names"); +			} else { +				memset(line_file_names, 0, sizeof(char*) * nr_lines); +				ret = fill_source_filenames(jvmti, nr_lines, line_tab, line_file_names); +				if (ret != JVMTI_ERROR_NONE) { +					warnx("jvmti: fill_source_filenames failed"); +				} else { +					output_debug_info = 1; +				} +			}  		}  	} @@ -181,33 +290,14 @@ compiled_method_load_cb(jvmtiEnv *jvmti,  		goto error;  	} -	/* -	 * Assume path name is class hierarchy, this is a common practice with Java programs -	 */ -	if (*class_sign == 'L') { -		int j, i = 0; -		char *p = strrchr(class_sign, '/'); -		if (p) { -			/* drop the 'L' prefix and copy up to the final '/' */ -			for (i = 0; i < (p - class_sign); i++) -				fn[i] = class_sign[i+1]; -		} -		/* -		 * append file name, we use loops and not string ops to avoid modifying -		 * class_sign which is used later 
for the symbol name -		 */ -		for (j = 0; i < (PATH_MAX - 1) && file_name && j < strlen(file_name); j++, i++) -			fn[i] = file_name[j]; -		fn[i] = '\0'; -	} else { -		/* fallback case */ -		strcpy(fn, file_name); -	} +	copy_class_filename(class_sign, file_name, fn, PATH_MAX); +  	/*  	 * write source line info record if we have it  	 */ -	if (jvmti_write_debug_info(jvmti_agent, addr, fn, line_tab, nr_lines)) -		warnx("jvmti: write_debug_info() failed"); +	if (output_debug_info) +		if (jvmti_write_debug_info(jvmti_agent, addr, nr_lines, line_tab, (const char * const *) line_file_names)) +			warnx("jvmti: write_debug_info() failed");  	len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2;  	{ @@ -223,6 +313,13 @@ error:  	(*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);  	(*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);  	free(line_tab); +	while (line_file_names && (nr_lines > 0)) { +	    if (line_file_names[nr_lines - 1]) { +	        free(line_file_names[nr_lines - 1]); +	    } +	    nr_lines -= 1; +	} +	free(line_file_names);  }  static void JNICALL diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh index 7a84d73324e3..8b3da21a08f1 100755 --- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh @@ -10,8 +10,8 @@  . $(dirname $0)/lib/probe.sh -ld=$(realpath /lib64/ld*.so.* | uniq) -libc=$(echo $ld | sed 's/ld/libc/g') +libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g') +nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254  trace_libc_inet_pton_backtrace() {  	idx=0 @@ -37,6 +37,9 @@ trace_libc_inet_pton_backtrace() {  	done  } +# Check for IPv6 interface existence +ip a sh lo | fgrep -q inet6 || exit 2 +  skip_if_no_perf_probe && \  perf probe -q $libc inet_pton && \  trace_libc_inet_pton_backtrace diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh index 2e68c5f120da..2a9ef080efd0 100755 --- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh +++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh @@ -17,8 +17,10 @@ skip_if_no_perf_probe || exit 2  file=$(mktemp /tmp/temporary_file.XXXXX)  trace_open_vfs_getname() { -	perf trace -e open touch $file 2>&1 | \ -	egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open\(filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$" +	test "$(uname -m)" = s390x && { svc="openat"; txt="dfd: +CWD, +"; } + +	perf trace -e ${svc:-open} touch $file 2>&1 | \ +	egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ ${svc:-open}\(${txt}filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"  } diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c index bc4a7344e274..89c8e1604ca7 100644 --- a/tools/perf/tests/task-exit.c +++ b/tools/perf/tests/task-exit.c @@ -84,7 +84,11 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused  	evsel = perf_evlist__first(evlist);  	evsel->attr.task = 1; +#ifdef __s390x__ +	evsel->attr.sample_freq = 1000000; +#else  	evsel->attr.sample_freq = 1; +#endif  	evsel->attr.inherit = 0;  	evsel->attr.watermark = 0;  	evsel->attr.wakeup_events = 1; diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c index 9e1668b2c5d7..417e3ecfe9d7 100644 --- a/tools/perf/trace/beauty/mmap.c +++ 
b/tools/perf/trace/beauty/mmap.c @@ -62,6 +62,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,  	P_MMAP_FLAG(POPULATE);  	P_MMAP_FLAG(STACK);  	P_MMAP_FLAG(UNINITIALIZED); +#ifdef MAP_SYNC +	P_MMAP_FLAG(SYNC); +#endif  #undef P_MMAP_FLAG  	if (flags) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index da1c4c4a0dd8..3369c7830260 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -165,7 +165,7 @@ static void ins__delete(struct ins_operands *ops)  static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,  			      struct ins_operands *ops)  { -	return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw); +	return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);  }  int ins__scnprintf(struct ins *ins, char *bf, size_t size, @@ -230,12 +230,12 @@ static int call__scnprintf(struct ins *ins, char *bf, size_t size,  			   struct ins_operands *ops)  {  	if (ops->target.name) -		return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name); +		return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);  	if (ops->target.addr == 0)  		return ins__raw_scnprintf(ins, bf, size, ops); -	return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr); +	return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);  }  static struct ins_ops call_ops = { @@ -299,7 +299,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,  			c++;  	} -	return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64, +	return scnprintf(bf, size, "%-6s %.*s%" PRIx64,  			 ins->name, c ? c - ops->raw : 0, ops->raw,  			 ops->target.offset);  } @@ -372,7 +372,7 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,  	if (ops->locked.ins.ops == NULL)  		return ins__raw_scnprintf(ins, bf, size, ops); -	printed = scnprintf(bf, size, "%-6.6s ", ins->name); +	printed = scnprintf(bf, size, "%-6s ", ins->name);  	return printed + ins__scnprintf(&ops->locked.ins, bf + printed,  					size - printed, ops->locked.ops);  } @@ -448,7 +448,7 @@ out_free_source:  static int mov__scnprintf(struct ins *ins, char *bf, size_t size,  			   struct ins_operands *ops)  { -	return scnprintf(bf, size, "%-6.6s %s,%s", ins->name, +	return scnprintf(bf, size, "%-6s %s,%s", ins->name,  			 ops->source.name ?: ops->source.raw,  			 ops->target.name ?: ops->target.raw);  } @@ -488,7 +488,7 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops  static int dec__scnprintf(struct ins *ins, char *bf, size_t size,  			   struct ins_operands *ops)  { -	return scnprintf(bf, size, "%-6.6s %s", ins->name, +	return scnprintf(bf, size, "%-6s %s", ins->name,  			 ops->target.name ?: ops->target.raw);  } @@ -500,7 +500,7 @@ static struct ins_ops dec_ops = {  static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,  			  struct ins_operands *ops __maybe_unused)  { -	return scnprintf(bf, size, "%-6.6s", "nop"); +	return scnprintf(bf, size, "%-6s", "nop");  }  static struct ins_ops nop_ops = { @@ -924,7 +924,7 @@ void disasm_line__free(struct disasm_line *dl)  int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)  {  	if (raw || !dl->ins.ops) -		return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw); +		return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);  	return ins__scnprintf(&dl->ins, bf, size, &dl->ops);  } diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index c6c891e154a6..b62e523a7035 100644 --- 
a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -257,7 +257,7 @@ int perf_evlist__add_dummy(struct perf_evlist *evlist)  		.config = PERF_COUNT_SW_DUMMY,  		.size	= sizeof(attr), /* to capture ABI version */  	}; -	struct perf_evsel *evsel = perf_evsel__new(&attr); +	struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);  	if (evsel == NULL)  		return -ENOMEM; @@ -1786,3 +1786,15 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,  state_err:  	return;  } + +bool perf_evlist__exclude_kernel(struct perf_evlist *evlist) +{ +	struct perf_evsel *evsel; + +	evlist__for_each_entry(evlist, evsel) { +		if (!evsel->attr.exclude_kernel) +			return false; +	} + +	return true; +} diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index e72ae64c11ac..491f69542920 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -312,4 +312,6 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);  struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,  					    union perf_event *event); + +bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);  #endif /* __PERF_EVLIST_H */ diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index f894893c203d..d5fbcf8c7aa7 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -733,12 +733,16 @@ static void apply_config_terms(struct perf_evsel *evsel,  	list_for_each_entry(term, config_terms, list) {  		switch (term->type) {  		case PERF_EVSEL__CONFIG_TERM_PERIOD: -			attr->sample_period = term->val.period; -			attr->freq = 0; +			if (!(term->weak && opts->user_interval != ULLONG_MAX)) { +				attr->sample_period = term->val.period; +				attr->freq = 0; +			}  			break;  		case PERF_EVSEL__CONFIG_TERM_FREQ: -			attr->sample_freq = term->val.freq; -			attr->freq = 1; +			if (!(term->weak && opts->user_freq != UINT_MAX)) { +				attr->sample_freq = term->val.freq; +				attr->freq = 1; +			}  			break;  		case PERF_EVSEL__CONFIG_TERM_TIME:  			if (term->val.time) @@ -1371,7 +1375,7 @@ perf_evsel__process_group_data(struct perf_evsel *leader,  static int  perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)  { -	struct perf_stat_evsel *ps = leader->priv; +	struct perf_stat_evsel *ps = leader->stats;  	u64 read_format = leader->attr.read_format;  	int size = perf_evsel__read_size(leader);  	u64 *data = ps->group_data; diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 9277df96ffda..157f49e8a772 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -67,6 +67,7 @@ struct perf_evsel_config_term {  		bool	overwrite;  		char	*branch;  	} val; +	bool weak;  };  struct perf_stat_evsel; diff --git a/tools/perf/util/intel-pt-decoder/inat.h b/tools/perf/util/intel-pt-decoder/inat.h index 125ecd2a300d..52dc8d911173 100644 --- a/tools/perf/util/intel-pt-decoder/inat.h +++ b/tools/perf/util/intel-pt-decoder/inat.h @@ -97,6 +97,16 @@  #define INAT_MAKE_GROUP(grp)	((grp << INAT_GRP_OFFS) | INAT_MODRM)  #define INAT_MAKE_IMM(imm)	(imm << INAT_IMM_OFFS) +/* Identifiers for segment registers */ +#define INAT_SEG_REG_IGNORE	0 +#define INAT_SEG_REG_DEFAULT	1 +#define INAT_SEG_REG_CS		2 +#define INAT_SEG_REG_SS		3 +#define INAT_SEG_REG_DS		4 +#define INAT_SEG_REG_ES		5 +#define INAT_SEG_REG_FS		6 +#define INAT_SEG_REG_GS		7 +  /* Attribute search APIs */  extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);  extern int inat_get_last_prefix_id(insn_byte_t last_pfx); diff --git 
a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt index 12e377184ee4..e0b85930dd77 100644 --- a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt +++ b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt @@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)  fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)  fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)  fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) -ff: +ff: UD0  EndTable  Table: 3-byte opcode 1 (0x0f 0x38) @@ -717,7 +717,7 @@ AVXcode: 2  7e: vpermt2d/q Vx,Hx,Wx (66),(ev)  7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)  80: INVEPT Gy,Mdq (66) -81: INVPID Gy,Mdq (66) +81: INVVPID Gy,Mdq (66)  82: INVPCID Gy,Mdq (66)  83: vpmultishiftqb Vx,Hx,Wx (66),(ev)  88: vexpandps/d Vpd,Wpd (66),(ev) @@ -896,7 +896,7 @@ EndTable  GrpTable: Grp3_1  0: TEST Eb,Ib -1: +1: TEST Eb,Ib  2: NOT Eb  3: NEG Eb  4: MUL AL,Eb @@ -970,6 +970,15 @@ GrpTable: Grp9  EndTable  GrpTable: Grp10 +# all are UD1 +0: UD1 +1: UD1 +2: UD1 +3: UD1 +4: UD1 +5: UD1 +6: UD1 +7: UD1  EndTable  # Grp11A and Grp11B are expressed as Grp11 in Intel SDM diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 6a8d03c3d9b7..270f3223c6df 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -172,6 +172,9 @@ void machine__exit(struct machine *machine)  {  	int i; +	if (machine == NULL) +		return; +  	machine__destroy_kernel_maps(machine);  	map_groups__exit(&machine->kmaps);  	dsos__exit(&machine->dsos); diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h index efd78b827b05..3a5cb5a6e94a 100644 --- a/tools/perf/util/mmap.h +++ b/tools/perf/util/mmap.h @@ -70,7 +70,7 @@ void perf_mmap__read_catchup(struct perf_mmap *md);  static inline u64 perf_mmap__read_head(struct perf_mmap *mm)  {  	struct perf_event_mmap_page *pc = mm->base; -	u64 head = ACCESS_ONCE(pc->data_head); +	u64 head = READ_ONCE(pc->data_head);  	rmb();  	return head;  }
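The mmap.h hunk just above switches the ring-buffer head load from ACCESS_ONCE() to READ_ONCE(), keeping the rmb() that orders the head load before any subsequent reads of ring data. A minimal sketch of that reader-side pattern, with compiler builtins standing in for the kernel macros (the struct and function names here are illustrative, not the perf API):

	#include <stdint.h>

	/* Stand-in for struct perf_event_mmap_page; only data_head is modeled. */
	struct ring_page { uint64_t data_head; };

	static inline uint64_t ring_read_head(struct ring_page *pc)
	{
		/* READ_ONCE(): a single load the compiler may not tear or refetch */
		uint64_t head = *(volatile uint64_t *)&pc->data_head;
		/* full barrier standing in for rmb(): the head load happens
		 * before any reads of the data that head says is available */
		__sync_synchronize();
		return head;
	}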
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index a7fcd95961ef..170316795a18 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1116,6 +1116,7 @@ do {								\  	INIT_LIST_HEAD(&__t->list);				\  	__t->type       = PERF_EVSEL__CONFIG_TERM_ ## __type;	\  	__t->val.__name = __val;				\ +	__t->weak	= term->weak;				\  	list_add_tail(&__t->list, head_terms);			\  } while (0) @@ -2410,6 +2411,7 @@ static int new_term(struct parse_events_term **_term,  	*term = *temp;  	INIT_LIST_HEAD(&term->list); +	term->weak = false;  	switch (term->type_val) {  	case PARSE_EVENTS__TERM_TYPE_NUM: diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index be337c266697..88108cd11b4c 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -101,6 +101,9 @@ struct parse_events_term {  	/* error string indexes for within parsed string */  	int err_term;  	int err_val; + +	/* Coming from implicit alias */ +	bool weak;  };  struct parse_events_error { diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 07cb2ac041d7..80fb1593913a 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -405,6 +405,11 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,  			parse_events_terms__purge(&list);  			return ret;  		} +		/* +		 * Mark terms coming from an alias as weak so that they do not +		 * override options the user gave explicitly on the command line. +		 */ +		cloned->weak = true;  		list_add_tail(&cloned->list, &list);  	}  	list_splice(&list, terms);
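Together, the parse-events and pmu hunks above mark every term cloned from a PMU alias as weak, and the apply_config_terms() hunk in evsel.c earlier in this patch skips a weak period/freq term whenever the user gave an explicit -c/-F. A small self-contained model of that precedence rule (pick_period() and its arguments are illustrative helpers, not perf functions):

	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* ULLONG_MAX plays the role of "no -c given on the command line",
	 * matching the opts->user_interval default checked in evsel.c. */
	static unsigned long long pick_period(bool weak,
					      unsigned long long term_period,
					      unsigned long long user_interval)
	{
		if (!(weak && user_interval != ULLONG_MAX))
			return term_period;	/* the event term applies */
		return user_interval;		/* explicit -c beats the alias */
	}

	int main(void)
	{
		/* weak alias term vs. the user's -c 1000: the user wins */
		printf("%llu\n", pick_period(true, 100000, 1000));
		/* weak alias term with no -c at all: the alias wins */
		printf("%llu\n", pick_period(true, 100000, ULLONG_MAX));
		return 0;
	}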
diff --git a/tools/power/cpupower/bench/system.c b/tools/power/cpupower/bench/system.c index c25a74ae51ba..2bb3eef7d5c1 100644 --- a/tools/power/cpupower/bench/system.c +++ b/tools/power/cpupower/bench/system.c @@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu)  	dprintf("set %s as cpufreq governor\n", governor); -	if (cpupower_is_cpu_online(cpu) != 0) { +	if (cpupower_is_cpu_online(cpu) != 1) {  		perror("cpufreq_cpu_exists");  		fprintf(stderr, "error: cpu %u does not exist\n", cpu);  		return -1; diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c index 1b5da0066ebf..5b3205f16217 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c +++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c @@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void)  {  	int num;  	char *tmp; +	int this_cpu; + +	this_cpu = sched_getcpu();  	/* Assume idle state count is the same for all CPUs */ -	cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0); +	cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);  	if (cpuidle_sysfs_monitor.hw_states_num <= 0)  		return NULL;  	for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) { -		tmp = cpuidle_state_name(0, num); +		tmp = cpuidle_state_name(this_cpu, num);  		if (tmp == NULL)  			continue; @@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void)  		strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);  		free(tmp); -		tmp = cpuidle_state_desc(0, num); +		tmp = cpuidle_state_desc(this_cpu, num);  		if (tmp == NULL)  			continue;  		strncpy(cpuidle_cstates[num].desc, tmp,	CSTATE_DESC_LEN - 1); diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 333a48655ee0..9316e648a880 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -1,4 +1,5 @@  # SPDX-License-Identifier: GPL-2.0 +  LIBDIR := ../../../lib  BPFDIR := $(LIBDIR)/bpf  APIDIR := ../../../include/uapi @@ -10,7 +11,7 @@ ifneq ($(wildcard $(GENHDR)),)  endif  CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include -LDLIBS += -lcap -lelf +LDLIBS += -lcap -lelf -lrt  TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \  	test_align test_verifier_log test_dev_cgroup @@ -38,7 +39,7 @@ $(BPFOBJ): force  CLANG ?= clang  LLC   ?= llc -PROBE := $(shell llc -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1) +PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)  # Let newer LLVM versions transparently probe the kernel for availability  # of full BPF instruction set. diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c index 8591c89c0828..471bbbdb94db 100644 --- a/tools/testing/selftests/bpf/test_align.c +++ b/tools/testing/selftests/bpf/test_align.c @@ -474,27 +474,7 @@ static struct bpf_align_test tests[] = {  		.result = REJECT,  		.matches = {  			{4, "R5=pkt(id=0,off=0,r=0,imm=0)"}, -			/* ptr & 0x40 == either 0 or 0x40 */ -			{5, "R5=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"}, -			/* ptr << 2 == unknown, (4n) */ -			{7, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"}, -			/* (4n) + 14 == (4n+2).  We blow our bounds, because -			 * the add could overflow. -			 */ -			{8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"}, -			/* Checked s>=0 */ -			{10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"}, -			/* packet pointer + nonnegative (4n+2) */ -			{12, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"}, -			{14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"}, -			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine. -			 * We checked the bounds, but it might have been able -			 * to overflow if the packet pointer started in the -			 * upper half of the address space. -			 * So we did not get a 'range' on R6, and the access -			 * attempt will fail. -			 */ -			{16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"}, +			/* R5 bitwise operator &= on pointer prohibited */  		}  	},  	{ diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 69427531408d..6761be18a91f 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -351,7 +351,7 @@ static void test_bpf_obj_id(void)  			  info_len != sizeof(struct bpf_map_info) ||  			  strcmp((char *)map_infos[i].name, expected_map_name),  			  "get-map-info(fd)", -			  "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n", +			  "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",  			  err, errno,  			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,  			  info_len, sizeof(struct bpf_map_info), @@ -395,7 +395,7 @@ static void test_bpf_obj_id(void)  			  *(int *)prog_infos[i].map_ids != map_infos[i].id ||  			  strcmp((char *)prog_infos[i].name, expected_prog_name),  			  "get-prog-info(fd)", -			  "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n", +			  "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",  			  err, errno, i,  			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,  			  info_len, sizeof(struct bpf_prog_info), @@ -463,7 +463,7 @@ static void test_bpf_obj_id(void)  		      memcmp(&prog_info, &prog_infos[i], info_len) ||  		      *(int *)prog_info.map_ids != saved_map_id,  		      "get-prog-info(next_id->fd)", -		      "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n", +		      "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",  		      err, errno, info_len, sizeof(struct bpf_prog_info),  		      memcmp(&prog_info, &prog_infos[i], info_len),  		      *(int *)prog_info.map_ids, saved_map_id); @@ -509,7 +509,7 @@ static void test_bpf_obj_id(void)  		      memcmp(&map_info, &map_infos[i], info_len) ||  		      array_value != array_magic_value,  		      "check get-map-info(next_id->fd)", -		      "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n", +		      "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",  		      err, errno, info_len, sizeof(struct bpf_map_info),  		      memcmp(&map_info, &map_infos[i], info_len),  		      array_value, 
array_magic_value); diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 3c64f30cf63c..b51017404c62 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -422,9 +422,7 @@ static struct bpf_test tests[] = {  			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),  			BPF_EXIT_INSN(),  		}, -		.errstr_unpriv = "R1 subtraction from stack pointer", -		.result_unpriv = REJECT, -		.errstr = "R1 invalid mem access", +		.errstr = "R1 subtraction from stack pointer",  		.result = REJECT,  	},  	{ @@ -606,7 +604,6 @@ static struct bpf_test tests[] = {  		},  		.errstr = "misaligned stack access",  		.result = REJECT, -		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,  	},  	{  		"invalid map_fd for function call", @@ -1797,7 +1794,6 @@ static struct bpf_test tests[] = {  		},  		.result = REJECT,  		.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8", -		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,  	},  	{  		"PTR_TO_STACK store/load - bad alignment on reg", @@ -1810,7 +1806,6 @@ static struct bpf_test tests[] = {  		},  		.result = REJECT,  		.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8", -		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,  	},  	{  		"PTR_TO_STACK store/load - out of bounds low", @@ -1862,9 +1857,8 @@ static struct bpf_test tests[] = {  			BPF_MOV64_IMM(BPF_REG_0, 0),  			BPF_EXIT_INSN(),  		}, -		.result = ACCEPT, -		.result_unpriv = REJECT, -		.errstr_unpriv = "R1 pointer += pointer", +		.result = REJECT, +		.errstr = "R1 pointer += pointer",  	},  	{  		"unpriv: neg pointer", @@ -2592,7 +2586,8 @@ static struct bpf_test tests[] = {  			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,  				    offsetof(struct __sk_buff, data)),  			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4), -			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, +				    offsetof(struct __sk_buff, len)),  			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),  			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),  			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), @@ -2899,7 +2894,7 @@ static struct bpf_test tests[] = {  			BPF_MOV64_IMM(BPF_REG_0, 0),  			BPF_EXIT_INSN(),  		}, -		.errstr = "invalid access to packet", +		.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",  		.result = REJECT,  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,  	}, @@ -3885,9 +3880,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map2 = { 3, 11 }, -		.errstr_unpriv = "R0 pointer += pointer", -		.errstr = "R0 invalid mem access 'inv'", -		.result_unpriv = REJECT, +		.errstr = "R0 pointer += pointer",  		.result = REJECT,  		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,  	}, @@ -3928,7 +3921,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 4 }, -		.errstr = "R4 invalid mem access", +		.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",  		.result = REJECT,  		.prog_type = BPF_PROG_TYPE_SCHED_CLS  	}, @@ -3949,7 +3942,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 4 }, -		.errstr = "R4 invalid mem access", +		.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",  		.result = REJECT,  		.prog_type = BPF_PROG_TYPE_SCHED_CLS  	}, @@ -3970,7 +3963,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 4 }, -		.errstr = "R4 invalid mem access", +		.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",  		.result = REJECT,  		.prog_type = BPF_PROG_TYPE_SCHED_CLS  	}, @@ -5195,10 
+5188,8 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map2 = { 3 }, -		.errstr_unpriv = "R0 bitwise operator &= on pointer", -		.errstr = "invalid mem access 'inv'", +		.errstr = "R0 bitwise operator &= on pointer",  		.result = REJECT, -		.result_unpriv = REJECT,  	},  	{  		"map element value illegal alu op, 2", @@ -5214,10 +5205,8 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map2 = { 3 }, -		.errstr_unpriv = "R0 32-bit pointer arithmetic prohibited", -		.errstr = "invalid mem access 'inv'", +		.errstr = "R0 32-bit pointer arithmetic prohibited",  		.result = REJECT, -		.result_unpriv = REJECT,  	},  	{  		"map element value illegal alu op, 3", @@ -5233,10 +5222,8 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map2 = { 3 }, -		.errstr_unpriv = "R0 pointer arithmetic with /= operator", -		.errstr = "invalid mem access 'inv'", +		.errstr = "R0 pointer arithmetic with /= operator",  		.result = REJECT, -		.result_unpriv = REJECT,  	},  	{  		"map element value illegal alu op, 4", @@ -6019,8 +6006,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map_in_map = { 3 }, -		.errstr = "R1 type=inv expected=map_ptr", -		.errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited", +		.errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",  		.result = REJECT,  	},  	{ @@ -6117,6 +6103,30 @@ static struct bpf_test tests[] = {  		.result = ACCEPT,  	},  	{ +		"ld_abs: tests on r6 and skb data reload helper", +		.insns = { +			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), +			BPF_LD_ABS(BPF_B, 0), +			BPF_LD_ABS(BPF_H, 0), +			BPF_LD_ABS(BPF_W, 0), +			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), +			BPF_MOV64_IMM(BPF_REG_6, 0), +			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), +			BPF_MOV64_IMM(BPF_REG_2, 1), +			BPF_MOV64_IMM(BPF_REG_3, 2), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_skb_vlan_push), +			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), +			BPF_LD_ABS(BPF_B, 0), +			BPF_LD_ABS(BPF_H, 0), +			BPF_LD_ABS(BPF_W, 0), +			BPF_MOV64_IMM(BPF_REG_0, 42), +			BPF_EXIT_INSN(), +		}, +		.prog_type = BPF_PROG_TYPE_SCHED_CLS, +		.result = ACCEPT, +	}, +	{  		"ld_ind: check calling conv, r1",  		.insns = {  			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), @@ -6300,7 +6310,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 3 }, -		.errstr = "R0 min value is negative", +		.errstr = "unbounded min value",  		.result = REJECT,  	},  	{ @@ -6324,7 +6334,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 3 }, -		.errstr = "R0 min value is negative", +		.errstr = "unbounded min value",  		.result = REJECT,  	},  	{ @@ -6350,7 +6360,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 3 }, -		.errstr = "R8 invalid mem access 'inv'", +		.errstr = "unbounded min value",  		.result = REJECT,  	},  	{ @@ -6375,7 +6385,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 3 }, -		.errstr = "R8 invalid mem access 'inv'", +		.errstr = "unbounded min value",  		.result = REJECT,  	},  	{ @@ -6423,7 +6433,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 3 }, -		.errstr = "R0 min value is negative", +		.errstr = "unbounded min value",  		.result = REJECT,  	},  	{ @@ -6494,7 +6504,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 3 }, -		.errstr = "R0 min value is negative", +		.errstr = "unbounded min 
value",  		.result = REJECT,  	},  	{ @@ -6545,7 +6555,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 3 }, -		.errstr = "R0 min value is negative", +		.errstr = "unbounded min value",  		.result = REJECT,  	},  	{ @@ -6572,7 +6582,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 3 }, -		.errstr = "R0 min value is negative", +		.errstr = "unbounded min value",  		.result = REJECT,  	},  	{ @@ -6598,7 +6608,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 3 }, -		.errstr = "R0 min value is negative", +		.errstr = "unbounded min value",  		.result = REJECT,  	},  	{ @@ -6627,7 +6637,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 3 }, -		.errstr = "R0 min value is negative", +		.errstr = "unbounded min value",  		.result = REJECT,  	},  	{ @@ -6657,7 +6667,7 @@ static struct bpf_test tests[] = {  			BPF_JMP_IMM(BPF_JA, 0, 0, -7),  		},  		.fixup_map1 = { 4 }, -		.errstr = "R0 min value is negative", +		.errstr = "unbounded min value",  		.result = REJECT,  	},  	{ @@ -6685,8 +6695,7 @@ static struct bpf_test tests[] = {  			BPF_EXIT_INSN(),  		},  		.fixup_map1 = { 3 }, -		.errstr_unpriv = "R0 pointer comparison prohibited", -		.errstr = "R0 min value is negative", +		.errstr = "unbounded min value",  		.result = REJECT,  		.result_unpriv = REJECT,  	}, @@ -6742,6 +6751,462 @@ static struct bpf_test tests[] = {  		.result = REJECT,  	},  	{ +		"bounds check based on zero-extended MOV", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), +			/* r2 = 0x0000'0000'ffff'ffff */ +			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff), +			/* r2 = 0 */ +			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), +			/* no-op */ +			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), +			/* access at offset 0 */ +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), +			/* exit */ +			BPF_MOV64_IMM(BPF_REG_0, 0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		.result = ACCEPT +	}, +	{ +		"bounds check based on sign-extended MOV. test1", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), +			/* r2 = 0xffff'ffff'ffff'ffff */ +			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), +			/* r2 = 0xffff'ffff */ +			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), +			/* r0 = <oob pointer> */ +			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), +			/* access to OOB pointer */ +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), +			/* exit */ +			BPF_MOV64_IMM(BPF_REG_0, 0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		.errstr = "map_value pointer and 4294967295", +		.result = REJECT +	}, +	{ +		"bounds check based on sign-extended MOV. 
test2", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), +			/* r2 = 0xffff'ffff'ffff'ffff */ +			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), +			/* r2 = 0xfff'ffff */ +			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36), +			/* r0 = <oob pointer> */ +			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), +			/* access to OOB pointer */ +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), +			/* exit */ +			BPF_MOV64_IMM(BPF_REG_0, 0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		.errstr = "R0 min value is outside of the array range", +		.result = REJECT +	}, +	{ +		"bounds check based on reg_off + var_off + insn_off. test1", +		.insns = { +			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, +				    offsetof(struct __sk_buff, mark)), +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), +			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1), +			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), +			BPF_MOV64_IMM(BPF_REG_0, 0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 4 }, +		.errstr = "value_size=8 off=1073741825", +		.result = REJECT, +		.prog_type = BPF_PROG_TYPE_SCHED_CLS, +	}, +	{ +		"bounds check based on reg_off + var_off + insn_off. test2", +		.insns = { +			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, +				    offsetof(struct __sk_buff, mark)), +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), +			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1), +			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), +			BPF_MOV64_IMM(BPF_REG_0, 0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 4 }, +		.errstr = "value 1073741823", +		.result = REJECT, +		.prog_type = BPF_PROG_TYPE_SCHED_CLS, +	}, +	{ +		"bounds check after truncation of non-boundary-crossing range", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), +			/* r1 = [0x00, 0xff] */ +			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), +			BPF_MOV64_IMM(BPF_REG_2, 1), +			/* r2 = 0x10'0000'0000 */ +			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36), +			/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ +			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), +			/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), +			/* r1 = [0x00, 0xff] */ +			BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff), +			/* r1 = 0 */ +			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), +			/* no-op */ +			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), +			
/* access at offset 0 */ +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), +			/* exit */ +			BPF_MOV64_IMM(BPF_REG_0, 0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		.result = ACCEPT +	}, +	{ +		"bounds check after truncation of boundary-crossing range (1)", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), +			/* r1 = [0x00, 0xff] */ +			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), +			/* r1 = [0xffff'ff80, 0x1'0000'007f] */ +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), +			/* r1 = [0xffff'ff80, 0xffff'ffff] or +			 *      [0x0000'0000, 0x0000'007f] +			 */ +			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0), +			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), +			/* r1 = [0x00, 0xff] or +			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] +			 */ +			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), +			/* r1 = 0 or +			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] +			 */ +			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), +			/* no-op or OOB pointer computation */ +			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), +			/* potentially OOB access */ +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), +			/* exit */ +			BPF_MOV64_IMM(BPF_REG_0, 0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		/* not actually fully unbounded, but the bound is very high */ +		.errstr = "R0 unbounded memory access", +		.result = REJECT +	}, +	{ +		"bounds check after truncation of boundary-crossing range (2)", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), +			/* r1 = [0x00, 0xff] */ +			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), +			/* r1 = [0xffff'ff80, 0x1'0000'007f] */ +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), +			/* r1 = [0xffff'ff80, 0xffff'ffff] or +			 *      [0x0000'0000, 0x0000'007f] +			 * difference to previous test: truncation via MOV32 +			 * instead of ALU32. +			 */ +			BPF_MOV32_REG(BPF_REG_1, BPF_REG_1), +			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), +			/* r1 = [0x00, 0xff] or +			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] +			 */ +			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), +			/* r1 = 0 or +			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] +			 */ +			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), +			/* no-op or OOB pointer computation */ +			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), +			/* potentially OOB access */ +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), +			/* exit */ +			BPF_MOV64_IMM(BPF_REG_0, 0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		/* not actually fully unbounded, but the bound is very high */ +		.errstr = "R0 unbounded memory access", +		.result = REJECT +	},
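The comments in the two boundary-crossing tests above walk the value range step by step; the key point is that after two additions of 0xffffff80 >> 1, r1 spans the 2^32 boundary, so a 32-bit ALU or MOV then truncates it into two disjoint sub-ranges the verifier has to track. The arithmetic is easy to check in plain C (a standalone illustration, not part of the selftest):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t step = 0xffffff80u >> 1;	/* 0x7fffffc0 */
		uint64_t lo = 0x00 + 2 * step;		/* 0xffffff80 */
		uint64_t hi = 0xff + 2 * step;		/* 0x1'0000'007f: past 2^32 */

		printf("64-bit range: [%#llx, %#llx]\n",
		       (unsigned long long)lo, (unsigned long long)hi);
		/* ALU32/MOV32 keep only the low 32 bits, splitting the range
		 * into [0xffffff80, 0xffffffff] and [0x00000000, 0x0000007f] */
		printf("after truncation: %#x and %#x\n",
		       (uint32_t)lo, (uint32_t)hi);
		return 0;
	}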
+	{ +		"bounds check after wrapping 32-bit addition", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), +			/* r1 = 0x7fff'ffff */ +			BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff), +			/* r1 = 0xffff'fffe */ +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), +			/* r1 = 0 */ +			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2), +			/* no-op */ +			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), +			/* access at offset 0 */ +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), +			/* exit */ +			BPF_MOV64_IMM(BPF_REG_0, 0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		.result = ACCEPT +	}, +	{ +		"bounds check after shift with oversized count operand", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), +			BPF_MOV64_IMM(BPF_REG_2, 32), +			BPF_MOV64_IMM(BPF_REG_1, 1), +			/* r1 = (u32)1 << (u32)32 = ? 
*/ +			BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2), +			/* r1 = [0x0000, 0xffff] */ +			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff), +			/* computes unknown pointer, potentially OOB */ +			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), +			/* potentially OOB access */ +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), +			/* exit */ +			BPF_MOV64_IMM(BPF_REG_0, 0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		.errstr = "R0 max value is outside of the array range", +		.result = REJECT +	}, +	{ +		"bounds check after right shift of maybe-negative number", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), +			/* r1 = [0x00, 0xff] */ +			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), +			/* r1 = [-0x01, 0xfe] */ +			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1), +			/* r1 = 0 or 0xff'ffff'ffff'ffff */ +			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), +			/* r1 = 0 or 0xffff'ffff'ffff */ +			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), +			/* computes unknown pointer, potentially OOB */ +			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), +			/* potentially OOB access */ +			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), +			/* exit */ +			BPF_MOV64_IMM(BPF_REG_0, 0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		.errstr = "R0 unbounded memory access", +		.result = REJECT +	}, +	{ +		"bounds check map access with off+size signed 32bit overflow. test1", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), +			BPF_EXIT_INSN(), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe), +			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), +			BPF_JMP_A(0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		.errstr = "map_value pointer and 2147483646", +		.result = REJECT +	}, +	{ +		"bounds check map access with off+size signed 32bit overflow. test2", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), +			BPF_EXIT_INSN(), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), +			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), +			BPF_JMP_A(0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		.errstr = "pointer offset 1073741822", +		.result = REJECT +	}, +	{ +		"bounds check map access with off+size signed 32bit overflow. 
test3", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), +			BPF_EXIT_INSN(), +			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), +			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), +			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), +			BPF_JMP_A(0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		.errstr = "pointer offset -1073741822", +		.result = REJECT +	}, +	{ +		"bounds check map access with off+size signed 32bit overflow. test4", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), +			BPF_EXIT_INSN(), +			BPF_MOV64_IMM(BPF_REG_1, 1000000), +			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000), +			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), +			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), +			BPF_JMP_A(0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		.errstr = "map_value pointer and 1000000000000", +		.result = REJECT +	}, +	{ +		"pointer/scalar confusion in state equality check (way 1)", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), +			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), +			BPF_JMP_A(1), +			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), +			BPF_JMP_A(0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		.result = ACCEPT, +		.result_unpriv = REJECT, +		.errstr_unpriv = "R0 leaks addr as return value" +	}, +	{ +		"pointer/scalar confusion in state equality check (way 2)", +		.insns = { +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), +			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), +			BPF_JMP_A(1), +			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 3 }, +		.result = ACCEPT, +		.result_unpriv = REJECT, +		.errstr_unpriv = "R0 leaks addr as return value" +	}, +	{  		"variable-offset ctx access",  		.insns = {  			/* Get an unknown value */ @@ -6783,6 +7248,71 @@ static struct bpf_test tests[] = {  		.prog_type = BPF_PROG_TYPE_LWT_IN,  	},  	{ +		"indirect variable-offset stack access", +		.insns = { +			/* Fill the top 8 bytes of the stack */ +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), +			/* Get an unknown value */ +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), +			/* Make it small and 4-byte aligned */ +			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), +			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), +			/* add it to fp.  
We now have either fp-4 or fp-8, but +			 * we don't know which +			 */ +			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), +			/* dereference it indirectly */ +			BPF_LD_MAP_FD(BPF_REG_1, 0), +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, +				     BPF_FUNC_map_lookup_elem), +			BPF_MOV64_IMM(BPF_REG_0, 0), +			BPF_EXIT_INSN(), +		}, +		.fixup_map1 = { 5 }, +		.errstr = "variable stack read R2", +		.result = REJECT, +		.prog_type = BPF_PROG_TYPE_LWT_IN, +	}, +	{ +		"direct stack access with 32-bit wraparound. test1", +		.insns = { +			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), +			BPF_MOV32_IMM(BPF_REG_0, 0), +			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), +			BPF_EXIT_INSN() +		}, +		.errstr = "fp pointer and 2147483647", +		.result = REJECT +	}, +	{ +		"direct stack access with 32-bit wraparound. test2", +		.insns = { +			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff), +			BPF_MOV32_IMM(BPF_REG_0, 0), +			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), +			BPF_EXIT_INSN() +		}, +		.errstr = "fp pointer and 1073741823", +		.result = REJECT +	}, +	{ +		"direct stack access with 32-bit wraparound. test3", +		.insns = { +			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff), +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff), +			BPF_MOV32_IMM(BPF_REG_0, 0), +			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), +			BPF_EXIT_INSN() +		}, +		.errstr = "fp pointer offset 1073741822", +		.result = REJECT +	}, +	{  		"liveness pruning and write screening",  		.insns = {  			/* Get an unknown value */ @@ -7104,6 +7634,19 @@ static struct bpf_test tests[] = {  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,  	},  	{ +		"pkt_end - pkt_start is allowed", +		.insns = { +			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, +				    offsetof(struct __sk_buff, data_end)), +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, +				    offsetof(struct __sk_buff, data)), +			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2), +			BPF_EXIT_INSN(), +		}, +		.result = ACCEPT, +		.prog_type = BPF_PROG_TYPE_SCHED_CLS, +	}, +	{  		"XDP pkt read, pkt_end mangling, bad access 1",  		.insns = {  			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -7118,7 +7661,7 @@ static struct bpf_test tests[] = {  			BPF_MOV64_IMM(BPF_REG_0, 0),  			BPF_EXIT_INSN(),  		}, -		.errstr = "R1 offset is outside of the packet", +		.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",  		.result = REJECT,  		.prog_type = BPF_PROG_TYPE_XDP,  	}, @@ -7137,7 +7680,7 @@ static struct bpf_test tests[] = {  			BPF_MOV64_IMM(BPF_REG_0, 0),  			BPF_EXIT_INSN(),  		}, -		.errstr = "R1 offset is outside of the packet", +		.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",  		.result = REJECT,  		.prog_type = BPF_PROG_TYPE_XDP,  	}, diff --git a/tools/testing/selftests/bpf/test_verifier_log.c b/tools/testing/selftests/bpf/test_verifier_log.c index 3cc0b561489e..e9626cf5607a 100644 --- a/tools/testing/selftests/bpf/test_verifier_log.c +++ b/tools/testing/selftests/bpf/test_verifier_log.c @@ -3,6 +3,8 @@  #include <stdio.h>  #include <string.h>  #include <unistd.h> +#include <sys/time.h> +#include <sys/resource.h>  #include <linux/bpf.h>  #include <linux/filter.h> @@ -131,11 +133,16 @@ static void test_log_bad(char *log, size_t log_len, int log_level)  int main(int argc, char **argv)  { +	struct rlimit limit  = { RLIM_INFINITY, RLIM_INFINITY };  	char 
full_log[LOG_SIZE];  	char log[LOG_SIZE];  	size_t want_len;  	int i; +	/* allow unlimited locked memory to have more consistent error code */ +	if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0) +		perror("Unable to lift memlock rlimit"); +  	memset(log, 1, LOG_SIZE);  	/* Test incorrect attr */ diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config index e57b4ac40e72..7177bea1fdfa 100644 --- a/tools/testing/selftests/net/config +++ b/tools/testing/selftests/net/config @@ -1,3 +1,4 @@  CONFIG_USER_NS=y  CONFIG_BPF_SYSCALL=y  CONFIG_TEST_BPF=m +CONFIG_NUMA=y diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 939a337128db..5d4f10ac2af2 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -7,7 +7,7 @@ include ../lib.mk  TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \  			check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \ -			protection_keys test_vdso +			protection_keys test_vdso test_vsyscall  TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \  			test_FCMOV test_FCOMI test_FISTTP \  			vdso_restorer diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c index 66e5ce5b91f0..1aef72df20a1 100644 --- a/tools/testing/selftests/x86/ldt_gdt.c +++ b/tools/testing/selftests/x86/ldt_gdt.c @@ -122,8 +122,7 @@ static void check_valid_segment(uint16_t index, int ldt,  	 * NB: Different Linux versions do different things with the  	 * accessed bit in set_thread_area().  	 */ -	if (ar != expected_ar && -	    (ldt || ar != (expected_ar | AR_ACCESSED))) { +	if (ar != expected_ar && ar != (expected_ar | AR_ACCESSED)) {  		printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n",  		       (ldt ? "LDT" : "GDT"), index, ar, expected_ar);  		nerrs++; @@ -627,13 +626,10 @@ static void do_multicpu_tests(void)  static int finish_exec_test(void)  {  	/* -	 * In a sensible world, this would be check_invalid_segment(0, 1); -	 * For better or for worse, though, the LDT is inherited across exec. -	 * We can probably change this safely, but for now we test it. +	 * Older kernel versions did inherit the LDT on exec() which is +	 * wrong because exec() starts from a clean state.  	 */ -	check_valid_segment(0, 1, -			    AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB, -			    42, true); +	check_invalid_segment(0, 1);  	return nerrs ? 
1 : 0;  } diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c new file mode 100644 index 000000000000..7a744fa7b786 --- /dev/null +++ b/tools/testing/selftests/x86/test_vsyscall.c @@ -0,0 +1,500 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#define _GNU_SOURCE + +#include <stdio.h> +#include <sys/time.h> +#include <time.h> +#include <stdlib.h> +#include <sys/syscall.h> +#include <unistd.h> +#include <dlfcn.h> +#include <string.h> +#include <inttypes.h> +#include <signal.h> +#include <sys/ucontext.h> +#include <errno.h> +#include <err.h> +#include <sched.h> +#include <stdbool.h> +#include <setjmp.h> + +#ifdef __x86_64__ +# define VSYS(x) (x) +#else +# define VSYS(x) 0 +#endif + +#ifndef SYS_getcpu +# ifdef __x86_64__ +#  define SYS_getcpu 309 +# else +#  define SYS_getcpu 318 +# endif +#endif + +static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), +		       int flags) +{ +	struct sigaction sa; +	memset(&sa, 0, sizeof(sa)); +	sa.sa_sigaction = handler; +	sa.sa_flags = SA_SIGINFO | flags; +	sigemptyset(&sa.sa_mask); +	if (sigaction(sig, &sa, 0)) +		err(1, "sigaction"); +} + +/* vsyscalls and vDSO */ +bool should_read_vsyscall = false; + +typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz); +gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000); +gtod_t vdso_gtod; + +typedef int (*vgettime_t)(clockid_t, struct timespec *); +vgettime_t vdso_gettime; + +typedef long (*time_func_t)(time_t *t); +time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400); +time_func_t vdso_time; + +typedef long (*getcpu_t)(unsigned *, unsigned *, void *); +getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800); +getcpu_t vdso_getcpu; + +static void init_vdso(void) +{ +	void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); +	if (!vdso) +		vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); +	if (!vdso) { +		printf("[WARN]\tfailed to find vDSO\n"); +		return; +	} + +	vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday"); +	if (!vdso_gtod) +		printf("[WARN]\tfailed to find gettimeofday in vDSO\n"); + +	vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime"); +	if (!vdso_gettime) +		printf("[WARN]\tfailed to find clock_gettime in vDSO\n"); + +	vdso_time = (time_func_t)dlsym(vdso, "__vdso_time"); +	if (!vdso_time) +		printf("[WARN]\tfailed to find time in vDSO\n"); + +	vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu"); +	if (!vdso_getcpu) { +		/* getcpu() was never wired up in the 32-bit vDSO. */ +		printf("[%s]\tfailed to find getcpu in vDSO\n", +		       sizeof(long) == 8 ? 
"WARN" : "NOTE"); +	} +} + +static int init_vsys(void) +{ +#ifdef __x86_64__ +	int nerrs = 0; +	FILE *maps; +	char line[128]; +	bool found = false; + +	maps = fopen("/proc/self/maps", "r"); +	if (!maps) { +		printf("[WARN]\tCould not open /proc/self/maps -- assuming vsyscall is r-x\n"); +		should_read_vsyscall = true; +		return 0; +	} + +	while (fgets(line, sizeof(line), maps)) { +		char r, x; +		void *start, *end; +		char name[128]; +		if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s", +			   &start, &end, &r, &x, name) != 5) +			continue; + +		if (strcmp(name, "[vsyscall]")) +			continue; + +		printf("\tvsyscall map: %s", line); + +		if (start != (void *)0xffffffffff600000 || +		    end != (void *)0xffffffffff601000) { +			printf("[FAIL]\taddress range is nonsense\n"); +			nerrs++; +		} + +		printf("\tvsyscall permissions are %c-%c\n", r, x); +		should_read_vsyscall = (r == 'r'); +		if (x != 'x') { +			vgtod = NULL; +			vtime = NULL; +			vgetcpu = NULL; +		} + +		found = true; +		break; +	} + +	fclose(maps); + +	if (!found) { +		printf("\tno vsyscall map in /proc/self/maps\n"); +		should_read_vsyscall = false; +		vgtod = NULL; +		vtime = NULL; +		vgetcpu = NULL; +	} + +	return nerrs; +#else +	return 0; +#endif +} + +/* syscalls */ +static inline long sys_gtod(struct timeval *tv, struct timezone *tz) +{ +	return syscall(SYS_gettimeofday, tv, tz); +} + +static inline int sys_clock_gettime(clockid_t id, struct timespec *ts) +{ +	return syscall(SYS_clock_gettime, id, ts); +} + +static inline long sys_time(time_t *t) +{ +	return syscall(SYS_time, t); +} + +static inline long sys_getcpu(unsigned * cpu, unsigned * node, +			      void* cache) +{ +	return syscall(SYS_getcpu, cpu, node, cache); +} + +static jmp_buf jmpbuf; + +static void sigsegv(int sig, siginfo_t *info, void *ctx_void) +{ +	siglongjmp(jmpbuf, 1); +} + +static double tv_diff(const struct timeval *a, const struct timeval *b) +{ +	return (double)(a->tv_sec - b->tv_sec) + +		(double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6; +} + +static int check_gtod(const struct timeval *tv_sys1, +		      const struct timeval *tv_sys2, +		      const struct timezone *tz_sys, +		      const char *which, +		      const struct timeval *tv_other, +		      const struct timezone *tz_other) +{ +	int nerrs = 0; +	double d1, d2; + +	if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest || tz_sys->tz_dsttime != tz_other->tz_dsttime)) { +		printf("[FAIL] %s tz mismatch\n", which); +		nerrs++; +	} + +	d1 = tv_diff(tv_other, tv_sys1); +	d2 = tv_diff(tv_sys2, tv_other);  +	printf("\t%s time offsets: %lf %lf\n", which, d1, d2); + +	if (d1 < 0 || d2 < 0) { +		printf("[FAIL]\t%s time was inconsistent with the syscall\n", which); +		nerrs++; +	} else { +		printf("[OK]\t%s gettimeofday()'s timeval was okay\n", which); +	} + +	return nerrs; +} + +static int test_gtod(void) +{ +	struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys; +	struct timezone tz_sys, tz_vdso, tz_vsys; +	long ret_vdso = -1; +	long ret_vsys = -1; +	int nerrs = 0; + +	printf("[RUN]\ttest gettimeofday()\n"); + +	if (sys_gtod(&tv_sys1, &tz_sys) != 0) +		err(1, "syscall gettimeofday"); +	if (vdso_gtod) +		ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso); +	if (vgtod) +		ret_vsys = vgtod(&tv_vsys, &tz_vsys); +	if (sys_gtod(&tv_sys2, &tz_sys) != 0) +		err(1, "syscall gettimeofday"); + +	if (vdso_gtod) { +		if (ret_vdso == 0) { +			nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso); +		} else { +			printf("[FAIL]\tvDSO gettimeofday() failed: %ld\n", ret_vdso); +			
nerrs++; +		} +	} + +	if (vgtod) { +		if (ret_vsys == 0) { +			nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys); +		} else { +			printf("[FAIL]\tvsys gettimeofday() failed: %ld\n", ret_vsys); +			nerrs++; +		} +	} + +	return nerrs; +} + +static int test_time(void) { +	int nerrs = 0; + +	printf("[RUN]\ttest time()\n"); +	long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0; +	long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1; +	t_sys1 = sys_time(&t2_sys1); +	if (vdso_time) +		t_vdso = vdso_time(&t2_vdso); +	if (vtime) +		t_vsys = vtime(&t2_vsys); +	t_sys2 = sys_time(&t2_sys2); +	if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) { +		printf("[FAIL]\tsyscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n", t_sys1, t2_sys1, t_sys2, t2_sys2); +		nerrs++; +		return nerrs; +	} + +	if (vdso_time) { +		if (t_vdso < 0 || t_vdso != t2_vdso) { +			printf("[FAIL]\tvDSO failed (ret:%ld output:%ld)\n", t_vdso, t2_vdso); +			nerrs++; +		} else if (t_vdso < t_sys1 || t_vdso > t_sys2) { +			printf("[FAIL]\tvDSO returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vdso, t_sys2); +			nerrs++; +		} else { +			printf("[OK]\tvDSO time() is okay\n"); +		} +	} + +	if (vtime) { +		if (t_vsys < 0 || t_vsys != t2_vsys) { +			printf("[FAIL]\tvsyscall failed (ret:%ld output:%ld)\n", t_vsys, t2_vsys); +			nerrs++; +		} else if (t_vsys < t_sys1 || t_vsys > t_sys2) { +			printf("[FAIL]\tvsyscall returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vsys, t_sys2); +			nerrs++; +		} else { +			printf("[OK]\tvsyscall time() is okay\n"); +		} +	} + +	return nerrs; +} + +static int test_getcpu(int cpu) +{ +	int nerrs = 0; +	long ret_sys, ret_vdso = -1, ret_vsys = -1; + +	printf("[RUN]\tgetcpu() on CPU %d\n", cpu); + +	cpu_set_t cpuset; +	CPU_ZERO(&cpuset); +	CPU_SET(cpu, &cpuset); +	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) { +		printf("[SKIP]\tfailed to force CPU %d\n", cpu); +		return nerrs; +	} + +	unsigned cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys; +	unsigned node = 0; +	bool have_node = false; +	ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0); +	if (vdso_getcpu) +		ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0); +	if (vgetcpu) +		ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0); + +	if (ret_sys == 0) { +		if (cpu_sys != cpu) { +			printf("[FAIL]\tsyscall reported CPU %hu but should be %d\n", cpu_sys, cpu); +			nerrs++; +		} + +		have_node = true; +		node = node_sys; +	} + +	if (vdso_getcpu) { +		if (ret_vdso) { +			printf("[FAIL]\tvDSO getcpu() failed\n"); +			nerrs++; +		} else { +			if (!have_node) { +				have_node = true; +				node = node_vdso; +			} + +			if (cpu_vdso != cpu) { +				printf("[FAIL]\tvDSO reported CPU %hu but should be %d\n", cpu_vdso, cpu); +				nerrs++; +			} else { +				printf("[OK]\tvDSO reported correct CPU\n"); +			} + +			if (node_vdso != node) { +				printf("[FAIL]\tvDSO reported node %hu but should be %hu\n", node_vdso, node); +				nerrs++; +			} else { +				printf("[OK]\tvDSO reported correct node\n"); +			} +		} +	} + +	if (vgetcpu) { +		if (ret_vsys) { +			printf("[FAIL]\tvsyscall getcpu() failed\n"); +			nerrs++; +		} else { +			if (!have_node) { +				have_node = true; +				node = node_vsys; +			} + +			if (cpu_vsys != cpu) { +				printf("[FAIL]\tvsyscall reported CPU %hu but should be %d\n", cpu_vsys, cpu); +				nerrs++; +			} else { +				printf("[OK]\tvsyscall reported correct CPU\n"); +			} + +			if (node_vsys != node) { +				printf("[FAIL]\tvsyscall reported node %hu but should be %hu\n", node_vsys, 
node); +				nerrs++; +			} else { +				printf("[OK]\tvsyscall reported correct node\n"); +			} +		} +	} + +	return nerrs; +} + +static int test_vsys_r(void) +{ +#ifdef __x86_64__ +	printf("[RUN]\tChecking read access to the vsyscall page\n"); +	bool can_read; +	if (sigsetjmp(jmpbuf, 1) == 0) { +		*(volatile int *)0xffffffffff600000; +		can_read = true; +	} else { +		can_read = false; +	} + +	if (can_read && !should_read_vsyscall) { +		printf("[FAIL]\tWe have read access, but we shouldn't\n"); +		return 1; +	} else if (!can_read && should_read_vsyscall) { +		printf("[FAIL]\tWe don't have read access, but we should\n"); +		return 1; +	} else { +		printf("[OK]\tgot expected result\n"); +	} +#endif + +	return 0; +} + + +#ifdef __x86_64__ +#define X86_EFLAGS_TF (1UL << 8) +static volatile sig_atomic_t num_vsyscall_traps; + +static unsigned long get_eflags(void) +{ +	unsigned long eflags; +	asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags)); +	return eflags; +} + +static void set_eflags(unsigned long eflags) +{ +	asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags"); +} + +static void sigtrap(int sig, siginfo_t *info, void *ctx_void) +{ +	ucontext_t *ctx = (ucontext_t *)ctx_void; +	unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP]; + +	if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0) +		num_vsyscall_traps++; +} + +static int test_native_vsyscall(void) +{ +	time_t tmp; +	bool is_native; + +	if (!vtime) +		return 0; + +	printf("[RUN]\tchecking for native vsyscall\n"); +	sethandler(SIGTRAP, sigtrap, 0); +	set_eflags(get_eflags() | X86_EFLAGS_TF); +	vtime(&tmp); +	set_eflags(get_eflags() & ~X86_EFLAGS_TF); + +	/* +	 * If vsyscalls are emulated, we expect a single trap in the +	 * vsyscall page -- the call instruction will trap with RIP +	 * pointing to the entry point before emulation takes over. +	 * In native mode, we expect two traps, since whatever code +	 * the vsyscall page contains will be more than just a ret +	 * instruction. +	 */ +	is_native = (num_vsyscall_traps > 1); + +	printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n", +	       (is_native ? "native" : "emulated"), +	       (int)num_vsyscall_traps); + +	return 0; +} +#endif + +int main(int argc, char **argv) +{ +	int nerrs = 0; + +	init_vdso(); +	nerrs += init_vsys(); + +	nerrs += test_gtod(); +	nerrs += test_time(); +	nerrs += test_getcpu(0); +	nerrs += test_getcpu(1); + +	sethandler(SIGSEGV, sigsegv, 0); +	nerrs += test_vsys_r(); + +#ifdef __x86_64__ +	nerrs += test_native_vsyscall(); +#endif + +	return nerrs ? 
1 : 0; +} diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c index 5727dfb15a83..c9c81614a66a 100644 --- a/tools/usb/usbip/libsrc/vhci_driver.c +++ b/tools/usb/usbip/libsrc/vhci_driver.c @@ -50,14 +50,14 @@ static int parse_status(const char *value)  	while (*c != '\0') {  		int port, status, speed, devid; -		unsigned long socket; +		int sockfd;  		char lbusid[SYSFS_BUS_ID_SIZE];  		struct usbip_imported_device *idev;  		char hub[3]; -		ret = sscanf(c, "%2s  %d %d %d %x %lx %31s\n", +		ret = sscanf(c, "%2s  %d %d %d %x %u %31s\n",  				hub, &port, &status, &speed, -				&devid, &socket, lbusid); +				&devid, &sockfd, lbusid);  		if (ret < 5) {  			dbg("sscanf failed: %d", ret); @@ -66,7 +66,7 @@ static int parse_status(const char *value)  		dbg("hub %s port %d status %d speed %d devid %x",  				hub, port, status, speed, devid); -		dbg("socket %lx lbusid %s", socket, lbusid); +		dbg("sockfd %u lbusid %s", sockfd, lbusid);  		/* if a device is connected, look at it */  		idev = &vhci_driver->idev[port]; @@ -106,7 +106,7 @@ static int parse_status(const char *value)  	return 0;  } -#define MAX_STATUS_NAME 16 +#define MAX_STATUS_NAME 18  static int refresh_imported_device_list(void)  { @@ -329,9 +329,17 @@ err:  int usbip_vhci_get_free_port(uint32_t speed)  {  	for (int i = 0; i < vhci_driver->nports; i++) { -		if (speed == USB_SPEED_SUPER && -		    vhci_driver->idev[i].hub != HUB_SPEED_SUPER) -			continue; + +		switch (speed) { +		case	USB_SPEED_SUPER: +			if (vhci_driver->idev[i].hub != HUB_SPEED_SUPER) +				continue; +		break; +		default: +			if (vhci_driver->idev[i].hub != HUB_SPEED_HIGH) +				continue; +		break; +		}  		if (vhci_driver->idev[i].status == VDEV_ST_NULL)  			return vhci_driver->idev[i].port; diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c index 2b3d6d235015..3d7b42e77299 100644 --- a/tools/usb/usbip/src/utils.c +++ b/tools/usb/usbip/src/utils.c @@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add)  	char command[SYSFS_BUS_ID_SIZE + 4];  	char match_busid_attr_path[SYSFS_PATH_MAX];  	int rc; +	int cmd_size;  	snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),  		 "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME, @@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add)  		 attr_name);  	if (add) -		snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid); +		cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", +				    busid);  	else -		snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid); +		cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", +				    busid);  	rc = write_sysfs_attribute(match_busid_attr_path, command, -				   sizeof(command)); +				   cmd_size);  	if (rc < 0) {  		dbg("failed to write match_busid: %s", strerror(errno));  		return -1; diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c index 38bb171aceba..e6e81305ef46 100644 --- a/tools/virtio/ringtest/ptr_ring.c +++ b/tools/virtio/ringtest/ptr_ring.c @@ -16,24 +16,41 @@  #define unlikely(x)    (__builtin_expect(!!(x), 0))  #define likely(x)    (__builtin_expect(!!(x), 1))  #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a)) +#define SIZE_MAX        (~(size_t)0) +  typedef pthread_spinlock_t  spinlock_t;  typedef int gfp_t; -static void *kmalloc(unsigned size, gfp_t gfp) -{ -	return memalign(64, size); -} +#define __GFP_ZERO 0x1 -static void *kzalloc(unsigned size, gfp_t gfp) +static void *kmalloc(unsigned size, gfp_t gfp)  {  	void *p = memalign(64, size);  	if (!p)  
		return p; -	memset(p, 0, size); +	if (gfp & __GFP_ZERO) +		memset(p, 0, size);  	return p;  } +static inline void *kzalloc(unsigned size, gfp_t flags) +{ +	return kmalloc(size, flags | __GFP_ZERO); +} + +static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) +{ +	if (size != 0 && n > SIZE_MAX / size) +		return NULL; +	return kmalloc(n * size, flags); +} + +static inline void *kcalloc(size_t n, size_t size, gfp_t flags) +{ +	return kmalloc_array(n, size, flags | __GFP_ZERO); +} +  static void kfree(void *p) {  	if (p) diff --git a/tools/vm/slabinfo-gnuplot.sh b/tools/vm/slabinfo-gnuplot.sh index 35b039864b77..0cf28aa6f21c 100644 --- a/tools/vm/slabinfo-gnuplot.sh +++ b/tools/vm/slabinfo-gnuplot.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash  # Sergey Senozhatsky, 2015  # sergey.senozhatsky.work@gmail.com
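A note on the ptr_ring.c hunk above: the rewritten kmalloc() now honors a __GFP_ZERO flag, and the new kmalloc_array()/kcalloc() shims refuse any request whose element count multiplied by element size would overflow size_t, mirroring the kernel-side helpers. Below is a minimal standalone sketch of that overflow guard, not part of the patch; the demo_alloc_array() name is invented for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Refuse the allocation when n * size would wrap past SIZE_MAX.
 * The size != 0 test avoids dividing by zero. */
static void *demo_alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return malloc(n * size);
}

int main(void)
{
	void *p = demo_alloc_array(16, sizeof(long));	/* sane: succeeds */
	printf("small request: %s\n", p ? "allocated" : "refused");
	free(p);

	/* (SIZE_MAX / 2 + 1) * 4 wraps to 0 in size_t arithmetic, so an
	 * unchecked multiply would quietly turn this into malloc(0). */
	p = demo_alloc_array(SIZE_MAX / 2 + 1, 4);
	printf("huge request:  %s\n", p ? "allocated" : "refused");
	free(p);
	return 0;
}

Without the guard, the second call would ask malloc() for zero bytes while the caller believes it received room for 2^63 four-byte elements.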

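The usbip utils.c change deserves a similar note: modify_match_busid() now keeps the return value of snprintf() and writes exactly that many bytes to the match_busid attribute, rather than pushing the whole fixed-size buffer, trailing NUL padding included, into sysfs. A reduced userspace sketch of the pattern follows; the write_attr() helper, the /tmp path, and the truncation check are additions for illustration, not code from the patch.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

/* Hypothetical stand-in for write_sysfs_attribute(): writes len bytes. */
static int write_attr(const char *path, const char *buf, size_t len)
{
	int fd = open(path, O_WRONLY | O_CREAT, 0644);
	ssize_t rc;

	if (fd < 0)
		return -1;
	rc = write(fd, buf, len);	/* only the formatted bytes */
	close(fd);
	return rc == (ssize_t)len ? 0 : -1;
}

int main(void)
{
	char command[32 + 4];	/* mirrors SYSFS_BUS_ID_SIZE + 4 */
	/* snprintf() returns the formatted length, excluding the NUL. */
	int cmd_size = snprintf(command, sizeof(command), "add %s", "1-1.2");

	if (cmd_size < 0 || (size_t)cmd_size >= sizeof(command))
		return 1;	/* encoding error or truncated bus ID */

	return write_attr("/tmp/match_busid", command, (size_t)cmd_size) ? 1 : 0;
}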

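Finally, test_vsyscall.c keys much of its behavior off how the kernel maps the legacy vsyscall page: init_vsys() scans /proc/self/maps for the [vsyscall] entry, records whether the page is readable, and drops the direct-call tests entirely when it is not executable. The probe below is a reduced sketch of that logic, assuming only a Linux /proc; unlike the real test it does not validate the fixed 0xffffffffff600000 address range.

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *maps = fopen("/proc/self/maps", "r");
	char line[256];

	if (!maps) {
		perror("fopen /proc/self/maps");
		return 1;
	}

	while (fgets(line, sizeof(line), maps)) {
		char perms[8] = "";

		if (!strstr(line, "[vsyscall]"))
			continue;

		/* Permissions are the second whitespace-separated field,
		 * e.g. "ffffffffff600000-ffffffffff601000 r-xp ...". */
		if (sscanf(line, "%*s %7s", perms) == 1)
			printf("[vsyscall] perms %s: readable=%c executable=%c\n",
			       perms,
			       perms[0] == 'r' ? 'y' : 'n',
			       perms[2] == 'x' ? 'y' : 'n');
		fclose(maps);
		return 0;
	}

	fclose(maps);
	printf("no [vsyscall] mapping (32-bit kernel, or vsyscall disabled)\n");
	return 0;
}

Whether the page can be read and whether it can be called are independent properties here, which is exactly why the test tracks should_read_vsyscall separately from NULLing out the vgtod/vtime/vgetcpu pointers.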