Diffstat (limited to 'include')
114 files changed, 1100 insertions, 523 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index c602c7718421..ddabed1f51c2 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -169,7 +169,8 @@ struct acpi_device_flags { u32 ejectable:1; u32 power_manageable:1; u32 match_driver:1; - u32 reserved:27; + u32 no_hotplug:1; + u32 reserved:26; }; /* File System */ @@ -344,6 +345,7 @@ extern struct kobject *acpi_kobj; extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); void acpi_bus_private_data_handler(acpi_handle, void *); int acpi_bus_get_private_data(acpi_handle, void **); +void acpi_bus_no_hotplug(acpi_handle handle); extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); extern int register_acpi_notifier(struct notifier_block *); extern int unregister_acpi_notifier(struct notifier_block *); diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 639d7a4d033b..6f692f8ac664 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -1,4 +1,5 @@ -/* Generic barrier definitions, based on MN10300 definitions. +/* + * Generic barrier definitions, originally based on MN10300 definitions. * * It should be possible to use these on really simple architectures, * but it serves more as a starting point for new ports. @@ -16,35 +17,65 @@ #ifndef __ASSEMBLY__ -#define nop() asm volatile ("nop") +#include <linux/compiler.h> + +#ifndef nop +#define nop() asm volatile ("nop") +#endif /* - * Force strict CPU ordering. - * And yes, this is required on UP too when we're talking - * to devices. + * Force strict CPU ordering. And yes, this is required on UP too when we're + * talking to devices. * - * This implementation only contains a compiler barrier. + * Fall back to compiler barriers if nothing better is provided. 
*/ -#define mb() asm volatile ("": : :"memory") +#ifndef mb +#define mb() barrier() +#endif + +#ifndef rmb #define rmb() mb() -#define wmb() asm volatile ("": : :"memory") +#endif + +#ifndef wmb +#define wmb() mb() +#endif + +#ifndef read_barrier_depends +#define read_barrier_depends() do { } while (0) +#endif #ifdef CONFIG_SMP #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while (0) +#endif + +#ifndef set_mb +#define set_mb(var, value) do { (var) = (value); mb(); } while (0) #endif -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) -#define read_barrier_depends() do {} while (0) -#define smp_read_barrier_depends() do {} while (0) +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) #endif /* !__ASSEMBLY__ */ #endif /* __ASM_GENERIC_BARRIER_H */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index f330d28e4d0e..db0923458940 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) #endif #ifndef pte_accessible -# define pte_accessible(pte) ((void)(pte),1) +# define pte_accessible(mm, pte) ((void)(pte), 1) #endif #ifndef flush_tlb_fix_spurious_fault @@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) #ifdef CONFIG_TRANSPARENT_HUGEPAGE barrier(); #endif - if (pmd_none(pmdval)) + if (pmd_none(pmdval) || pmd_trans_huge(pmdval)) return 1; if (unlikely(pmd_bad(pmdval))) { - if (!pmd_trans_huge(pmdval)) - pmd_clear_bad(pmd); + pmd_clear_bad(pmd); return 1; } return 0; diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index ddf2b420ac8f..1cd3f5d767a8 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h @@ -3,13 +3,11 @@ #include <linux/thread_info.h> -/* - * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users - * that think a non-zero value indicates we cannot preempt. - */ +#define PREEMPT_ENABLED (0) + static __always_inline int preempt_count(void) { - return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED; + return current_thread_info()->preempt_count; } static __always_inline int *preempt_count_ptr(void) @@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void) return ¤t_thread_info()->preempt_count; } -/* - * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the - * alternative is loosing a reschedule. Better schedule too often -- also this - * should be a very rare operation. - */ static __always_inline void preempt_count_set(int pc) { *preempt_count_ptr() = pc; @@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc) task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ } while (0) -/* - * We fold the NEED_RESCHED bit into the preempt count such that - * preempt_enable() can decrement and test for needing to reschedule with a - * single instruction. 
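The new smp_store_release()/smp_load_acquire() in asm-generic/barrier.h above document a release/acquire pairing: the generic fallbacks are a full smp_mb() plus an ACCESS_ONCE() store or load. As a rough user-space sketch of the guarantee they provide (C11 atomics standing in for the kernel macros; payload/ready and the thread functions are invented for illustration):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int payload;        /* plain data, made visible by the flag */
static _Atomic int ready;  /* the flag that carries the ordering   */

static void *producer(void *arg)
{
        payload = 42;                                /* A: write data */
        atomic_store_explicit(&ready, 1,
                              memory_order_release); /* B: publish    */
        return NULL;
}

static void *consumer(void *arg)
{
        while (!atomic_load_explicit(&ready,
                                     memory_order_acquire)) /* C */
                ;
        printf("%d\n", payload);  /* D: guaranteed to see A's 42 */
        return NULL;
}

int main(void)
{
        pthread_t p, c;

        pthread_create(&p, NULL, producer, NULL);
        pthread_create(&c, NULL, consumer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
}

Once C observes the release store B, everything before B (here, A) is ordered before everything after C; that is exactly the contract smp_store_release()/smp_load_acquire() spell out, and the compiletime_assert_atomic_type() check rejects types that cannot honour it with a single access.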
- * - * We invert the actual bit, so that when the decrement hits 0 we know we both - * need to resched (the bit is cleared) and can resched (no preempt count). - */ - static __always_inline void set_preempt_need_resched(void) { - *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED; } static __always_inline void clear_preempt_need_resched(void) { - *preempt_count_ptr() |= PREEMPT_NEED_RESCHED; } static __always_inline bool test_preempt_need_resched(void) { - return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED); + return false; } /* @@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val) static __always_inline bool __preempt_count_dec_and_test(void) { - return !--*preempt_count_ptr(); + /* + * Because of load-store architectures cannot do per-cpu atomic + * operations; we cannot use PREEMPT_NEED_RESCHED because it might get + * lost. + */ + return !--*preempt_count_ptr() && tif_need_resched(); } /* @@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void) */ static __always_inline bool should_resched(void) { - return unlikely(!*preempt_count_ptr()); + return unlikely(!preempt_count() && tif_need_resched()); } #ifdef CONFIG_PREEMPT diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 3f21f1b72e45..d3909effd725 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -49,4 +49,12 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct return (val + c->high_bits) & ~rhs; } +#ifndef zero_bytemask +#ifdef CONFIG_64BIT +#define zero_bytemask(mask) (~0ul << fls64(mask)) +#else +#define zero_bytemask(mask) (~0ul << fls(mask)) +#endif /* CONFIG_64BIT */ +#endif /* zero_bytemask */ + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 13621cc8cf4c..6a626a507b8c 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -36,6 +36,7 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num, { sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0); sg1[num - 1].page_link &= ~0x02; + sg1[num - 1].page_link |= 0x01; } static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) @@ -43,7 +44,7 @@ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) if (sg_is_last(sg)) return NULL; - return (++sg)->length ? sg : (void *)sg_page(sg); + return (++sg)->length ? 
sg : sg_chain_ptr(sg); } static inline void scatterwalk_crypto_chain(struct scatterlist *head, diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 87578c109e48..49376aec2fbb 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -600,7 +600,7 @@ {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ - {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h index 9a193b84238a..a89df3be1686 100644 --- a/include/linux/assoc_array.h +++ b/include/linux/assoc_array.h @@ -41,10 +41,10 @@ struct assoc_array_ops { /* Is this the object we're looking for? */ bool (*compare_object)(const void *object, const void *index_key); - /* How different are two objects, to a bit position in their keys? (or - * -1 if they're the same) + /* How different is an object from an index key, to a bit position in + * their keys? (or -1 if they're the same) */ - int (*diff_objects)(const void *a, const void *b); + int (*diff_objects)(const void *object, const void *index_key); /* Method to free an object. 
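The scatterwalk chaining fix above matters because sg_is_chain()/sg_chain_ptr() key off bit 0 of page_link; merely clearing the end bit (0x02) left a chain entry indistinguishable from an ordinary page entry. A toy, self-contained model of the low-bit tagging (the SG_CHAIN/SG_END values mirror the kernel's; struct sg and the helpers are invented stand-ins):

#include <stdio.h>
#include <assert.h>

#define SG_CHAIN 0x01UL  /* entry points at another scatterlist */
#define SG_END   0x02UL  /* entry terminates the list           */

struct sg { unsigned long page_link; };

static void chain(struct sg *last, struct sg *next)
{
        last->page_link = (unsigned long)next;
        last->page_link &= ~SG_END;   /* no longer a terminator...   */
        last->page_link |= SG_CHAIN;  /* ...and now marked as chain, */
}                                     /* which the old code forgot   */

static struct sg *chain_ptr(const struct sg *s)
{
        /* sg_chain_ptr() analogue: strip the two flag bits */
        return (struct sg *)(s->page_link & ~(SG_CHAIN | SG_END));
}

int main(void)
{
        struct sg a[2], b[2] = { { 0 }, { SG_END } };

        chain(&a[1], b);
        assert(a[1].page_link & SG_CHAIN);
        assert(chain_ptr(&a[1]) == b);
        printf("chained to %p\n", (void *)chain_ptr(&a[1]));
        return 0;
}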
*/ void (*free_object)(void *object); diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h index 669fef5c745a..3e0fbe441763 100644 --- a/include/linux/auxvec.h +++ b/include/linux/auxvec.h @@ -3,6 +3,6 @@ #include <uapi/linux/auxvec.h> -#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */ +#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */ /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ #endif /* _LINUX_AUXVEC_H */ diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h index 27b1bcffe408..86c12c93e3cf 100644 --- a/include/linux/bottom_half.h +++ b/include/linux/bottom_half.h @@ -1,9 +1,35 @@ #ifndef _LINUX_BH_H #define _LINUX_BH_H -extern void local_bh_disable(void); +#include <linux/preempt.h> +#include <linux/preempt_mask.h> + +#ifdef CONFIG_TRACE_IRQFLAGS +extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); +#else +static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) +{ + preempt_count_add(cnt); + barrier(); +} +#endif + +static inline void local_bh_disable(void) +{ + __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); +} + extern void _local_bh_enable(void); -extern void local_bh_enable(void); -extern void local_bh_enable_ip(unsigned long ip); +extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt); + +static inline void local_bh_enable_ip(unsigned long ip) +{ + __local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET); +} + +static inline void local_bh_enable(void) +{ + __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); +} #endif /* _LINUX_BH_H */ diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 973ce10c40b6..dc1bd3dcf11f 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -28,8 +28,6 @@ #endif -#define uninitialized_var(x) x - #ifndef __HAVE_BUILTIN_BSWAP16__ /* icc has this, but it's called _bswap16 */ #define __HAVE_BUILTIN_BSWAP16__ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 92669cd182a6..fe7a686dfd8d 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -298,6 +298,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) #endif +/* Is this type a native word size -- useful for atomic operations */ +#ifndef __native_word +# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) +#endif + /* Compile time object size, -1 for unknown */ #ifndef __compiletime_object_size # define __compiletime_object_size(obj) -1 @@ -337,6 +342,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); #define compiletime_assert(condition, msg) \ _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define compiletime_assert_atomic_type(t) \ + compiletime_assert(__native_word(t), \ + "Need native word sized stores/loads for atomicity.") + /* * Prevent the compiler from merging or refetching accesses. 
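compiletime_assert_atomic_type(), added to linux/compiler.h above, is what lets the new smp_store_release()/smp_load_acquire() reject objects that cannot be read or written in one native access. A standalone C11 approximation, with _Static_assert standing in for the kernel's compiletime_assert() machinery (struct big and the variables are invented):

#include <stdio.h>

#define __native_word(t) \
        (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))

#define compiletime_assert_atomic_type(t) \
        _Static_assert(__native_word(t), \
                       "Need native word sized stores/loads for atomicity.")

struct big { char buf[64]; };

int main(void)
{
        long ok = 0;

        compiletime_assert_atomic_type(ok);   /* compiles fine */
        /*
         * struct big bad;
         * compiletime_assert_atomic_type(bad);  -- breaks the build,
         * because a 64-byte object has no single-instruction load/store
         */
        printf("%ld\n", ok);
        return 0;
}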
The compiler * is also forbidden from reordering successive instances of ACCESS_ONCE(), diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 158158704c30..37b81bd51ec0 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -17,13 +17,13 @@ extern void __context_tracking_task_switch(struct task_struct *prev, static inline void user_enter(void) { - if (static_key_false(&context_tracking_enabled)) + if (context_tracking_is_enabled()) context_tracking_user_enter(); } static inline void user_exit(void) { - if (static_key_false(&context_tracking_enabled)) + if (context_tracking_is_enabled()) context_tracking_user_exit(); } @@ -31,7 +31,7 @@ static inline enum ctx_state exception_enter(void) { enum ctx_state prev_ctx; - if (!static_key_false(&context_tracking_enabled)) + if (!context_tracking_is_enabled()) return 0; prev_ctx = this_cpu_read(context_tracking.state); @@ -42,7 +42,7 @@ static inline enum ctx_state exception_enter(void) static inline void exception_exit(enum ctx_state prev_ctx) { - if (static_key_false(&context_tracking_enabled)) { + if (context_tracking_is_enabled()) { if (prev_ctx == IN_USER) context_tracking_user_enter(); } @@ -51,7 +51,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) static inline void context_tracking_task_switch(struct task_struct *prev, struct task_struct *next) { - if (static_key_false(&context_tracking_enabled)) + if (context_tracking_is_enabled()) __context_tracking_task_switch(prev, next); } #else diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h index 0f1979d0674f..97a81225d037 100644 --- a/include/linux/context_tracking_state.h +++ b/include/linux/context_tracking_state.h @@ -22,15 +22,20 @@ struct context_tracking { extern struct static_key context_tracking_enabled; DECLARE_PER_CPU(struct context_tracking, context_tracking); -static inline bool context_tracking_in_user(void) +static inline bool context_tracking_is_enabled(void) { - return __this_cpu_read(context_tracking.state) == IN_USER; + return static_key_false(&context_tracking_enabled); } -static inline bool context_tracking_active(void) +static inline bool context_tracking_cpu_is_enabled(void) { return __this_cpu_read(context_tracking.active); } + +static inline bool context_tracking_in_user(void) +{ + return __this_cpu_read(context_tracking.state) == IN_USER; +} #else static inline bool context_tracking_in_user(void) { return false; } static inline bool context_tracking_active(void) { return false; } diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index fe68a5a98583..7032518f8542 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -6,6 +6,8 @@ #include <linux/proc_fs.h> #include <linux/elf.h> +#include <asm/pgtable.h> /* for pgprot_t */ + #define ELFCORE_ADDR_MAX (-1ULL) #define ELFCORE_ADDR_ERR (-2ULL) diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 57e87e749a48..bf72e9ac6de0 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -29,8 +29,10 @@ struct vfsmount; /* The hash is always the low bits of hash_len */ #ifdef __LITTLE_ENDIAN #define HASH_LEN_DECLARE u32 hash; u32 len; + #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8)) #else #define HASH_LEN_DECLARE u32 len; u32 hash; + #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8)) #endif /* diff --git a/include/linux/edac.h b/include/linux/edac.h index dbdffe8d4469..8e6c20af11a2 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ 
-35,6 +35,34 @@ extern void edac_atomic_assert_error(void); extern struct bus_type *edac_get_sysfs_subsys(void); extern void edac_put_sysfs_subsys(void); +enum { + EDAC_REPORTING_ENABLED, + EDAC_REPORTING_DISABLED, + EDAC_REPORTING_FORCE +}; + +extern int edac_report_status; +#ifdef CONFIG_EDAC +static inline int get_edac_report_status(void) +{ + return edac_report_status; +} + +static inline void set_edac_report_status(int new) +{ + edac_report_status = new; +} +#else +static inline int get_edac_report_status(void) +{ + return EDAC_REPORTING_DISABLED; +} + +static inline void set_edac_report_status(int new) +{ +} +#endif + static inline void opstate_init(void) { switch (edac_op_state) { diff --git a/include/linux/efi.h b/include/linux/efi.h index bc5687d0f315..0a819e7a60c9 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -556,6 +556,9 @@ extern struct efi { unsigned long hcdp; /* HCDP table */ unsigned long uga; /* UGA table */ unsigned long uv_systab; /* UV system table */ + unsigned long fw_vendor; /* fw_vendor */ + unsigned long runtime; /* runtime table */ + unsigned long config_table; /* config tables */ efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; @@ -653,6 +656,7 @@ extern int __init efi_setup_pcdp_console(char *); #define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */ #define EFI_MEMMAP 4 /* Can we use EFI memory map? */ #define EFI_64BIT 5 /* Is the firmware 64-bit? */ +#define EFI_ARCH_1 6 /* First arch-specific bit */ #ifdef CONFIG_EFI # ifdef CONFIG_X86 @@ -801,6 +805,8 @@ struct efivar_entry { struct efi_variable var; struct list_head list; struct kobject kobj; + bool scanning; + bool deleting; }; @@ -866,6 +872,21 @@ void efivar_run_worker(void); #if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE) int efivars_sysfs_init(void); +#define EFIVARS_DATA_SIZE_MAX 1024 + #endif /* CONFIG_EFI_VARS */ +#ifdef CONFIG_EFI_RUNTIME_MAP +int efi_runtime_map_init(struct kobject *); +void efi_runtime_map_setup(void *, int, u32); +#else +static inline int efi_runtime_map_init(struct kobject *kobj) +{ + return 0; +} + +static inline void +efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {} +#endif + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 9abbe630c456..8c9b7a1c4138 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -248,6 +248,9 @@ struct ftrace_event_call { #ifdef CONFIG_PERF_EVENTS int perf_refcount; struct hlist_head __percpu *perf_events; + + int (*perf_perm)(struct ftrace_event_call *, + struct perf_event *); #endif }; @@ -317,6 +320,19 @@ struct ftrace_event_file { } \ early_initcall(trace_init_flags_##name); +#define __TRACE_EVENT_PERF_PERM(name, expr...) 
\ + static int perf_perm_##name(struct ftrace_event_call *tp_event, \ + struct perf_event *p_event) \ + { \ + return ({ expr; }); \ + } \ + static int __init trace_init_perf_perm_##name(void) \ + { \ + event_##name.perf_perm = &perf_perm_##name; \ + return 0; \ + } \ + early_initcall(trace_init_perf_perm_##name); + #define PERF_MAX_TRACE_SIZE 2048 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 82eac610ce1a..3ea2cf6b0e6c 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -2,9 +2,12 @@ #define __LINUX_GPIO_DRIVER_H #include <linux/types.h> +#include <linux/module.h> struct device; struct gpio_desc; +struct of_phandle_args; +struct device_node; struct seq_file; /** diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index d9cf963ac832..12d5f972f23f 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -5,6 +5,7 @@ #include <linux/lockdep.h> #include <linux/ftrace_irq.h> #include <linux/vtime.h> +#include <asm/hardirq.h> extern void synchronize_irq(unsigned int irq); diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index 206a2af6b62b..b914ca3f57ba 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h @@ -42,6 +42,8 @@ struct hid_sensor_hub_attribute_info { s32 units; s32 unit_expo; s32 size; + s32 logical_minimum; + s32 logical_maximum; }; /** diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h index 4f945d3ed49f..8323775ac21d 100644 --- a/include/linux/hid-sensor-ids.h +++ b/include/linux/hid-sensor-ids.h @@ -117,4 +117,16 @@ #define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316 #define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319 +/* Power state enumerations */ +#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x00 +#define HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x01 +#define HID_USAGE_SENSOR_PROP_POWER_STATE_D1_LOW_POWER_ENUM 0x02 +#define HID_USAGE_SENSOR_PROP_POWER_STATE_D2_STANDBY_WITH_WAKE_ENUM 0x03 +#define HID_USAGE_SENSOR_PROP_POWER_STATE_D3_SLEEP_WITH_WAKE_ENUM 0x04 +#define HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM 0x05 + +/* Report State enumerations */ +#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x00 +#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x01 + #endif diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 9649ff0c63f8..bd7e98752222 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -142,7 +142,10 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page) return 0; } -#define isolate_huge_page(p, l) false +static inline bool isolate_huge_page(struct page *page, struct list_head *list) +{ + return false; +} #define putback_active_hugepage(p) do {} while (0) #define is_hugepage_active(x) false diff --git a/include/linux/i2c.h b/include/linux/i2c.h index eff50e062be8..d9c8dbd3373f 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -445,7 +445,7 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) static inline struct i2c_adapter * i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) { -#if IS_ENABLED(I2C_MUX) +#if IS_ENABLED(CONFIG_I2C_MUX) struct device *parent = adapter->dev.parent; if (parent != NULL && parent->type == &i2c_adapter_type) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index b0ed422e4e4a..f0e52383a001 100644 --- a/include/linux/init_task.h +++ 
b/include/linux/init_task.h @@ -11,6 +11,7 @@ #include <linux/user_namespace.h> #include <linux/securebits.h> #include <linux/seqlock.h> +#include <linux/rbtree.h> #include <net/net_namespace.h> #include <linux/sched/rt.h> @@ -154,6 +155,14 @@ extern struct task_group root_task_group; #define INIT_TASK_COMM "swapper" +#ifdef CONFIG_RT_MUTEXES +# define INIT_RT_MUTEXES(tsk) \ + .pi_waiters = RB_ROOT, \ + .pi_waiters_leftmost = NULL, +#else +# define INIT_RT_MUTEXES(tsk) +#endif + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. Base=0, limit=0x1fffff (=2MB) @@ -221,6 +230,7 @@ extern struct task_group root_task_group; INIT_TRACE_RECURSION \ INIT_TASK_RCU_PREEMPT(tsk) \ INIT_CPUSET_SEQ(tsk) \ + INIT_RT_MUTEXES(tsk) \ INIT_VTIME(tsk) \ } diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 5d89d1b808a6..c56c350324e4 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -4,6 +4,7 @@ #include <uapi/linux/ipv6.h> #define ipv6_optlen(p) (((p)->hdrlen+1) << 3) +#define ipv6_authlen(p) (((p)->hdrlen+2) << 2) /* * This structure contains configuration options per IPv6 link. */ diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h index 714ba08dc092..e374e369fb2f 100644 --- a/include/linux/irqreturn.h +++ b/include/linux/irqreturn.h @@ -14,6 +14,6 @@ enum irqreturn { }; typedef enum irqreturn irqreturn_t; -#define IRQ_RETVAL(x) ((x) != IRQ_NONE) +#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE) #endif diff --git a/include/linux/kernel.h b/include/linux/kernel.h index d4e98d13eff4..2aa3d4b000e6 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -193,7 +193,8 @@ extern int _cond_resched(void); (__x < 0) ? -__x : __x; \ }) -#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) +#if defined(CONFIG_MMU) && \ + (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)) void might_fault(void); #else static inline void might_fault(void) { } @@ -393,6 +394,15 @@ extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; extern int sysctl_panic_on_stackoverflow; +/* + * Only to be used by arch init code. If the user over-wrote the default + * CONFIG_PANIC_TIMEOUT, honor it. 
+ */ +static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) +{ + if (panic_timeout == arch_default_timeout) + panic_timeout = timeout; +} extern const char *print_tainted(void); enum lockdep_ok { LOCKDEP_STILL_OK, diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d78d28a733b1..5fd33dc1fe3a 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; extern size_t vmcoreinfo_size; extern size_t vmcoreinfo_max_size; +/* flag to track if kexec reboot is in progress */ +extern bool kexec_in_progress; + int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base); int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, diff --git a/include/linux/libata.h b/include/linux/libata.h index 0e23c26485f4..9b503376738f 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -418,6 +418,7 @@ enum { ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */ + ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ diff --git a/include/linux/lockref.h b/include/linux/lockref.h index c8929c3832db..4bfde0e99ed5 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h @@ -19,7 +19,7 @@ #define USE_CMPXCHG_LOCKREF \ (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \ - IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS) + IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4) struct lockref { union { diff --git a/include/linux/math64.h b/include/linux/math64.h index 69ed5f5e9f6e..c45c089bfdac 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) return ret; } +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) + +#ifndef mul_u64_u32_shr +static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) +{ + return (u64)(((unsigned __int128)a * mul) >> shift); +} +#endif /* mul_u64_u32_shr */ + +#else + +#ifndef mul_u64_u32_shr +static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) +{ + u32 ah, al; + u64 ret; + + al = a; + ah = a >> 32; + + ret = ((u64)al * mul) >> shift; + if (ah) + ret += ((u64)ah * mul) << (32 - shift); + + return ret; +} +#endif /* mul_u64_u32_shr */ + +#endif + #endif /* _LINUX_MATH64_H */ diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 2d0c9071bcfb..cab2dd279076 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -39,7 +39,8 @@ enum sec_device_type { struct sec_pmic_dev { struct device *dev; struct sec_platform_data *pdata; - struct regmap *regmap; + struct regmap *regmap_pmic; + struct regmap *regmap_rtc; struct i2c_client *i2c; struct i2c_client *rtc; diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index ad05ce60c1c9..2e5b194b9b19 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -22,6 +22,8 @@ #define PHY_ID_KSZ8021 0x00221555 #define PHY_ID_KSZ8031 0x00221556 #define PHY_ID_KSZ8041 0x00221510 +/* undocumented */ +#define PHY_ID_KSZ8041RNLI 0x00221537 #define PHY_ID_KSZ8051 0x00221550 /* same id: ks8001 Rev. A/B, and ks8721 Rev 3. 
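The two mul_u64_u32_shr() fallbacks added to linux/math64.h above compute (a * mul) >> shift without losing the high bits of the intermediate product. For shift <= 32 the split 32-bit path is exact, because the high-half term ah*mul*2^(32-shift) is an integer, so the floor distributes over the sum. A standalone sketch of both paths (assumes a compiler with __int128, e.g. gcc/clang on a 64-bit target; the test values are arbitrary):

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

static uint64_t mul_shr_128(uint64_t a, uint32_t mul, unsigned int shift)
{
        /* full 128-bit product, then shift */
        return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

static uint64_t mul_shr_32(uint64_t a, uint32_t mul, unsigned int shift)
{
        uint32_t ah = a >> 32, al = (uint32_t)a;
        uint64_t ret;

        ret = ((uint64_t)al * mul) >> shift;      /* low-half term   */
        if (ah)
                ret += ((uint64_t)ah * mul) << (32 - shift); /* high */
        return ret;
}

int main(void)
{
        /* e.g. scale a 64-bit count by a fixed-point ratio mul / 2^shift */
        uint64_t a = 0x123456789abcdefULL;
        uint32_t mul = 1000;
        unsigned int shift = 10;

        assert(mul_shr_128(a, mul, shift) == mul_shr_32(a, mul, shift));
        printf("%llu\n", (unsigned long long)mul_shr_128(a, mul, shift));
        return 0;
}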
*/ #define PHY_ID_KSZ8001 0x0022161A diff --git a/include/linux/migrate.h b/include/linux/migrate.h index f5096b58b20d..f015c059e159 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page); extern int migrate_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page, - struct buffer_head *head, enum migrate_mode mode); + struct buffer_head *head, enum migrate_mode mode, + int extra_count); #else static inline void putback_lru_pages(struct list_head *l) {} @@ -90,10 +91,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_NUMA_BALANCING +extern bool pmd_trans_migrating(pmd_t pmd); +extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd); extern int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node); extern bool migrate_ratelimited(int node); #else +static inline bool pmd_trans_migrating(pmd_t pmd) +{ + return false; +} +static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd) +{ +} static inline int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node) { diff --git a/include/linux/mm.h b/include/linux/mm.h index 1cedd000cf29..35527173cf50 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ #if USE_SPLIT_PTE_PTLOCKS -#if BLOATED_SPINLOCKS +#if ALLOC_SPLIT_PTLOCKS extern bool ptlock_alloc(struct page *page); extern void ptlock_free(struct page *page); @@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page) { return page->ptl; } -#else /* BLOATED_SPINLOCKS */ +#else /* ALLOC_SPLIT_PTLOCKS */ static inline bool ptlock_alloc(struct page *page) { return true; @@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page) { return &page->ptl; } -#endif /* BLOATED_SPINLOCKS */ +#endif /* ALLOC_SPLIT_PTLOCKS */ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) { diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index bd299418a934..290901a8c1de 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -26,6 +26,7 @@ struct address_space; #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) +#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) /* * Each physical page in the system has a struct page associated with @@ -155,7 +156,7 @@ struct page { * system if PG_buddy is set. */ #if USE_SPLIT_PTE_PTLOCKS -#if BLOATED_SPINLOCKS +#if ALLOC_SPLIT_PTLOCKS spinlock_t *ptl; #else spinlock_t ptl; @@ -443,6 +444,14 @@ struct mm_struct { /* numa_scan_seq prevents two threads setting pte_numa */ int numa_scan_seq; #endif +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) + /* + * An operation with batched TLB flushing is going on. Anything that + * can move process memory needs to flush the TLB when moving a + * PROT_NONE or PROT_NUMA mapped page. 
+ */ + bool tlb_flush_pending; +#endif struct uprobes_state uprobes_state; }; @@ -459,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm) return mm->cpu_vm_mask_var; } +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) +/* + * Memory barriers to keep this state in sync are graciously provided by + * the page table locks, outside of which no page table modifications happen. + * The barriers below prevent the compiler from re-ordering the instructions + * around the memory barriers that are already present in the code. + */ +static inline bool mm_tlb_flush_pending(struct mm_struct *mm) +{ + barrier(); + return mm->tlb_flush_pending; +} +static inline void set_tlb_flush_pending(struct mm_struct *mm) +{ + mm->tlb_flush_pending = true; + + /* + * Guarantee that the tlb_flush_pending store does not leak into the + * critical section updating the page tables + */ + smp_mb__before_spinlock(); +} +/* Clearing is done after a TLB flush, which also provides a barrier. */ +static inline void clear_tlb_flush_pending(struct mm_struct *mm) +{ + barrier(); + mm->tlb_flush_pending = false; +} +#else +static inline bool mm_tlb_flush_pending(struct mm_struct *mm) +{ + return false; +} +static inline void set_tlb_flush_pending(struct mm_struct *mm) +{ +} +static inline void clear_tlb_flush_pending(struct mm_struct *mm) +{ +} +#endif + #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/net.h b/include/linux/net.h index 4bcee94cef93..69be3e6079c8 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -181,7 +181,7 @@ struct proto_ops { int offset, size_t size, int flags); ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); - void (*set_peek_off)(struct sock *sk, int val); + int (*set_peek_off)(struct sock *sk, int val); }; #define DECLARE_SOCKADDR(type, dst, src) \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7f0ed423a360..ce2a1f5f9a1e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -769,7 +769,8 @@ struct netdev_phys_port_id { * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) * Required can not be NULL. * - * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); + * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, + * void *accel_priv); * Called to decide which queue to when device supports multiple * transmit queues. 
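The tlb_flush_pending machinery added to mm_types.h above coordinates protection-changing code with page migration: the changer raises the flag before taking the page-table lock and only drops it after the TLB flush, while migration reads the flag under the same lock and backs off. A deliberately simplified, single-threaded model of the protocol (a pthread mutex stands in for the page-table lock; all names are illustrative, not the kernel's call sites):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
static bool tlb_flush_pending;

static void change_protection(void)
{
        tlb_flush_pending = true;       /* set_tlb_flush_pending()   */
        pthread_mutex_lock(&ptl);
        /* ... rewrite PTEs to PROT_NONE/PROT_NUMA here ... */
        pthread_mutex_unlock(&ptl);
        /* ... flush_tlb_range() here ... */
        tlb_flush_pending = false;      /* clear_tlb_flush_pending() */
}

static bool try_migrate_page(void)
{
        bool busy;

        pthread_mutex_lock(&ptl);
        busy = tlb_flush_pending;       /* mm_tlb_flush_pending()    */
        /* only move the page when no flush is in flight, otherwise a
         * stale TLB entry could still let another CPU write to it   */
        pthread_mutex_unlock(&ptl);
        return !busy;
}

int main(void)
{
        change_protection();
        printf("migrate ok: %d\n", try_migrate_page());
        return 0;
}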
* @@ -990,7 +991,8 @@ struct net_device_ops { netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, struct net_device *dev); u16 (*ndo_select_queue)(struct net_device *dev, - struct sk_buff *skb); + struct sk_buff *skb, + void *accel_priv); void (*ndo_change_rx_flags)(struct net_device *dev, int flags); void (*ndo_set_rx_mode)(struct net_device *dev); @@ -1255,7 +1257,7 @@ struct net_device { unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ unsigned char addr_assign_type; /* hw address assignment type */ unsigned char addr_len; /* hardware address length */ - unsigned char neigh_priv_len; + unsigned short neigh_priv_len; unsigned short dev_id; /* Used to differentiate devices * that share the same link * layer address @@ -1529,7 +1531,8 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, } struct netdev_queue *netdev_pick_tx(struct net_device *dev, - struct sk_buff *skb); + struct sk_buff *skb, + void *accel_priv); u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); /* @@ -1819,6 +1822,7 @@ int dev_close(struct net_device *dev); void dev_disable_lro(struct net_device *dev); int dev_loopback_xmit(struct sk_buff *newskb); int dev_queue_xmit(struct sk_buff *skb); +int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); int register_netdevice(struct net_device *dev); void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); void unregister_netdevice_many(struct list_head *head); @@ -1912,6 +1916,15 @@ static inline int dev_parse_header(const struct sk_buff *skb, return dev->header_ops->parse(skb, haddr); } +static inline int dev_rebuild_header(struct sk_buff *skb) +{ + const struct net_device *dev = skb->dev; + + if (!dev->header_ops || !dev->header_ops->rebuild) + return 0; + return dev->header_ops->rebuild(skb); +} + typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); int register_gifconf(unsigned int family, gifconf_func_t *gifconf); static inline int unregister_gifconf(unsigned int family) @@ -2417,7 +2430,7 @@ int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_port_id *ppid); int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, - struct netdev_queue *txq, void *accel_priv); + struct netdev_queue *txq); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); extern int netdev_budget; @@ -3008,6 +3021,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev, dev->gso_max_size = size; } +static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, + int pulled_hlen, u16 mac_offset, + int mac_len) +{ + skb->protocol = protocol; + skb->encapsulation = 1; + skb_push(skb, pulled_hlen); + skb_reset_transport_header(skb); + skb->mac_header = mac_offset; + skb->network_header = skb->mac_header + mac_len; + skb->mac_len = mac_len; +} + static inline bool netif_is_macvlan(struct net_device *dev) { return dev->priv_flags & IFF_MACVLAN; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index c1637062c1ce..12c2cb947df5 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -413,16 +413,6 @@ enum lock_type4 { #define NFS4_VERSION 4 #define NFS4_MINOR_VERSION 0 -#if defined(CONFIG_NFS_V4_2) -#define NFS4_MAX_MINOR_VERSION 2 -#else -#if defined(CONFIG_NFS_V4_1) -#define NFS4_MAX_MINOR_VERSION 1 -#else -#define NFS4_MAX_MINOR_VERSION 0 -#endif /* CONFIG_NFS_V4_1 */ -#endif /* CONFIG_NFS_V4_2 */ - #define NFS4_DEBUG 1 /* Index of predefined Linux 
client operations */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 14a48207a304..48997374eaf0 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -507,24 +507,6 @@ extern int nfs_mountpoint_expiry_timeout; extern void nfs_release_automount_timer(void); /* - * linux/fs/nfs/nfs4proc.c - */ -#ifdef CONFIG_NFS_V4_SECURITY_LABEL -extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags); -static inline void nfs4_label_free(struct nfs4_label *label) -{ - if (label) { - kfree(label->label); - kfree(label); - } - return; -} -#else -static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; } -static inline void nfs4_label_free(void *label) {} -#endif - -/* * linux/fs/nfs/unlink.c */ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); diff --git a/include/linux/pci.h b/include/linux/pci.h index 1084a15175e0..a13d6825e586 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -960,6 +960,7 @@ void pci_update_resource(struct pci_dev *dev, int resno); int __must_check pci_assign_resource(struct pci_dev *dev, int i); int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); int pci_select_bars(struct pci_dev *dev, unsigned long flags); +bool pci_device_is_present(struct pci_dev *pdev); /* ROM control related routines */ int pci_enable_rom(struct pci_dev *pdev); @@ -1567,65 +1568,65 @@ enum pci_fixup_pass { /* Anonymous variables would be nice... */ #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ class_shift, hook) \ - static const struct pci_fixup __pci_fixup_##name __used \ + static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \ __attribute__((__section__(#section), aligned((sizeof(void *))))) \ = { vendor, device, class, class_shift, hook }; #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ - vendor##device##hook, vendor, device, class, class_shift, hook) + hook, vendor, device, class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ - vendor##device##hook, vendor, device, class, class_shift, hook) + hook, vendor, device, class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ - vendor##device##hook, vendor, device, class, class_shift, hook) + hook, vendor, device, class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ - vendor##device##hook, vendor, device, class, class_shift, hook) + hook, vendor, device, class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ - resume##vendor##device##hook, vendor, device, class, \ + resume##hook, vendor, device, class, \ class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ - resume_early##vendor##device##hook, vendor, device, \ + resume_early##hook, vendor, device, \ class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ - 
suspend##vendor##device##hook, vendor, device, class, \ + suspend##hook, vendor, device, class, \ class_shift, hook) #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ - vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) + hook, vendor, device, PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ - vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) + hook, vendor, device, PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ - vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) + hook, vendor, device, PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ - vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) + hook, vendor, device, PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ - resume##vendor##device##hook, vendor, device, \ + resume##hook, vendor, device, \ PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ - resume_early##vendor##device##hook, vendor, device, \ + resume_early##hook, vendor, device, \ PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ - suspend##vendor##device##hook, vendor, device, \ + suspend##hook, vendor, device, \ PCI_ANY_ID, 0, hook) #ifdef CONFIG_PCI_QUIRKS diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 57e890abe1f0..a5fc7d01aad6 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -69,6 +69,7 @@ __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ + extern __PCPU_ATTRS(sec) __typeof__(type) name; \ __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ __typeof__(type) name #else diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2e069d1288df..e56b07f5c9b6 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -320,6 +320,7 @@ struct perf_event { struct list_head migrate_entry; struct hlist_node hlist_entry; + struct list_head active_entry; int nr_siblings; int group_flags; struct perf_event *group_leader; diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h index c167e4429bc7..0e3cce130fe2 100644 --- a/include/linux/platform_data/hwmon-s3c.h +++ b/include/linux/platform_data/hwmon-s3c.h @@ -1,5 +1,4 @@ -/* linux/arch/arm/plat-s3c/include/plat/hwmon.h - * +/* * Copyright 2005 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ @@ -11,8 +10,8 @@ * published by the Free Software Foundation. 
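The DECLARE_PCI_FIXUP_* renames above exist because pasting vendor##device##hook uses the unexpanded argument tokens, so two fixups registered with identical arguments (say, the same PCI_ANY_ID pair and hook) produced colliding symbol names; pasting __LINE__ instead makes every declaration unique. A standalone model of the idiom (struct fixup and DECLARE_FIXUP are invented stand-ins for struct pci_fixup and the kernel macros):

#include <stdio.h>

/* two-level paste so __LINE__ gets expanded before ## */
#define ___PASTE(a, b) a##b
#define __PASTE(a, b) ___PASTE(a, b)

struct fixup { void (*hook)(void); };

#define DECLARE_FIXUP(hook) \
        static const struct fixup \
        __PASTE(__fixup_##hook##_, __LINE__) \
        __attribute__((unused)) = { hook }

static void quirk_a(void) { }

DECLARE_FIXUP(quirk_a);  /* expands to __fixup_quirk_a_<line> */
DECLARE_FIXUP(quirk_a);  /* same arguments, yet no collision  */

int main(void)
{
        puts("two uniquely named fixup records declared");
        return 0;
}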
*/ -#ifndef __ASM_ARCH_ADC_HWMON_H -#define __ASM_ARCH_ADC_HWMON_H __FILE__ +#ifndef __HWMON_S3C_H__ +#define __HWMON_S3C_H__ /** * s3c_hwmon_chcfg - channel configuration @@ -47,5 +46,4 @@ struct s3c_hwmon_pdata { */ extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd); -#endif /* __ASM_ARCH_ADC_HWMON_H */ - +#endif /* __HWMON_S3C_H__ */ diff --git a/include/linux/platform_data/max197.h b/include/linux/platform_data/max197.h index e2a41dd7690c..8da8f94ee15c 100644 --- a/include/linux/platform_data/max197.h +++ b/include/linux/platform_data/max197.h @@ -11,6 +11,9 @@ * For further information, see the Documentation/hwmon/max197 file. */ +#ifndef _PDATA_MAX197_H +#define _PDATA_MAX197_H + /** * struct max197_platform_data - MAX197 connectivity info * @convert: Function used to start a conversion with control byte ctrl. @@ -19,3 +22,5 @@ struct max197_platform_data { int (*convert)(u8 ctrl); }; + +#endif /* _PDATA_MAX197_H */ diff --git a/include/linux/platform_data/sht15.h b/include/linux/platform_data/sht15.h index 33e0fd27225e..12289c1e9413 100644 --- a/include/linux/platform_data/sht15.h +++ b/include/linux/platform_data/sht15.h @@ -12,6 +12,9 @@ * For further information, see the Documentation/hwmon/sht15 file. */ +#ifndef _PDATA_SHT15_H +#define _PDATA_SHT15_H + /** * struct sht15_platform_data - sht15 connectivity info * @gpio_data: no. of gpio to which bidirectional data line is @@ -31,3 +34,5 @@ struct sht15_platform_data { bool no_otp_reload; bool low_resolution; }; + +#endif /* _PDATA_SHT15_H */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index a3d9dc8c2c00..59749fc48328 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -64,7 +64,11 @@ do { \ } while (0) #else -#define preempt_enable() preempt_enable_no_resched() +#define preempt_enable() \ +do { \ + barrier(); \ + preempt_count_dec(); \ +} while (0) #define preempt_check_resched() do { } while (0) #endif @@ -93,7 +97,11 @@ do { \ __preempt_schedule_context(); \ } while (0) #else -#define preempt_enable_notrace() preempt_enable_no_resched_notrace() +#define preempt_enable_notrace() \ +do { \ + barrier(); \ + __preempt_count_dec(); \ +} while (0) #endif #else /* !CONFIG_PREEMPT_COUNT */ @@ -116,6 +124,31 @@ do { \ #endif /* CONFIG_PREEMPT_COUNT */ +#ifdef MODULE +/* + * Modules have no business playing preemption tricks. 
+ */ +#undef sched_preempt_enable_no_resched +#undef preempt_enable_no_resched +#undef preempt_enable_no_resched_notrace +#undef preempt_check_resched +#endif + +#ifdef CONFIG_PREEMPT +#define preempt_set_need_resched() \ +do { \ + set_preempt_need_resched(); \ +} while (0) +#define preempt_fold_need_resched() \ +do { \ + if (tif_need_resched()) \ + set_preempt_need_resched(); \ +} while (0) +#else +#define preempt_set_need_resched() do { } while (0) +#define preempt_fold_need_resched() do { } while (0) +#endif + #ifdef CONFIG_PREEMPT_NOTIFIERS struct preempt_notifier; diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h index d169820203dd..dbeec4d4a3be 100644 --- a/include/linux/preempt_mask.h +++ b/include/linux/preempt_mask.h @@ -2,7 +2,6 @@ #define LINUX_PREEMPT_MASK_H #include <linux/preempt.h> -#include <asm/hardirq.h> /* * We put the hardirq and softirq counter into the preemption @@ -79,6 +78,21 @@ #endif /* + * The preempt_count offset needed for things like: + * + * spin_lock_bh() + * + * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and + * softirqs, such that unlock sequences of: + * + * spin_unlock(); + * local_bh_enable(); + * + * Work as expected. + */ +#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET) + +/* * Are we running in atomic context? WARNING: this macro cannot * always detect atomic context; in particular, it cannot know about * held spinlocks in non-preemptible kernels. Thus it should not be diff --git a/include/linux/pstore.h b/include/linux/pstore.h index abd437d0a8a7..ece0c6bbfcc5 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -51,6 +51,7 @@ struct pstore_info { char *buf; size_t bufsize; struct mutex read_mutex; /* serialize open/read/close */ + int flags; int (*open)(struct pstore_info *psi); int (*close)(struct pstore_info *psi); ssize_t (*read)(u64 *id, enum pstore_type_id *type, @@ -70,6 +71,8 @@ struct pstore_info { void *data; }; +#define PSTORE_FLAGS_FRAGILE 1 + #ifdef CONFIG_PSTORE extern int pstore_register(struct pstore_info *); extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 45a0a9e81478..dbaf99084112 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -55,8 +55,8 @@ static inline void __list_add_rcu(struct list_head *new, next->prev = new; } #else -extern void __list_add_rcu(struct list_head *new, - struct list_head *prev, struct list_head *next); +void __list_add_rcu(struct list_head *new, + struct list_head *prev, struct list_head *next); #endif /** diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 39cbb889e20d..3e355c688618 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -50,13 +50,13 @@ extern int rcutorture_runnable; /* for sysctl */ #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) -extern void rcutorture_record_test_transition(void); -extern void rcutorture_record_progress(unsigned long vernum); -extern void do_trace_rcu_torture_read(const char *rcutorturename, - struct rcu_head *rhp, - unsigned long secs, - unsigned long c_old, - unsigned long c); +void rcutorture_record_test_transition(void); +void rcutorture_record_progress(unsigned long vernum); +void do_trace_rcu_torture_read(const char *rcutorturename, + struct rcu_head *rhp, + unsigned long secs, + unsigned long c_old, + unsigned long c); #else static inline void 
rcutorture_record_test_transition(void) { @@ -65,11 +65,11 @@ static inline void rcutorture_record_progress(unsigned long vernum) { } #ifdef CONFIG_RCU_TRACE -extern void do_trace_rcu_torture_read(const char *rcutorturename, - struct rcu_head *rhp, - unsigned long secs, - unsigned long c_old, - unsigned long c); +void do_trace_rcu_torture_read(const char *rcutorturename, + struct rcu_head *rhp, + unsigned long secs, + unsigned long c_old, + unsigned long c); #else #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ do { } while (0) @@ -118,8 +118,8 @@ extern void do_trace_rcu_torture_read(const char *rcutorturename, * if CPU A and CPU B are the same CPU (but again only if the system has * more than one CPU). */ -extern void call_rcu(struct rcu_head *head, - void (*func)(struct rcu_head *head)); +void call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *head)); #else /* #ifdef CONFIG_PREEMPT_RCU */ @@ -149,8 +149,8 @@ extern void call_rcu(struct rcu_head *head, * See the description of call_rcu() for more detailed information on * memory ordering guarantees. */ -extern void call_rcu_bh(struct rcu_head *head, - void (*func)(struct rcu_head *head)); +void call_rcu_bh(struct rcu_head *head, + void (*func)(struct rcu_head *head)); /** * call_rcu_sched() - Queue an RCU for invocation after sched grace period. @@ -171,16 +171,16 @@ extern void call_rcu_bh(struct rcu_head *head, * See the description of call_rcu() for more detailed information on * memory ordering guarantees. */ -extern void call_rcu_sched(struct rcu_head *head, - void (*func)(struct rcu_head *rcu)); +void call_rcu_sched(struct rcu_head *head, + void (*func)(struct rcu_head *rcu)); -extern void synchronize_sched(void); +void synchronize_sched(void); #ifdef CONFIG_PREEMPT_RCU -extern void __rcu_read_lock(void); -extern void __rcu_read_unlock(void); -extern void rcu_read_unlock_special(struct task_struct *t); +void __rcu_read_lock(void); +void __rcu_read_unlock(void); +void rcu_read_unlock_special(struct task_struct *t); void synchronize_rcu(void); /* @@ -216,19 +216,19 @@ static inline int rcu_preempt_depth(void) #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ /* Internal to kernel */ -extern void rcu_init(void); -extern void rcu_sched_qs(int cpu); -extern void rcu_bh_qs(int cpu); -extern void rcu_check_callbacks(int cpu, int user); +void rcu_init(void); +void rcu_sched_qs(int cpu); +void rcu_bh_qs(int cpu); +void rcu_check_callbacks(int cpu, int user); struct notifier_block; -extern void rcu_idle_enter(void); -extern void rcu_idle_exit(void); -extern void rcu_irq_enter(void); -extern void rcu_irq_exit(void); +void rcu_idle_enter(void); +void rcu_idle_exit(void); +void rcu_irq_enter(void); +void rcu_irq_exit(void); #ifdef CONFIG_RCU_USER_QS -extern void rcu_user_enter(void); -extern void rcu_user_exit(void); +void rcu_user_enter(void); +void rcu_user_exit(void); #else static inline void rcu_user_enter(void) { } static inline void rcu_user_exit(void) { } @@ -262,7 +262,7 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev, } while (0) #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) -extern bool __rcu_is_watching(void); +bool __rcu_is_watching(void); #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ /* @@ -289,8 +289,8 @@ void wait_rcu_gp(call_rcu_func_t crf); * initialization. 
*/ #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD -extern void init_rcu_head_on_stack(struct rcu_head *head); -extern void destroy_rcu_head_on_stack(struct rcu_head *head); +void init_rcu_head_on_stack(struct rcu_head *head); +void destroy_rcu_head_on_stack(struct rcu_head *head); #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ static inline void init_rcu_head_on_stack(struct rcu_head *head) { @@ -325,6 +325,7 @@ static inline void rcu_lock_release(struct lockdep_map *map) extern struct lockdep_map rcu_lock_map; extern struct lockdep_map rcu_bh_lock_map; extern struct lockdep_map rcu_sched_lock_map; +extern struct lockdep_map rcu_callback_map; extern int debug_lockdep_rcu_enabled(void); /** @@ -362,7 +363,7 @@ static inline int rcu_read_lock_held(void) * rcu_read_lock_bh_held() is defined out of line to avoid #include-file * hell. */ -extern int rcu_read_lock_bh_held(void); +int rcu_read_lock_bh_held(void); /** * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? @@ -448,7 +449,7 @@ static inline int rcu_read_lock_sched_held(void) #ifdef CONFIG_PROVE_RCU -extern int rcu_my_thread_group_empty(void); +int rcu_my_thread_group_empty(void); /** * rcu_lockdep_assert - emit lockdep splat if specified condition not met @@ -548,10 +549,48 @@ static inline void rcu_preempt_sleep_check(void) smp_read_barrier_depends(); \ (_________p1); \ }) -#define __rcu_assign_pointer(p, v, space) \ + +/** + * RCU_INITIALIZER() - statically initialize an RCU-protected global variable + * @v: The value to statically initialize with. + */ +#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) + +/** + * rcu_assign_pointer() - assign to RCU-protected pointer + * @p: pointer to assign to + * @v: value to assign (publish) + * + * Assigns the specified value to the specified RCU-protected + * pointer, ensuring that any concurrent RCU readers will see + * any prior initialization. + * + * Inserts memory barriers on architectures that require them + * (which is most of them), and also prevents the compiler from + * reordering the code that initializes the structure after the pointer + * assignment. More importantly, this call documents which pointers + * will be dereferenced by RCU read-side code. + * + * In some special cases, you may use RCU_INIT_POINTER() instead + * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due + * to the fact that it does not constrain either the CPU or the compiler. + * That said, using RCU_INIT_POINTER() when you should have used + * rcu_assign_pointer() is a very bad thing that results in + * impossible-to-diagnose memory corruption. So please be careful. + * See the RCU_INIT_POINTER() comment header for details. + * + * Note that rcu_assign_pointer() evaluates each of its arguments only + * once, appearances notwithstanding. One of the "extra" evaluations + * is in typeof() and the other visible only to sparse (__CHECKER__), + * neither of which actually execute the argument. As with most cpp + * macros, this execute-arguments-only-once property is important, so + * please be careful when making changes to rcu_assign_pointer() and the + * other macros that it invokes. 
+ */ +#define rcu_assign_pointer(p, v) \ do { \ smp_wmb(); \ - (p) = (typeof(*v) __force space *)(v); \ + ACCESS_ONCE(p) = RCU_INITIALIZER(v); \ } while (0) @@ -890,32 +929,6 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) } /** - * rcu_assign_pointer() - assign to RCU-protected pointer - * @p: pointer to assign to - * @v: value to assign (publish) - * - * Assigns the specified value to the specified RCU-protected - * pointer, ensuring that any concurrent RCU readers will see - * any prior initialization. - * - * Inserts memory barriers on architectures that require them - * (which is most of them), and also prevents the compiler from - * reordering the code that initializes the structure after the pointer - * assignment. More importantly, this call documents which pointers - * will be dereferenced by RCU read-side code. - * - * In some special cases, you may use RCU_INIT_POINTER() instead - * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due - * to the fact that it does not constrain either the CPU or the compiler. - * That said, using RCU_INIT_POINTER() when you should have used - * rcu_assign_pointer() is a very bad thing that results in - * impossible-to-diagnose memory corruption. So please be careful. - * See the RCU_INIT_POINTER() comment header for details. - */ -#define rcu_assign_pointer(p, v) \ - __rcu_assign_pointer((p), (v), __rcu) - -/** * RCU_INIT_POINTER() - initialize an RCU protected pointer * * Initialize an RCU-protected pointer in special cases where readers @@ -949,7 +962,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) */ #define RCU_INIT_POINTER(p, v) \ do { \ - p = (typeof(*v) __force __rcu *)(v); \ + p = RCU_INITIALIZER(v); \ } while (0) /** @@ -958,7 +971,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * GCC-style initialization for an RCU-protected pointer in a structure field. */ #define RCU_POINTER_INITIALIZER(p, v) \ - .p = (typeof(*v) __force __rcu *)(v) + .p = RCU_INITIALIZER(v) /* * Does the specified offset indicate that the corresponding rcu_head @@ -1005,7 +1018,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) #ifdef CONFIG_RCU_NOCB_CPU -extern bool rcu_is_nocb_cpu(int cpu); +bool rcu_is_nocb_cpu(int cpu); #else static inline bool rcu_is_nocb_cpu(int cpu) { return false; } #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ @@ -1013,8 +1026,8 @@ static inline bool rcu_is_nocb_cpu(int cpu) { return false; } /* Only for use by adaptive-ticks code. 
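The reworked rcu_assign_pointer() above is an ACCESS_ONCE() store preceded by smp_wmb(); what it guarantees is classic pointer publication: finish initialising the object, then make the pointer visible, so readers never see a half-built structure. A user-space C11 sketch of that contract (gp, struct foo and publish() are invented; memory_order_consume plays the part of rcu_dereference(), and RCU_INIT_POINTER() would be the relaxed store, legal only before readers can reach the pointer):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct foo { int a; int b; };

static _Atomic(struct foo *) gp;

static void publish(int a, int b)
{
        struct foo *p = malloc(sizeof(*p));

        p->a = a;                       /* initialise fully first...  */
        p->b = b;
        atomic_store_explicit(&gp, p,   /* ...then "assign pointer"   */
                              memory_order_release);
}

int main(void)
{
        publish(1, 2);

        /* reader side: dereference with (at least) consume ordering */
        struct foo *p = atomic_load_explicit(&gp, memory_order_consume);

        printf("%d %d\n", p->a, p->b);
        free(p);
        return 0;
}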
*/ #ifdef CONFIG_NO_HZ_FULL_SYSIDLE -extern bool rcu_sys_is_idle(void); -extern void rcu_sysidle_force_exit(void); +bool rcu_sys_is_idle(void); +void rcu_sysidle_force_exit(void); #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ static inline bool rcu_sys_is_idle(void) diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 09ebcbe9fd78..6f01771b571c 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -125,7 +125,7 @@ static inline void exit_rcu(void) #ifdef CONFIG_DEBUG_LOCK_ALLOC extern int rcu_scheduler_active __read_mostly; -extern void rcu_scheduler_starting(void); +void rcu_scheduler_starting(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ static inline void rcu_scheduler_starting(void) { diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 4b9c81548742..72137ee8c603 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -30,9 +30,9 @@ #ifndef __LINUX_RCUTREE_H #define __LINUX_RCUTREE_H -extern void rcu_note_context_switch(int cpu); -extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); -extern void rcu_cpu_stall_reset(void); +void rcu_note_context_switch(int cpu); +int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); +void rcu_cpu_stall_reset(void); /* * Note a virtualization-based context switch. This is simply a @@ -44,9 +44,9 @@ static inline void rcu_virt_note_context_switch(int cpu) rcu_note_context_switch(cpu); } -extern void synchronize_rcu_bh(void); -extern void synchronize_sched_expedited(void); -extern void synchronize_rcu_expedited(void); +void synchronize_rcu_bh(void); +void synchronize_sched_expedited(void); +void synchronize_rcu_expedited(void); void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); @@ -71,25 +71,25 @@ static inline void synchronize_rcu_bh_expedited(void) synchronize_sched_expedited(); } -extern void rcu_barrier(void); -extern void rcu_barrier_bh(void); -extern void rcu_barrier_sched(void); +void rcu_barrier(void); +void rcu_barrier_bh(void); +void rcu_barrier_sched(void); extern unsigned long rcutorture_testseq; extern unsigned long rcutorture_vernum; -extern long rcu_batches_completed(void); -extern long rcu_batches_completed_bh(void); -extern long rcu_batches_completed_sched(void); +long rcu_batches_completed(void); +long rcu_batches_completed_bh(void); +long rcu_batches_completed_sched(void); -extern void rcu_force_quiescent_state(void); -extern void rcu_bh_force_quiescent_state(void); -extern void rcu_sched_force_quiescent_state(void); +void rcu_force_quiescent_state(void); +void rcu_bh_force_quiescent_state(void); +void rcu_sched_force_quiescent_state(void); -extern void exit_rcu(void); +void exit_rcu(void); -extern void rcu_scheduler_starting(void); +void rcu_scheduler_starting(void); extern int rcu_scheduler_active __read_mostly; -extern bool rcu_is_watching(void); +bool rcu_is_watching(void); #endif /* __LINUX_RCUTREE_H */ diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 8e00f9f6f963..9e7db9e73cc1 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *); * Architecture-specific implementations of sys_reboot commands. 
*/ +extern void migrate_to_reboot_cpu(void); extern void machine_restart(char *cmd); extern void machine_halt(void); extern void machine_power_off(void); diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index de17134244f3..3aed8d737e1a 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -13,7 +13,7 @@ #define __LINUX_RT_MUTEX_H #include <linux/linkage.h> -#include <linux/plist.h> +#include <linux/rbtree.h> #include <linux/spinlock_types.h> extern int max_lock_depth; /* for sysctl */ @@ -22,12 +22,14 @@ extern int max_lock_depth; /* for sysctl */ * The rt_mutex structure * * @wait_lock: spinlock to protect the structure - * @wait_list: pilist head to enqueue waiters in priority order + * @waiters: rbtree root to enqueue waiters in priority order + * @waiters_leftmost: top waiter * @owner: the mutex owner */ struct rt_mutex { raw_spinlock_t wait_lock; - struct plist_head wait_list; + struct rb_root waiters; + struct rb_node *waiters_leftmost; struct task_struct *owner; #ifdef CONFIG_DEBUG_RT_MUTEXES int save_state; @@ -66,7 +68,7 @@ struct hrtimer_sleeper; #define __RT_MUTEX_INITIALIZER(mutexname) \ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ - , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \ + , .waiters = RB_ROOT \ , .owner = NULL \ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} @@ -98,12 +100,4 @@ extern int rt_mutex_trylock(struct rt_mutex *lock); extern void rt_mutex_unlock(struct rt_mutex *lock); -#ifdef CONFIG_RT_MUTEXES -# define INIT_RT_MUTEXES(tsk) \ - .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters), \ - INIT_RT_MUTEX_DEBUG(tsk) -#else -# define INIT_RT_MUTEXES(tsk) -#endif - #endif diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 939428ad25ac..8e3e66ac0a52 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -24,6 +24,11 @@ extern int rtnl_trylock(void); extern int rtnl_is_locked(void); #ifdef CONFIG_PROVE_LOCKING extern int lockdep_rtnl_is_held(void); +#else +static inline int lockdep_rtnl_is_held(void) +{ + return 1; +} #endif /* #ifdef CONFIG_PROVE_LOCKING */ /** diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h index 9c9f0495d37c..5b9b84b20407 100644 --- a/include/linux/rwlock_api_smp.h +++ b/include/linux/rwlock_api_smp.h @@ -172,8 +172,7 @@ static inline void __raw_read_lock_irq(rwlock_t *lock) static inline void __raw_read_lock_bh(rwlock_t *lock) { - local_bh_disable(); - preempt_disable(); + __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); } @@ -200,8 +199,7 @@ static inline void __raw_write_lock_irq(rwlock_t *lock) static inline void __raw_write_lock_bh(rwlock_t *lock) { - local_bh_disable(); - preempt_disable(); + __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); } @@ -250,8 +248,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_read_unlock(lock); - preempt_enable_no_resched(); - local_bh_enable_ip((unsigned long)__builtin_return_address(0)); + __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); } static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, @@ -275,8 +272,7 @@ static inline void __raw_write_unlock_bh(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_write_unlock(lock); - preempt_enable_no_resched(); - 
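The new !CONFIG_PROVE_LOCKING stub for lockdep_rtnl_is_held() lets callers use the function unconditionally: with lockdep enabled it performs the real check, and without it the constant 1 makes the annotation compile away. A hedged sketch of the intended call-site pattern (priv and cfg are illustrative names):

        /* Updater side, running under rtnl_lock(): the condition documents
         * (and, with CONFIG_PROVE_LOCKING, verifies) that RTNL is held. */
        struct foo_cfg *cfg = rcu_dereference_protected(priv->cfg,
                                                        lockdep_rtnl_is_held());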
local_bh_enable_ip((unsigned long)__builtin_return_address(0)); + __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); } #endif /* __LINUX_RWLOCK_API_SMP_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 7e35d4b9e14a..ffccdad050b5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -16,6 +16,7 @@ struct sched_param { #include <linux/types.h> #include <linux/timex.h> #include <linux/jiffies.h> +#include <linux/plist.h> #include <linux/rbtree.h> #include <linux/thread_info.h> #include <linux/cpumask.h> @@ -56,6 +57,70 @@ struct sched_param { #include <asm/processor.h> +#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ + +/* + * Extended scheduling parameters data structure. + * + * This is needed because the original struct sched_param can not be + * altered without introducing ABI issues with legacy applications + * (e.g., in sched_getparam()). + * + * However, the possibility of specifying more than just a priority for + * the tasks may be useful for a wide variety of application fields, e.g., + * multimedia, streaming, automation and control, and many others. + * + * This variant (sched_attr) is meant at describing a so-called + * sporadic time-constrained task. In such model a task is specified by: + * - the activation period or minimum instance inter-arrival time; + * - the maximum (or average, depending on the actual scheduling + * discipline) computation time of all instances, a.k.a. runtime; + * - the deadline (relative to the actual activation time) of each + * instance. + * Very briefly, a periodic (sporadic) task asks for the execution of + * some specific computation --which is typically called an instance-- + * (at most) every period. Moreover, each instance typically lasts no more + * than the runtime and must be completed by time instant t equal to + * the instance activation time + the deadline. + * + * This is reflected by the actual fields of the sched_attr structure: + * + * @size size of the structure, for fwd/bwd compat. + * + * @sched_policy task's scheduling policy + * @sched_flags for customizing the scheduler behaviour + * @sched_nice task's nice value (SCHED_NORMAL/BATCH) + * @sched_priority task's static priority (SCHED_FIFO/RR) + * @sched_deadline representative of the task's deadline + * @sched_runtime representative of the task's runtime + * @sched_period representative of the task's period + * + * Given this task model, there are a multiplicity of scheduling algorithms + * and policies, that can be used to ensure all the tasks will make their + * timing constraints. + * + * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the + * only user of this new interface. More information about the algorithm + * available in the scheduling class file or in Documentation/. 
+ */ +struct sched_attr { + u32 size; + + u32 sched_policy; + u64 sched_flags; + + /* SCHED_NORMAL, SCHED_BATCH */ + s32 sched_nice; + + /* SCHED_FIFO, SCHED_RR */ + u32 sched_priority; + + /* SCHED_DEADLINE */ + u64 sched_runtime; + u64 sched_deadline; + u64 sched_period; +}; + struct exec_domain; struct futex_pi_state; struct robust_list_head; @@ -168,7 +233,6 @@ extern char ___assert_task_state[1 - 2*!!( #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) -#define task_is_dead(task) ((task)->exit_state != 0) #define task_is_stopped_or_traced(task) \ ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) #define task_contributes_to_load(task) \ @@ -440,8 +504,6 @@ struct task_cputime { .sum_exec_runtime = 0, \ } -#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED) - #ifdef CONFIG_PREEMPT_COUNT #define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) #else @@ -831,8 +893,6 @@ struct sched_domain { unsigned int balance_interval; /* initialise to 1. units in ms. */ unsigned int nr_balance_failed; /* initialise to 0 */ - u64 last_update; - /* idle_balance() stats */ u64 max_newidle_lb_cost; unsigned long next_decay_max_lb_cost; @@ -934,7 +994,8 @@ struct pipe_inode_info; struct uts_namespace; struct load_weight { - unsigned long weight, inv_weight; + unsigned long weight; + u32 inv_weight; }; struct sched_avg { @@ -1032,6 +1093,51 @@ struct sched_rt_entity { #endif }; +struct sched_dl_entity { + struct rb_node rb_node; + + /* + * Original scheduling parameters. Copied here from sched_attr + * during sched_setscheduler2(), they will remain the same until + * the next sched_setscheduler2(). + */ + u64 dl_runtime; /* maximum runtime for each instance */ + u64 dl_deadline; /* relative deadline of each instance */ + u64 dl_period; /* separation of two instances (period) */ + u64 dl_bw; /* dl_runtime / dl_deadline */ + + /* + * Actual scheduling parameters. Initialized with the values above, + * they are continously updated during task execution. Note that + * the remaining runtime could be < 0 in case we are in overrun. + */ + s64 runtime; /* remaining runtime for this instance */ + u64 deadline; /* absolute deadline for this instance */ + unsigned int flags; /* specifying the scheduler behaviour */ + + /* + * Some bool flags: + * + * @dl_throttled tells if we exhausted the runtime. If so, the + * task has to wait for a replenishment to be performed at the + * next firing of dl_timer. + * + * @dl_new tells if a new instance arrived. If so we must + * start executing it with full runtime and reset its absolute + * deadline; + * + * @dl_boosted tells if we are boosted due to DI. If so we are + * outside bandwidth enforcement mechanism (but only until we + * exit the critical section). + */ + int dl_throttled, dl_new, dl_boosted; + + /* + * Bandwidth enforcement timer. Each -deadline task has its + * own bandwidth to be enforced, thus we need one timer per task. 
+ */ + struct hrtimer dl_timer; +}; struct rcu_node; @@ -1068,6 +1174,7 @@ struct task_struct { #ifdef CONFIG_CGROUP_SCHED struct task_group *sched_task_group; #endif + struct sched_dl_entity dl; #ifdef CONFIG_PREEMPT_NOTIFIERS /* list of struct preempt_notifier: */ @@ -1101,6 +1208,7 @@ struct task_struct { struct list_head tasks; #ifdef CONFIG_SMP struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; #endif struct mm_struct *mm, *active_mm; @@ -1252,9 +1360,12 @@ struct task_struct { #ifdef CONFIG_RT_MUTEXES /* PI waiters blocked on a rt_mutex held by this task */ - struct plist_head pi_waiters; + struct rb_root pi_waiters; + struct rb_node *pi_waiters_leftmost; /* Deadlock detection and priority inheritance handling */ struct rt_mutex_waiter *pi_blocked_on; + /* Top pi_waiters task */ + struct task_struct *pi_top_task; #endif #ifdef CONFIG_DEBUG_MUTEXES @@ -1883,7 +1994,9 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns) * but then during bootup it turns out that sched_clock() * is reliable after all: */ -extern int sched_clock_stable; +extern int sched_clock_stable(void); +extern void set_sched_clock_stable(void); +extern void clear_sched_clock_stable(void); extern void sched_clock_tick(void); extern void sched_clock_idle_sleep_event(void); @@ -1962,6 +2075,8 @@ extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); +extern int sched_setattr(struct task_struct *, + const struct sched_attr *); extern struct task_struct *idle_task(int cpu); /** * is_idle_task - is the specified task an idle task? @@ -2041,7 +2156,7 @@ extern void wake_up_new_task(struct task_struct *tsk); #else static inline void kick_process(struct task_struct *tsk) { } #endif -extern void sched_fork(unsigned long clone_flags, struct task_struct *p); +extern int sched_fork(unsigned long clone_flags, struct task_struct *p); extern void sched_dead(struct task_struct *p); extern void proc_caches_init(void); @@ -2630,6 +2745,21 @@ static inline bool __must_check current_clr_polling_and_test(void) } #endif +static inline void current_clr_polling(void) +{ + __current_clr_polling(); + + /* + * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. + * Once the bit is cleared, we'll get IPIs with every new + * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also + * fold. + */ + smp_mb(); /* paired with resched_task() */ + + preempt_fold_need_resched(); +} + static __always_inline bool need_resched(void) { return unlikely(tif_need_resched()); diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h new file mode 100644 index 000000000000..9d303b8847df --- /dev/null +++ b/include/linux/sched/deadline.h @@ -0,0 +1,24 @@ +#ifndef _SCHED_DEADLINE_H +#define _SCHED_DEADLINE_H + +/* + * SCHED_DEADLINE tasks has negative priorities, reflecting + * the fact that any of them has higher prio than RT and + * NORMAL/BATCH tasks. 
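struct sched_attr and the in-kernel sched_setattr() hook above are the kernel half of the new SCHED_DEADLINE ABI; the matching syscall declarations appear further down in syscalls.h. A hedged userspace sketch of requesting a 10 ms reservation every 100 ms for the calling thread, matching the two-argument sys_sched_setattr() prototype in this patch. The local struct mirrors the kernel definition above, __NR_sched_setattr is arch-specific and assumed wired up, and the parameters must satisfy runtime <= deadline <= period:

        #define _GNU_SOURCE
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/syscall.h>

        #define SCHED_DEADLINE  6       /* from the uapi sched.h hunk below */

        struct sched_attr {             /* mirrors the kernel struct above */
                uint32_t size;
                uint32_t sched_policy;
                uint64_t sched_flags;
                int32_t  sched_nice;
                uint32_t sched_priority;
                uint64_t sched_runtime;
                uint64_t sched_deadline;
                uint64_t sched_period;
        };

        int main(void)
        {
                struct sched_attr attr;

                memset(&attr, 0, sizeof(attr));
                attr.size = sizeof(attr);               /* SCHED_ATTR_SIZE_VER0 (48) */
                attr.sched_policy   = SCHED_DEADLINE;
                attr.sched_runtime  =  10 * 1000 * 1000;        /* 10 ms  */
                attr.sched_deadline =  30 * 1000 * 1000;        /* 30 ms  */
                attr.sched_period   = 100 * 1000 * 1000;        /* 100 ms */

                /* pid 0 means the calling thread; needs root/CAP_SYS_NICE. */
                if (syscall(__NR_sched_setattr, 0, &attr)) {
                        perror("sched_setattr");
                        return 1;
                }
                /* ... periodic work, at most 10 ms per 100 ms window ... */
                return 0;
        }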
+ */ + +#define MAX_DL_PRIO 0 + +static inline int dl_prio(int prio) +{ + if (unlikely(prio < MAX_DL_PRIO)) + return 1; + return 0; +} + +static inline int dl_task(struct task_struct *p) +{ + return dl_prio(p->prio); +} + +#endif /* _SCHED_DEADLINE_H */ diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index 440434df3627..34e4ebea8fce 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -35,6 +35,7 @@ static inline int rt_task(struct task_struct *p) #ifdef CONFIG_RT_MUTEXES extern int rt_mutex_getprio(struct task_struct *p); extern void rt_mutex_setprio(struct task_struct *p, int prio); +extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task); extern void rt_mutex_adjust_pi(struct task_struct *p); static inline bool tsk_is_pi_blocked(struct task_struct *tsk) { @@ -45,6 +46,10 @@ static inline int rt_mutex_getprio(struct task_struct *p) { return p->normal_prio; } +static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) +{ + return NULL; +} # define rt_mutex_adjust_pi(p) do { } while (0) static inline bool tsk_is_pi_blocked(struct task_struct *tsk) { diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 41467f8ff8ec..31e0193cb0c5 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -48,7 +48,6 @@ extern unsigned int sysctl_numa_balancing_scan_delay; extern unsigned int sysctl_numa_balancing_scan_period_min; extern unsigned int sysctl_numa_balancing_scan_period_max; extern unsigned int sysctl_numa_balancing_scan_size; -extern unsigned int sysctl_numa_balancing_settle_count; #ifdef CONFIG_SCHED_DEBUG extern unsigned int sysctl_sched_migration_cost; diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index cf87a24c0f92..535f158977b9 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -117,15 +117,15 @@ repeat: } /** - * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep + * raw_read_seqcount_begin - start seq-read critical section w/o lockdep * @s: pointer to seqcount_t * Returns: count to be passed to read_seqcount_retry * - * read_seqcount_begin_no_lockdep opens a read critical section of the given + * raw_read_seqcount_begin opens a read critical section of the given * seqcount, but without any lockdep checking. Validity of the critical * section is tested by checking read_seqcount_retry function. */ -static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s) +static inline unsigned raw_read_seqcount_begin(const seqcount_t *s) { unsigned ret = __read_seqcount_begin(s); smp_rmb(); @@ -144,7 +144,7 @@ static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s) static inline unsigned read_seqcount_begin(const seqcount_t *s) { seqcount_lockdep_reader_access(s); - return read_seqcount_begin_no_lockdep(s); + return raw_read_seqcount_begin(s); } /** @@ -206,14 +206,26 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) } + +static inline void raw_write_seqcount_begin(seqcount_t *s) +{ + s->sequence++; + smp_wmb(); +} + +static inline void raw_write_seqcount_end(seqcount_t *s) +{ + smp_wmb(); + s->sequence++; +} + /* * Sequence counter only version assumes that callers are using their * own mutexing. 
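raw_write_seqcount_begin()/raw_write_seqcount_end() split the increment-and-barrier halves out of the lockdep-aware writer API, and raw_read_seqcount_begin() is the renamed lockdep-free reader entry point. A minimal sketch of the protocol they implement; foo_seq and foo_val are illustrative, and the writer is assumed to be serialized externally (for example by a lock the seqcount is not annotated with):

        static seqcount_t foo_seq;      /* seqcount_init(&foo_seq) at setup */
        static u64 foo_val;

        static void foo_update(u64 v)   /* writer, externally serialized */
        {
                raw_write_seqcount_begin(&foo_seq);     /* odd count + smp_wmb() */
                foo_val = v;
                raw_write_seqcount_end(&foo_seq);       /* smp_wmb() + even count */
        }

        static u64 foo_read(void)       /* lockless reader */
        {
                unsigned seq;
                u64 v;

                do {
                        seq = raw_read_seqcount_begin(&foo_seq);
                        v = foo_val;
                } while (read_seqcount_retry(&foo_seq, seq));
                return v;
        }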
*/ static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) { - s->sequence++; - smp_wmb(); + raw_write_seqcount_begin(s); seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); } @@ -225,8 +237,7 @@ static inline void write_seqcount_begin(seqcount_t *s) static inline void write_seqcount_end(seqcount_t *s) { seqcount_release(&s->dep_map, 1, _RET_IP_); - smp_wmb(); - s->sequence++; + raw_write_seqcount_end(s); } /** diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 30aa0dc60d75..9d55438bc4ad 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -47,6 +47,8 @@ extern int shmem_init(void); extern int shmem_fill_super(struct super_block *sb, void *data, int silent); extern struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); +extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, + unsigned long flags); extern int shmem_zero_setup(struct vm_area_struct *); extern int shmem_lock(struct file *file, int lock, struct user_struct *user); extern void shmem_unlock_mapping(struct address_space *mapping); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index bec1cc7d5e3c..6f69b3f914fb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1638,6 +1638,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) skb->mac_header += offset; } +static inline void skb_pop_mac_header(struct sk_buff *skb) +{ + skb->mac_header = skb->network_header; +} + static inline void skb_probe_transport_header(struct sk_buff *skb, const int offset_hint) { @@ -2263,6 +2268,24 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); +/** + * pskb_trim_rcsum - trim received skb and update checksum + * @skb: buffer to trim + * @len: new length + * + * This is exactly the same as pskb_trim except that it ensures the + * checksum of received packets are still valid after the operation. + */ + +static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (likely(len >= skb->len)) + return 0; + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; + return __pskb_trim(skb, len); +} + #define skb_queue_walk(queue, skb) \ for (skb = (queue)->next; \ skb != (struct sk_buff *)(queue); \ @@ -2360,27 +2383,6 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); -/** - * pskb_trim_rcsum - trim received skb and update checksum - * @skb: buffer to trim - * @len: new length - * - * This is exactly the same as pskb_trim except that it ensures the - * checksum of received packets are still valid after the operation. - */ - -static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) -{ - if (likely(len >= skb->len)) - return 0; - if (skb->ip_summed == CHECKSUM_COMPLETE) { - __wsum adj = skb_checksum(skb, len, skb->len - len, 0); - - skb->csum = csum_sub(skb->csum, adj); - } - return __pskb_trim(skb, len); -} - static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) { @@ -2529,6 +2531,10 @@ static inline void sw_tx_timestamp(struct sk_buff *skb) * Ethernet MAC Drivers should call this function in their hard_xmit() * function immediately before giving the sk_buff to the MAC hardware. * + * Specifically, one should make absolutely sure that this function is + * called before TX completion of this packet can trigger. 
Otherwise + * the packet could potentially already be freed. + * * @skb: A socket buffer. */ static inline void skb_tx_timestamp(struct sk_buff *skb) diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 75f34949d9ab..3f2867ff0ced 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -130,6 +130,16 @@ do { \ #define smp_mb__before_spinlock() smp_wmb() #endif +/* + * Place this after a lock-acquisition primitive to guarantee that + * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies + * if the UNLOCK and LOCK are executed by the same CPU or if the + * UNLOCK and LOCK operate on the same lock variable. + */ +#ifndef smp_mb__after_unlock_lock +#define smp_mb__after_unlock_lock() do { } while (0) +#endif + /** * raw_spin_unlock_wait - wait until the spinlock gets unlocked * @lock: the spinlock in question. diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index bdb9993f0fda..42dfab89e740 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -131,8 +131,7 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock) static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) { - local_bh_disable(); - preempt_disable(); + __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } @@ -174,20 +173,17 @@ static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); do_raw_spin_unlock(lock); - preempt_enable_no_resched(); - local_bh_enable_ip((unsigned long)__builtin_return_address(0)); + __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); } static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) { - local_bh_disable(); - preempt_disable(); + __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); if (do_raw_spin_trylock(lock)) { spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); return 1; } - preempt_enable_no_resched(); - local_bh_enable_ip((unsigned long)__builtin_return_address(0)); + __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); return 0; } diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h index af1f47229e70..d0d188861ad6 100644 --- a/include/linux/spinlock_api_up.h +++ b/include/linux/spinlock_api_up.h @@ -24,11 +24,14 @@ * flags straight, to suppress compiler warnings of unused lock * variables, and to add the proper checker annotations: */ +#define ___LOCK(lock) \ + do { __acquire(lock); (void)(lock); } while (0) + #define __LOCK(lock) \ - do { preempt_disable(); __acquire(lock); (void)(lock); } while (0) + do { preempt_disable(); ___LOCK(lock); } while (0) #define __LOCK_BH(lock) \ - do { local_bh_disable(); __LOCK(lock); } while (0) + do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0) #define __LOCK_IRQ(lock) \ do { local_irq_disable(); __LOCK(lock); } while (0) @@ -36,12 +39,15 @@ #define __LOCK_IRQSAVE(lock, flags) \ do { local_irq_save(flags); __LOCK(lock); } while (0) +#define ___UNLOCK(lock) \ + do { __release(lock); (void)(lock); } while (0) + #define __UNLOCK(lock) \ - do { preempt_enable(); __release(lock); (void)(lock); } while (0) + do { preempt_enable(); ___UNLOCK(lock); } while (0) #define __UNLOCK_BH(lock) \ - do { preempt_enable_no_resched(); local_bh_enable(); \ - __release(lock); (void)(lock); } while (0) + do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \ + ___UNLOCK(lock); } while (0) #define __UNLOCK_IRQ(lock) \ do { 
local_irq_enable(); __UNLOCK(lock); } while (0) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 94273bbe6050..40ed9e9a77e5 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -38,6 +38,7 @@ struct rlimit; struct rlimit64; struct rusage; struct sched_param; +struct sched_attr; struct sel_arg_struct; struct semaphore; struct sembuf; @@ -279,9 +280,14 @@ asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param); asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param); +asmlinkage long sys_sched_setattr(pid_t pid, + struct sched_attr __user *attr); asmlinkage long sys_sched_getscheduler(pid_t pid); asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param); +asmlinkage long sys_sched_getattr(pid_t pid, + struct sched_attr __user *attr, + unsigned int size); asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr); asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, diff --git a/include/linux/tick.h b/include/linux/tick.h index 5128d33bbb39..0175d8663b6c 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -104,7 +104,7 @@ extern struct cpumask *tick_get_broadcast_oneshot_mask(void); extern void tick_clock_notify(void); extern int tick_check_oneshot_change(int allow_nohz); extern struct tick_sched *tick_get_tick_sched(int cpu); -extern void tick_check_idle(int cpu); +extern void tick_check_idle(void); extern int tick_oneshot_mode_active(void); # ifndef arch_needs_cpu # define arch_needs_cpu(cpu) (0) @@ -112,7 +112,7 @@ extern int tick_oneshot_mode_active(void); # else static inline void tick_clock_notify(void) { } static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } -static inline void tick_check_idle(int cpu) { } +static inline void tick_check_idle(void) { } static inline int tick_oneshot_mode_active(void) { return 0; } # endif @@ -121,7 +121,7 @@ static inline void tick_init(void) { } static inline void tick_cancel_sched_timer(int cpu) { } static inline void tick_clock_notify(void) { } static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } -static inline void tick_check_idle(int cpu) { } +static inline void tick_check_idle(void) { } static inline int tick_oneshot_mode_active(void) { return 0; } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ @@ -165,7 +165,7 @@ extern cpumask_var_t tick_nohz_full_mask; static inline bool tick_nohz_full_enabled(void) { - if (!static_key_false(&context_tracking_enabled)) + if (!context_tracking_is_enabled()) return false; return tick_nohz_full_running; diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index ebeab360d851..f16dc0a40049 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -267,6 +267,8 @@ static inline void tracepoint_synchronize_unregister(void) #define TRACE_EVENT_FLAGS(event, flag) +#define TRACE_EVENT_PERF_PERM(event, expr...) + #endif /* DECLARE_TRACE */ #ifndef TRACE_EVENT @@ -399,4 +401,6 @@ static inline void tracepoint_synchronize_unregister(void) #define TRACE_EVENT_FLAGS(event, flag) +#define TRACE_EVENT_PERF_PERM(event, expr...) 
+ #endif /* ifdef TRACE_EVENT (see note above) */ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 9d8cf056e661..ecd3319dac33 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -25,13 +25,16 @@ static inline void pagefault_disable(void) static inline void pagefault_enable(void) { +#ifndef CONFIG_PREEMPT /* * make sure to issue those last loads/stores before enabling * the pagefault handler again. */ barrier(); preempt_count_dec(); - preempt_check_resched(); +#else + preempt_enable(); +#endif } #ifndef ARCH_HAS_NOCACHE_UACCESS diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 319eae70fe84..e32251e00e62 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -26,16 +26,13 @@ #include <linux/errno.h> #include <linux/rbtree.h> +#include <linux/types.h> struct vm_area_struct; struct mm_struct; struct inode; struct notifier_block; -#ifdef CONFIG_ARCH_SUPPORTS_UPROBES -# include <asm/uprobes.h> -#endif - #define UPROBE_HANDLER_REMOVE 1 #define UPROBE_HANDLER_MASK 1 @@ -60,6 +57,8 @@ struct uprobe_consumer { }; #ifdef CONFIG_UPROBES +#include <asm/uprobes.h> + enum uprobe_task_state { UTASK_RUNNING, UTASK_SSTEP, @@ -72,35 +71,28 @@ enum uprobe_task_state { */ struct uprobe_task { enum uprobe_task_state state; - struct arch_uprobe_task autask; - struct return_instance *return_instances; - unsigned int depth; - struct uprobe *active_uprobe; + union { + struct { + struct arch_uprobe_task autask; + unsigned long vaddr; + }; + struct { + struct callback_head dup_xol_work; + unsigned long dup_xol_addr; + }; + }; + + struct uprobe *active_uprobe; unsigned long xol_vaddr; - unsigned long vaddr; -}; -/* - * On a breakpoint hit, thread contests for a slot. It frees the - * slot after singlestep. Currently a fixed number of slots are - * allocated. - */ -struct xol_area { - wait_queue_head_t wq; /* if all slots are busy */ - atomic_t slot_count; /* number of in-use slots */ - unsigned long *bitmap; /* 0 = free slot */ - struct page *page; - - /* - * We keep the vma's vm_start rather than a pointer to the vma - * itself. The probed process or a naughty kernel module could make - * the vma go away, and we must handle that reasonably gracefully. 
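Collapsing the single-step state and the dup-mmap worker fields of struct uprobe_task into an anonymous union, as the hunk above does, is safe because a task is in at most one of those modes at a time, so the two field groups can overlay the same storage. A tiny illustration of the pattern (generic sketch, not kernel code):

        struct job {
                int state;
                union {
                        struct {                /* valid while state == RUNNING */
                                unsigned long start_addr;
                                unsigned long saved_flags;
                        };
                        struct {                /* valid while state == QUEUED */
                                void (*callback)(struct job *);
                                unsigned long callback_arg;
                        };
                };
        };

Because the struct and union are anonymous, members are addressed directly (job.start_addr), so existing field references stay unchanged while the structure shrinks.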
- */ - unsigned long vaddr; /* Page(s) of instruction slots */ + struct return_instance *return_instances; + unsigned int depth; }; +struct xol_area; + struct uprobes_state { struct xol_area *xol_area; }; @@ -109,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); extern bool __weak is_trap_insn(uprobe_opcode_t *insn); +extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); @@ -120,7 +113,6 @@ extern void uprobe_end_dup_mmap(void); extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm); extern void uprobe_free_utask(struct task_struct *t); extern void uprobe_copy_process(struct task_struct *t, unsigned long flags); -extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); extern int uprobe_post_sstep_notifier(struct pt_regs *regs); extern int uprobe_pre_sstep_notifier(struct pt_regs *regs); extern void uprobe_notify_resume(struct pt_regs *regs); @@ -176,10 +168,6 @@ static inline bool uprobe_deny_signal(void) { return false; } -static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) -{ - return 0; -} static inline void uprobe_free_utask(struct task_struct *t) { } diff --git a/include/linux/usb.h b/include/linux/usb.h index 7454865ad148..512ab162832c 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1264,6 +1264,8 @@ typedef void (*usb_complete_t)(struct urb *); * @sg: scatter gather buffer list, the buffer size of each element in * the list (except the last) must be divisible by the endpoint's * max packet size if no_sg_constraint isn't set in 'struct usb_bus' + * (FIXME: scatter-gather under xHCI is broken for periodic transfers. + * Do not use urb->sg for interrupt endpoints for now, only bulk.) * @num_mapped_sgs: (internal) number of mapped sg entries * @num_sgs: number of entries in the sg list * @transfer_buffer_length: How big is transfer_buffer. The transfer may diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h index 0c4d4ca370ec..eeb28329fa3c 100644 --- a/include/linux/usb/wusb.h +++ b/include/linux/usb/wusb.h @@ -271,6 +271,8 @@ static inline u8 wusb_key_index(int index, int type, int originator) #define WUSB_KEY_INDEX_TYPE_GTK 2 #define WUSB_KEY_INDEX_ORIGINATOR_HOST 0 #define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1 +/* bits 0-3 used for the key index. 
*/ +#define WUSB_KEY_INDEX_MAX 15 /* A CCM Nonce, defined in WUSB1.0[6.4.1] */ struct aes_ccm_nonce { diff --git a/include/linux/vtime.h b/include/linux/vtime.h index f5b72b364bda..c5165fd256f9 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -19,8 +19,8 @@ static inline bool vtime_accounting_enabled(void) { return true; } #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN static inline bool vtime_accounting_enabled(void) { - if (static_key_false(&context_tracking_enabled)) { - if (context_tracking_active()) + if (context_tracking_is_enabled()) { + if (context_tracking_cpu_is_enabled()) return true; } diff --git a/include/linux/zorro.h b/include/linux/zorro.h index dff42025649b..63fbba0740c2 100644 --- a/include/linux/zorro.h +++ b/include/linux/zorro.h @@ -11,107 +11,10 @@ #ifndef _LINUX_ZORRO_H #define _LINUX_ZORRO_H -#include <linux/device.h> - - - /* - * Each Zorro board has a 32-bit ID of the form - * - * mmmmmmmmmmmmmmmmppppppppeeeeeeee - * - * with - * - * mmmmmmmmmmmmmmmm 16-bit Manufacturer ID (assigned by CBM (sigh)) - * pppppppp 8-bit Product ID (assigned by manufacturer) - * eeeeeeee 8-bit Extended Product ID (currently only used - * for some GVP boards) - */ - - -#define ZORRO_MANUF(id) ((id) >> 16) -#define ZORRO_PROD(id) (((id) >> 8) & 0xff) -#define ZORRO_EPC(id) ((id) & 0xff) - -#define ZORRO_ID(manuf, prod, epc) \ - ((ZORRO_MANUF_##manuf << 16) | ((prod) << 8) | (epc)) - -typedef __u32 zorro_id; - - -/* Include the ID list */ -#include <linux/zorro_ids.h> - - /* - * GVP identifies most of its products through the 'extended product code' - * (epc). The epc has to be ANDed with the GVP_PRODMASK before the - * identification. - */ - -#define GVP_PRODMASK (0xf8) -#define GVP_SCSICLKMASK (0x01) - -enum GVP_flags { - GVP_IO = 0x01, - GVP_ACCEL = 0x02, - GVP_SCSI = 0x04, - GVP_24BITDMA = 0x08, - GVP_25BITDMA = 0x10, - GVP_NOBANK = 0x20, - GVP_14MHZ = 0x40, -}; - - -struct Node { - struct Node *ln_Succ; /* Pointer to next (successor) */ - struct Node *ln_Pred; /* Pointer to previous (predecessor) */ - __u8 ln_Type; - __s8 ln_Pri; /* Priority, for sorting */ - __s8 *ln_Name; /* ID string, null terminated */ -} __attribute__ ((packed)); - -struct ExpansionRom { - /* -First 16 bytes of the expansion ROM */ - __u8 er_Type; /* Board type, size and flags */ - __u8 er_Product; /* Product number, assigned by manufacturer */ - __u8 er_Flags; /* Flags */ - __u8 er_Reserved03; /* Must be zero ($ff inverted) */ - __u16 er_Manufacturer; /* Unique ID, ASSIGNED BY COMMODORE-AMIGA! 
*/ - __u32 er_SerialNumber; /* Available for use by manufacturer */ - __u16 er_InitDiagVec; /* Offset to optional "DiagArea" structure */ - __u8 er_Reserved0c; - __u8 er_Reserved0d; - __u8 er_Reserved0e; - __u8 er_Reserved0f; -} __attribute__ ((packed)); - -/* er_Type board type bits */ -#define ERT_TYPEMASK 0xc0 -#define ERT_ZORROII 0xc0 -#define ERT_ZORROIII 0x80 - -/* other bits defined in er_Type */ -#define ERTB_MEMLIST 5 /* Link RAM into free memory list */ -#define ERTF_MEMLIST (1<<5) - -struct ConfigDev { - struct Node cd_Node; - __u8 cd_Flags; /* (read/write) */ - __u8 cd_Pad; /* reserved */ - struct ExpansionRom cd_Rom; /* copy of board's expansion ROM */ - void *cd_BoardAddr; /* where in memory the board was placed */ - __u32 cd_BoardSize; /* size of board in bytes */ - __u16 cd_SlotAddr; /* which slot number (PRIVATE) */ - __u16 cd_SlotSize; /* number of slots (PRIVATE) */ - void *cd_Driver; /* pointer to node of driver */ - struct ConfigDev *cd_NextCD; /* linked list of drivers to config */ - __u32 cd_Unused[4]; /* for whatever the driver wants */ -} __attribute__ ((packed)); - -#define ZORRO_NUM_AUTO 16 - -#ifdef __KERNEL__ +#include <uapi/linux/zorro.h> +#include <linux/device.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/mod_devicetable.h> @@ -175,7 +78,23 @@ static inline struct zorro_driver *zorro_dev_driver(const struct zorro_dev *z) extern unsigned int zorro_num_autocon; /* # of autoconfig devices found */ -extern struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO]; +extern struct zorro_dev *zorro_autocon; + + + /* + * Minimal information about a Zorro device, passed from bootinfo + * Only available temporarily, i.e. until initmem has been freed! + */ + +struct zorro_dev_init { + struct ExpansionRom rom; + u16 slotaddr; + u16 slotsize; + u32 boardaddr; + u32 boardsize; +}; + +extern struct zorro_dev_init zorro_autocon_init[ZORRO_NUM_AUTO] __initdata; /* @@ -229,6 +148,4 @@ extern DECLARE_BITMAP(zorro_unused_z2ram, 128); #define Z2RAM_CHUNKSHIFT (16) -#endif /* __KERNEL__ */ - #endif /* _LINUX_ZORRO_H */ diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index bd8218b15009..941055e9d125 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -83,7 +83,7 @@ struct vb2_fileio_data; struct vb2_mem_ops { void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags); void (*put)(void *buf_priv); - struct dma_buf *(*get_dmabuf)(void *buf_priv); + struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags); void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr, unsigned long size, int write); diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 829627d7b846..1d67fb6b23a0 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -42,27 +42,10 @@ static inline bool net_busy_loop_on(void) return sysctl_net_busy_poll; } -/* a wrapper to make debug_smp_processor_id() happy - * we can use sched_clock() because we don't care much about precision - * we only care that the average is bounded - */ -#ifdef CONFIG_DEBUG_PREEMPT -static inline u64 busy_loop_us_clock(void) -{ - u64 rc; - - preempt_disable_notrace(); - rc = sched_clock(); - preempt_enable_no_resched_notrace(); - - return rc >> 10; -} -#else /* CONFIG_DEBUG_PREEMPT */ static inline u64 busy_loop_us_clock(void) { - return sched_clock() >> 10; + return local_clock() >> 10; } -#endif /* CONFIG_DEBUG_PREEMPT */ static inline unsigned long sk_busy_loop_end_time(struct sock *sk) { diff --git a/include/net/if_inet6.h 
b/include/net/if_inet6.h index 76d54270f2e2..65bb13035598 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -165,7 +165,6 @@ struct inet6_dev { struct net_device *dev; struct list_head addr_list; - int valid_ll_addr_cnt; struct ifmcaddr6 *mc_list; struct ifmcaddr6 *mc_tomb; diff --git a/include/net/ip.h b/include/net/ip.h index 217bc5bfc6c6..5a25f36fe3a7 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -473,7 +473,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)); -int ip_recv_error(struct sock *sk, struct msghdr *msg, int len); +int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len); void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload); void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 2a5f668cd683..488316e339a1 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -110,7 +110,8 @@ struct frag_hdr { __be32 identification; }; -#define IP6_MF 0x0001 +#define IP6_MF 0x0001 +#define IP6_OFFSET 0xFFF8 #include <net/sock.h> @@ -776,8 +777,10 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); -int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len); -int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len); +int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, + int *addr_len); +int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, + int *addr_len); void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload); void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 31e2de7d57c5..c0f0a13ed818 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -142,7 +142,7 @@ #define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0) #define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1) -#define PDU_GET_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO) +#define PDU_GET_NEXT_Vr(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO) /* FRMR information field macros */ diff --git a/include/net/ping.h b/include/net/ping.h index 3f67704f3747..90f48417b03d 100644 --- a/include/net/ping.h +++ b/include/net/ping.h @@ -31,7 +31,8 @@ /* Compatibility glue so we can support IPv6 when it's compiled as a module */ struct pingv6_ops { - int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len); + int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len, + int *addr_len); int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg, struct sk_buff *skb); int (*icmpv6_err_convert)(u8 type, u8 code, int *err); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 2174d8da0770..0a248b323d87 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -629,6 +629,7 @@ struct sctp_chunk { #define SCTP_NEED_FRTX 0x1 #define SCTP_DONT_FRTX 0x2 __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */ + resent:1, /* Has this chunk ever been resent. */ has_tsn:1, /* Does this chunk have a TSN yet? */ has_ssn:1, /* Does this chunk have a SSN yet? */ singleton:1, /* Only chunk in the packet? */ @@ -1045,9 +1046,6 @@ struct sctp_outq { /* Corked? */ char cork; - - /* Is this structure empty? 
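The PDU_GET_NEXT_Vr() change just above is a macro-hygiene fix: the old body incremented its argument, so every expansion mutated the caller's variable, and an argument with side effects would have misbehaved outright. A sketch of the difference, with LLC_2_SEQ_NBR_MODULO being the 7-bit sequence-number modulus (128) from the same header, inside some function:

        #define OLD_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO)
        #define NEW_NEXT_Vr(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)

        u8 vr = 5;
        u8 a = OLD_NEXT_Vr(vr); /* a == 6, but vr was silently bumped to 6 */
        u8 b = NEW_NEXT_Vr(vr); /* b == 7, vr still 6: a pure expression  */

The new form also parenthesizes sn, so NEW_NEXT_Vr(x + y) expands correctly.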
*/ - char empty; }; void sctp_outq_init(struct sctp_association *, struct sctp_outq *); @@ -1725,12 +1723,6 @@ struct sctp_association { /* How many duplicated TSNs have we seen? */ int numduptsns; - /* Number of seconds of idle time before an association is closed. - * In the association context, this is really used as a boolean - * since the real timeout is stored in the timeouts array - */ - __u32 autoclose; - /* These are to support * "SCTP Extensions for Dynamic Reconfiguration of IP Addresses * and Enforcement of Flow and Message Limits" diff --git a/include/net/sock.h b/include/net/sock.h index e3a18ff0c38b..2ef3c3eca47a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1035,7 +1035,6 @@ enum cg_proto_flags { }; struct cg_proto { - void (*enter_memory_pressure)(struct sock *sk); struct res_counter memory_allocated; /* Current allocated memory. */ struct percpu_counter sockets_allocated; /* Current number of sockets. */ int memory_pressure; @@ -1155,8 +1154,7 @@ static inline void sk_leave_memory_pressure(struct sock *sk) struct proto *prot = sk->sk_prot; for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) - if (cg_proto->memory_pressure) - cg_proto->memory_pressure = 0; + cg_proto->memory_pressure = 0; } } @@ -1171,7 +1169,7 @@ static inline void sk_enter_memory_pressure(struct sock *sk) struct proto *prot = sk->sk_prot; for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) - cg_proto->enter_memory_pressure(sk); + cg_proto->memory_pressure = 1; } sk->sk_prot->enter_memory_pressure(sk); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 979874c627ee..61e1935c91b1 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -978,7 +978,7 @@ struct ib_uobject { }; struct ib_udata { - void __user *inbuf; + const void __user *inbuf; void __user *outbuf; size_t inlen; size_t outlen; diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 546084964d55..fe3b58e836c8 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -475,6 +475,9 @@ struct scsi_host_template { */ unsigned ordered_tag:1; + /* True if the controller does not support WRITE SAME */ + unsigned no_write_same:1; + /* * Countdown for host blocking with no commands outstanding. 
*/ @@ -677,6 +680,9 @@ struct Scsi_Host { /* Don't resume host in EH */ unsigned eh_noresume:1; + /* The controller does not support WRITE SAME */ + unsigned no_write_same:1; + /* * Optional work queue to be utilized by the transport */ diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h index af9983970417..5f73785f5977 100644 --- a/include/sound/memalloc.h +++ b/include/sound/memalloc.h @@ -108,7 +108,7 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, { struct snd_sg_buf *sgbuf = dmab->private_data; dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr; - addr &= PAGE_MASK; + addr &= ~((dma_addr_t)PAGE_SIZE - 1); return addr + offset % PAGE_SIZE; } diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 2037c45adfe6..56ebdfca6273 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -104,7 +104,8 @@ struct device; SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \ .kcontrol_news = wcontrols, .num_kcontrols = 1} #define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \ -{ .id = snd_soc_dapm_mux, .name = wname, .reg = wreg, \ +{ .id = snd_soc_dapm_mux, .name = wname, \ + SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \ .kcontrol_news = wcontrols, .num_kcontrols = 1} #define SND_SOC_DAPM_VIRT_MUX(wname, wreg, wshift, winvert, wcontrols) \ { .id = snd_soc_dapm_virt_mux, .name = wname, \ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 45412a6afa69..321301c0a643 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -517,10 +517,6 @@ struct se_node_acl { u32 acl_index; #define MAX_ACL_TAG_SIZE 64 char acl_tag[MAX_ACL_TAG_SIZE]; - u64 num_cmds; - u64 read_bytes; - u64 write_bytes; - spinlock_t stats_lock; /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ atomic_t acl_pr_ref_count; struct se_dev_entry **device_list; @@ -624,6 +620,7 @@ struct se_dev_attrib { u32 unmap_granularity; u32 unmap_granularity_alignment; u32 max_write_same_len; + u32 max_bytes_per_io; struct se_device *da_dev; struct config_group da_group; }; diff --git a/include/trace/events/ras.h b/include/trace/events/ras.h index 88b878383797..1c875ad1ee5f 100644 --- a/include/trace/events/ras.h +++ b/include/trace/events/ras.h @@ -5,7 +5,7 @@ #define _TRACE_AER_H #include <linux/tracepoint.h> -#include <linux/edac.h> +#include <linux/aer.h> /* @@ -63,10 +63,10 @@ TRACE_EVENT(aer_event, TP_printk("%s PCIe Bus Error: severity=%s, %s\n", __get_str(dev_name), - __entry->severity == HW_EVENT_ERR_CORRECTED ? "Corrected" : - __entry->severity == HW_EVENT_ERR_FATAL ? - "Fatal" : "Uncorrected", - __entry->severity == HW_EVENT_ERR_CORRECTED ? + __entry->severity == AER_CORRECTABLE ? "Corrected" : + __entry->severity == AER_FATAL ? + "Fatal" : "Uncorrected, non-fatal", + __entry->severity == AER_CORRECTABLE ? __print_flags(__entry->status, "|", aer_correctable_errors) : __print_flags(__entry->status, "|", aer_uncorrectable_errors)) ); diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index d17a35c6537e..5c38606613d8 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -90,6 +90,10 @@ #define TRACE_EVENT_FLAGS(name, value) \ __TRACE_EVENT_FLAGS(name, value) +#undef TRACE_EVENT_PERF_PERM +#define TRACE_EVENT_PERF_PERM(name, expr...) 
\ + __TRACE_EVENT_PERF_PERM(name, expr) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -140,6 +144,9 @@ #undef TRACE_EVENT_FLAGS #define TRACE_EVENT_FLAGS(event, flag) +#undef TRACE_EVENT_PERF_PERM +#define TRACE_EVENT_PERF_PERM(event, expr...) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 2f3f7ea8c77b..fe421e8a431b 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -983,6 +983,8 @@ struct drm_radeon_cs { #define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17 /* CIK macrotile mode array */ #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18 +/* query the number of render backends */ +#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19 struct drm_radeon_info { diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index bcb0912afe7a..f854ca4a1372 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -75,6 +75,7 @@ #define DRM_VMW_PARAM_FIFO_CAPS 4 #define DRM_VMW_PARAM_MAX_FB_SIZE 5 #define DRM_VMW_PARAM_FIFO_HW_VERSION 6 +#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 /** * struct drm_vmw_getparam_arg diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 33d2b8fe166d..3ce25b5d75a9 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -426,3 +426,5 @@ header-y += x25.h header-y += xattr.h header-y += xfrm.h header-y += hw_breakpoint.h +header-y += zorro.h +header-y += zorro_ids.h diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h index 2c267bcbb85c..bc81fb2e1f0e 100644 --- a/include/uapi/linux/eventpoll.h +++ b/include/uapi/linux/eventpoll.h @@ -61,5 +61,16 @@ struct epoll_event { __u64 data; } EPOLL_PACKED; - +#ifdef CONFIG_PM_SLEEP +static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev) +{ + if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND)) + epev->events &= ~EPOLLWAKEUP; +} +#else +static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev) +{ + epev->events &= ~EPOLLWAKEUP; +} +#endif #endif /* _UAPI_LINUX_EVENTPOLL_H */ diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h index 1af72d8228e0..c3363ba1ae05 100644 --- a/include/uapi/linux/genetlink.h +++ b/include/uapi/linux/genetlink.h @@ -28,6 +28,7 @@ struct genlmsghdr { #define GENL_ID_GENERATE 0 #define GENL_ID_CTRL NLMSG_MIN_TYPE #define GENL_ID_VFS_DQUOT (NLMSG_MIN_TYPE + 1) +#define GENL_ID_PMCRAID (NLMSG_MIN_TYPE + 2) /************************************************************************** * Controller diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index b78566f59aba..6db460121f84 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -488,7 +488,9 @@ enum { IFLA_HSR_UNSPEC, IFLA_HSR_SLAVE1, IFLA_HSR_SLAVE2, - IFLA_HSR_MULTICAST_SPEC, + IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */ + IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */ + IFLA_HSR_SEQ_NR, __IFLA_HSR_MAX, }; diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index a3726275876d..bd24470d24a2 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -464,7 +464,8 @@ struct input_keymap_entry { #define KEY_BRIGHTNESS_ZERO 244 /* brightness off, use ambient */ #define KEY_DISPLAY_OFF 245 /* display device to off state */ -#define KEY_WIMAX 246 +#define KEY_WWAN 246 /* Wireless WAN (LTE, UMTS, GSM, etc.) 
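The snd_sgbuf_get_addr() hunk a little further up replaces "addr &= PAGE_MASK" with a mask computed at dma_addr_t width. PAGE_MASK has type unsigned long, so on a 32-bit kernel with a 64-bit dma_addr_t (PAE/LPAE systems) the old AND zero-extended a 32-bit mask and silently cleared the upper half of the bus address. A worked example with 4 KiB pages:

        dma_addr_t addr = 0x123456789ULL;                       /* >4 GiB bus address  */
        dma_addr_t bad  = addr & PAGE_MASK;                     /* 0x23456000: bits 32+ lost */
        dma_addr_t good = addr & ~((dma_addr_t)PAGE_SIZE - 1);  /* 0x123456000: correct */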
*/ +#define KEY_WIMAX KEY_WWAN #define KEY_RFKILL 247 /* Key that controls all radios */ #define KEY_MICMUTE 248 /* Mute / unmute the microphone */ @@ -719,6 +720,8 @@ struct input_keymap_entry { #define BTN_DPAD_LEFT 0x222 #define BTN_DPAD_RIGHT 0x223 +#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */ + #define BTN_TRIGGER_HAPPY 0x2c0 #define BTN_TRIGGER_HAPPY1 0x2c0 #define BTN_TRIGGER_HAPPY2 0x2c1 @@ -856,6 +859,7 @@ struct input_keymap_entry { #define SW_FRONT_PROXIMITY 0x0b /* set = front proximity sensor active */ #define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */ #define SW_LINEIN_INSERT 0x0d /* set = inserted */ +#define SW_MUTE_DEVICE 0x0e /* set = device disabled */ #define SW_MAX 0x0f #define SW_CNT (SW_MAX+1) diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 104838f65bc1..d6629d49a243 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -18,6 +18,7 @@ */ #define KEXEC_ARCH_DEFAULT ( 0 << 16) #define KEXEC_ARCH_386 ( 3 << 16) +#define KEXEC_ARCH_68K ( 4 << 16) #define KEXEC_ARCH_X86_64 (62 << 16) #define KEXEC_ARCH_PPC (20 << 16) #define KEXEC_ARCH_PPC64 (21 << 16) diff --git a/include/uapi/linux/mic_common.h b/include/uapi/linux/mic_common.h index 17e7d95e4f53..6eb40244e019 100644 --- a/include/uapi/linux/mic_common.h +++ b/include/uapi/linux/mic_common.h @@ -23,12 +23,7 @@ #include <linux/virtio_ring.h> -#ifndef __KERNEL__ -#define ALIGN(a, x) (((a) + (x) - 1) & ~((x) - 1)) -#define __aligned(x) __attribute__ ((aligned(x))) -#endif - -#define mic_aligned_size(x) ALIGN(sizeof(x), 8) +#define __mic_align(a, x) (((a) + (x) - 1) & ~((x) - 1)) /** * struct mic_device_desc: Virtio device information shared between the @@ -48,8 +43,8 @@ struct mic_device_desc { __u8 feature_len; __u8 config_len; __u8 status; - __u64 config[0]; -} __aligned(8); + __le64 config[0]; +} __attribute__ ((aligned(8))); /** * struct mic_device_ctrl: Per virtio device information in the device page @@ -66,7 +61,7 @@ struct mic_device_desc { * @h2c_vdev_db: The doorbell number to be used by host. Set by guest. */ struct mic_device_ctrl { - __u64 vdev; + __le64 vdev; __u8 config_change; __u8 vdev_reset; __u8 guest_ack; @@ -74,7 +69,7 @@ struct mic_device_ctrl { __u8 used_address_updated; __s8 c2h_vdev_db; __s8 h2c_vdev_db; -} __aligned(8); +} __attribute__ ((aligned(8))); /** * struct mic_bootparam: Virtio device independent information in device page @@ -87,13 +82,13 @@ struct mic_device_ctrl { * @shutdown_card: Set to 1 by the host when a card shutdown is initiated */ struct mic_bootparam { - __u32 magic; + __le32 magic; __s8 c2h_shutdown_db; __s8 h2c_shutdown_db; __s8 h2c_config_db; __u8 shutdown_status; __u8 shutdown_card; -} __aligned(8); +} __attribute__ ((aligned(8))); /** * struct mic_device_page: High level representation of the device page @@ -116,10 +111,10 @@ struct mic_device_page { * @num: The number of entries in the virtio_ring */ struct mic_vqconfig { - __u64 address; - __u64 used_address; - __u16 num; -} __aligned(8); + __le64 address; + __le64 used_address; + __le16 num; +} __attribute__ ((aligned(8))); /* * The alignment to use between consumer and producer parts of vring. 
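The mic_common.h conversion from __u64/__u16/int fields to __le64/__le16/__le32 pins the byte order of the page shared between the host and the MIC card, so neither side can interpret it natively by accident, and sparse can flag any direct mixed-endian access. Accesses then go through the usual conversion helpers; a sketch (vqconfig and vring_addr are illustrative names):

        /* producer side: store the vring address in little-endian form */
        vqconfig->address = cpu_to_le64(vring_addr);

        /* consumer side: read it back in CPU byte order */
        dma_addr_t vring_addr = le64_to_cpu(vqconfig->address);

On little-endian hosts the helpers compile to nothing, so the annotation costs nothing at runtime.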
@@ -154,7 +149,7 @@ struct mic_vqconfig { */ struct _mic_vring_info { __u16 avail_idx; - int magic; + __le32 magic; }; /** @@ -173,15 +168,13 @@ struct mic_vring { int len; }; -#define mic_aligned_desc_size(d) ALIGN(mic_desc_size(d), 8) +#define mic_aligned_desc_size(d) __mic_align(mic_desc_size(d), 8) #ifndef INTEL_MIC_CARD static inline unsigned mic_desc_size(const struct mic_device_desc *desc) { - return mic_aligned_size(*desc) - + desc->num_vq * mic_aligned_size(struct mic_vqconfig) - + desc->feature_len * 2 - + desc->config_len; + return sizeof(*desc) + desc->num_vq * sizeof(struct mic_vqconfig) + + desc->feature_len * 2 + desc->config_len; } static inline struct mic_vqconfig * @@ -201,8 +194,7 @@ static inline __u8 *mic_vq_configspace(const struct mic_device_desc *desc) } static inline unsigned mic_total_desc_size(struct mic_device_desc *desc) { - return mic_aligned_desc_size(desc) + - mic_aligned_size(struct mic_device_ctrl); + return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl); } #endif diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h index 4e31db4eea41..f2159d30d1f5 100644 --- a/include/uapi/linux/netlink_diag.h +++ b/include/uapi/linux/netlink_diag.h @@ -33,6 +33,7 @@ struct netlink_diag_ring { }; enum { + /* NETLINK_DIAG_NONE, standard nl API requires this attribute! */ NETLINK_DIAG_MEMINFO, NETLINK_DIAG_GROUPS, NETLINK_DIAG_RX_RING, diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h index b2cc0cd9c4d9..d08c63f3dd6f 100644 --- a/include/uapi/linux/packet_diag.h +++ b/include/uapi/linux/packet_diag.h @@ -29,6 +29,7 @@ struct packet_diag_msg { }; enum { + /* PACKET_DIAG_NONE, standard nl API requires this attribute! */ PACKET_DIAG_INFO, PACKET_DIAG_MCLIST, PACKET_DIAG_RX_RING, diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index e1802d6153ae..e244ed412745 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -679,6 +679,7 @@ enum perf_event_type { * * { u64 weight; } && PERF_SAMPLE_WEIGHT * { u64 data_src; } && PERF_SAMPLE_DATA_SRC + * { u64 transaction; } && PERF_SAMPLE_TRANSACTION * }; */ PERF_RECORD_SAMPLE = 9, @@ -724,6 +725,7 @@ enum perf_callchain_context { #define PERF_FLAG_FD_NO_GROUP (1U << 0) #define PERF_FLAG_FD_OUTPUT (1U << 1) #define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */ +#define PERF_FLAG_FD_CLOEXEC (1U << 3) /* O_CLOEXEC */ union perf_mem_data_src { __u64 val; diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h index 5a0f945927ac..34f9d7387d13 100644 --- a/include/uapi/linux/sched.h +++ b/include/uapi/linux/sched.h @@ -39,8 +39,14 @@ #define SCHED_BATCH 3 /* SCHED_ISO: reserved but not implemented yet */ #define SCHED_IDLE 5 +#define SCHED_DEADLINE 6 + /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ #define SCHED_RESET_ON_FORK 0x40000000 +/* + * For the sched_{set,get}attr() calls + */ +#define SCHED_FLAG_RESET_ON_FORK 0x01 #endif /* _UAPI_LINUX_SCHED_H */ diff --git a/include/uapi/linux/unix_diag.h b/include/uapi/linux/unix_diag.h index b9e2a6a7446f..1eb0b8dd1830 100644 --- a/include/uapi/linux/unix_diag.h +++ b/include/uapi/linux/unix_diag.h @@ -31,6 +31,7 @@ struct unix_diag_msg { }; enum { + /* UNIX_DIAG_NONE, standard nl API requires this attribute! 
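The "*_NONE, standard nl API requires this attribute!" comments added to these diag enums record a netlink convention: attribute type 0 is reserved as "no attribute", and these older enums start their first real attribute at 0, which cannot be fixed without breaking the ABI. New attribute enums avoid the trap by spending value 0 on an UNSPEC entry; the usual shape (FOO_* names illustrative):

        enum {
                FOO_UNSPEC,     /* type 0: reserved, never put on the wire */
                FOO_INFO,
                FOO_STATS,
                __FOO_MAX,
        };
        #define FOO_MAX (__FOO_MAX - 1)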
*/ UNIX_DIAG_NAME, UNIX_DIAG_VFS, UNIX_DIAG_PEER, diff --git a/include/uapi/linux/zorro.h b/include/uapi/linux/zorro.h new file mode 100644 index 000000000000..59d021b242ed --- /dev/null +++ b/include/uapi/linux/zorro.h @@ -0,0 +1,113 @@ +/* + * linux/zorro.h -- Amiga AutoConfig (Zorro) Bus Definitions + * + * Copyright (C) 1995--2003 Geert Uytterhoeven + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + +#ifndef _UAPI_LINUX_ZORRO_H +#define _UAPI_LINUX_ZORRO_H + +#include <linux/types.h> + + + /* + * Each Zorro board has a 32-bit ID of the form + * + * mmmmmmmmmmmmmmmmppppppppeeeeeeee + * + * with + * + * mmmmmmmmmmmmmmmm 16-bit Manufacturer ID (assigned by CBM (sigh)) + * pppppppp 8-bit Product ID (assigned by manufacturer) + * eeeeeeee 8-bit Extended Product ID (currently only used + * for some GVP boards) + */ + + +#define ZORRO_MANUF(id) ((id) >> 16) +#define ZORRO_PROD(id) (((id) >> 8) & 0xff) +#define ZORRO_EPC(id) ((id) & 0xff) + +#define ZORRO_ID(manuf, prod, epc) \ + ((ZORRO_MANUF_##manuf << 16) | ((prod) << 8) | (epc)) + +typedef __u32 zorro_id; + + +/* Include the ID list */ +#include <linux/zorro_ids.h> + + + /* + * GVP identifies most of its products through the 'extended product code' + * (epc). The epc has to be ANDed with the GVP_PRODMASK before the + * identification. + */ + +#define GVP_PRODMASK (0xf8) +#define GVP_SCSICLKMASK (0x01) + +enum GVP_flags { + GVP_IO = 0x01, + GVP_ACCEL = 0x02, + GVP_SCSI = 0x04, + GVP_24BITDMA = 0x08, + GVP_25BITDMA = 0x10, + GVP_NOBANK = 0x20, + GVP_14MHZ = 0x40, +}; + + +struct Node { + __be32 ln_Succ; /* Pointer to next (successor) */ + __be32 ln_Pred; /* Pointer to previous (predecessor) */ + __u8 ln_Type; + __s8 ln_Pri; /* Priority, for sorting */ + __be32 ln_Name; /* ID string, null terminated */ +} __packed; + +struct ExpansionRom { + /* -First 16 bytes of the expansion ROM */ + __u8 er_Type; /* Board type, size and flags */ + __u8 er_Product; /* Product number, assigned by manufacturer */ + __u8 er_Flags; /* Flags */ + __u8 er_Reserved03; /* Must be zero ($ff inverted) */ + __be16 er_Manufacturer; /* Unique ID, ASSIGNED BY COMMODORE-AMIGA! 
*/ + __be32 er_SerialNumber; /* Available for use by manufacturer */ + __be16 er_InitDiagVec; /* Offset to optional "DiagArea" structure */ + __u8 er_Reserved0c; + __u8 er_Reserved0d; + __u8 er_Reserved0e; + __u8 er_Reserved0f; +} __packed; + +/* er_Type board type bits */ +#define ERT_TYPEMASK 0xc0 +#define ERT_ZORROII 0xc0 +#define ERT_ZORROIII 0x80 + +/* other bits defined in er_Type */ +#define ERTB_MEMLIST 5 /* Link RAM into free memory list */ +#define ERTF_MEMLIST (1<<5) + +struct ConfigDev { + struct Node cd_Node; + __u8 cd_Flags; /* (read/write) */ + __u8 cd_Pad; /* reserved */ + struct ExpansionRom cd_Rom; /* copy of board's expansion ROM */ + __be32 cd_BoardAddr; /* where in memory the board was placed */ + __be32 cd_BoardSize; /* size of board in bytes */ + __be16 cd_SlotAddr; /* which slot number (PRIVATE) */ + __be16 cd_SlotSize; /* number of slots (PRIVATE) */ + __be32 cd_Driver; /* pointer to node of driver */ + __be32 cd_NextCD; /* linked list of drivers to config */ + __be32 cd_Unused[4]; /* for whatever the driver wants */ +} __packed; + +#define ZORRO_NUM_AUTO 16 + +#endif /* _UAPI_LINUX_ZORRO_H */ diff --git a/include/linux/zorro_ids.h b/include/uapi/linux/zorro_ids.h index 74bc53bcfdcf..74bc53bcfdcf 100644 --- a/include/linux/zorro_ids.h +++ b/include/uapi/linux/zorro_ids.h diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h index d630163b9a2e..5759810e1c1b 100644 --- a/include/uapi/sound/compress_offload.h +++ b/include/uapi/sound/compress_offload.h @@ -30,7 +30,7 @@ #include <sound/compress_params.h> -#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 1) +#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2) /** * struct snd_compressed_buffer: compressed buffer * @fragment_size: size of buffer fragment in bytes @@ -67,8 +67,8 @@ struct snd_compr_params { struct snd_compr_tstamp { __u32 byte_offset; __u32 copied_total; - snd_pcm_uframes_t pcm_frames; - snd_pcm_uframes_t pcm_io_frames; + __u32 pcm_frames; + __u32 pcm_io_frames; __u32 sampling_rate; }; diff --git a/include/xen/interface/callback.h b/include/xen/interface/callback.h index 8c5fa0e20155..dc3193f4b581 100644 --- a/include/xen/interface/callback.h +++ b/include/xen/interface/callback.h @@ -36,7 +36,7 @@ * @extra_args == Operation-specific extra arguments (NULL if none). */ -/* ia64, x86: Callback for event delivery. */ +/* x86: Callback for event delivery. */ #define CALLBACKTYPE_event 0 /* x86: Failsafe callback when guest state cannot be restored by Xen. */ diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index 65e12099ef89..ae665ac59c36 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h @@ -146,7 +146,7 @@ struct blkif_request_segment_aligned { struct blkif_request_rw { uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ #endif uint64_t id; /* private guest value, echoed in resp */ @@ -163,7 +163,7 @@ struct blkif_request_discard { uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. 
*/ #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ blkif_vdev_t _pad1; /* only for read/write requests */ -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ @@ -175,7 +175,7 @@ struct blkif_request_discard { struct blkif_request_other { uint8_t _pad1; blkif_vdev_t _pad2; /* only for read/write requests */ -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ @@ -184,7 +184,7 @@ struct blkif_request_other { struct blkif_request_indirect { uint8_t indirect_op; uint16_t nr_segments; -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ #endif uint64_t id; @@ -192,7 +192,7 @@ struct blkif_request_indirect { blkif_vdev_t handle; uint16_t _pad2; grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad3; /* make it 64 byte aligned */ #else uint64_t _pad3; /* make it 64 byte aligned */ diff --git a/include/xen/interface/io/protocols.h b/include/xen/interface/io/protocols.h index 056744b4b05e..545a14ba0bb3 100644 --- a/include/xen/interface/io/protocols.h +++ b/include/xen/interface/io/protocols.h @@ -3,7 +3,6 @@ #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi" #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi" -#define XEN_IO_PROTO_ABI_IA64 "ia64-abi" #define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi" #define XEN_IO_PROTO_ABI_ARM "arm-abi" @@ -11,8 +10,6 @@ # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32 #elif defined(__x86_64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64 -#elif defined(__ia64__) -# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64 #elif defined(__powerpc64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64 #elif defined(__arm__) || defined(__aarch64__) |
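On the CONFIG_X86_64 -> !CONFIG_X86_32 changes in the blkif.h hunks above: the explicit 32-bit pads keep the 64-bit id field at byte offset 8 in every request variant on all guest ABIs except 32-bit x86, where u64 is only 4-byte aligned; widening the guard beyond x86-64 pulls the ARM ports onto the same padded layout. A compile-time sketch of the invariant, using a reduced stand-in for struct blkif_request_rw (blkif_vdev_t modelled as its underlying 16-bit handle):

#include <stdint.h>
#include <stddef.h>

/* Reduced model of blkif_request_rw on a non-x86_32 ABI. */
struct rw_like {
	uint8_t  nr_segments;
	uint16_t handle;	/* blkif_vdev_t */
	uint32_t _pad1;		/* the pad kept for everything but CONFIG_X86_32 */
	uint64_t id;		/* must line up between front end and back end */
};

_Static_assert(offsetof(struct rw_like, id) == 8, "id must sit at offset 8");

int main(void) { return 0; }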
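The new uapi zorro.h above documents the 32-bit board ID layout, and the three accessor macros simply slice those fields back out; GVP boards additionally mask the low epc bits before matching. A small sketch with the macros copied from the header (the sample ID value is invented for illustration, not a real board):

#include <stdio.h>

#define ZORRO_MANUF(id)	((id) >> 16)
#define ZORRO_PROD(id)	(((id) >> 8) & 0xff)
#define ZORRO_EPC(id)	((id) & 0xff)
#define GVP_PRODMASK	(0xf8)

int main(void)
{
	unsigned int id = 0x07E10B20;	/* made-up manuf/prod/epc triple */

	/* 16-bit manufacturer, 8-bit product, 8-bit extended product code */
	printf("manuf 0x%04x, prod 0x%02x, epc 0x%02x\n",
	       ZORRO_MANUF(id), ZORRO_PROD(id), ZORRO_EPC(id));
	printf("gvp product bits 0x%02x\n", ZORRO_EPC(id) & GVP_PRODMASK);
	return 0;
}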
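The uapi sched.h hunk further up adds SCHED_DEADLINE and the SCHED_FLAG_RESET_ON_FORK flag for the then-new sched_setattr()/sched_getattr() syscalls, which have no glibc wrapper. A hedged usage sketch: the struct sched_attr layout and the raw syscall invocation describe the contemporary syscall ABI as documented elsewhere, not anything defined in this diff, and assume headers that provide SYS_sched_setattr:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6	/* value added by the sched.h hunk above */
#endif

/* Extended scheduling attributes, per the sched_setattr(2) ABI. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;		/* e.g. SCHED_FLAG_RESET_ON_FORK */
	int32_t  sched_nice;		/* SCHED_NORMAL, SCHED_BATCH */
	uint32_t sched_priority;	/* SCHED_FIFO, SCHED_RR */
	uint64_t sched_runtime;		/* SCHED_DEADLINE, in nanoseconds */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr = {
		.size           = sizeof(attr),
		.sched_policy   = SCHED_DEADLINE,
		.sched_runtime  = 10 * 1000 * 1000,	/* 10 ms budget ...     */
		.sched_deadline = 30 * 1000 * 1000,	/* ... due within 30 ms */
		.sched_period   = 30 * 1000 * 1000,	/* ... every 30 ms      */
	};

	/* pid 0 == calling thread; the flags argument is currently unused. */
	if (syscall(SYS_sched_setattr, 0, &attr, 0) != 0)
		perror("sched_setattr");
	return 0;
}

The kernel enforces runtime <= deadline <= period; violating that ordering makes the call fail with EINVAL.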
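Similarly, PERF_FLAG_FD_CLOEXEC in the perf_event.h hunk lets perf_event_open() hand back a close-on-exec descriptor atomically instead of racing a separate fcntl(F_SETFD). A sketch via the raw syscall, since glibc provides no wrapper; the flag is spelled as its literal value from the hunk because pre-existing headers do not define it, and the counter choice is arbitrary:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;

	/* (1U << 3) == PERF_FLAG_FD_CLOEXEC from the hunk above. */
	int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 1U << 3);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}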