Diffstat (limited to 'include/linux')
106 files changed, 1547 insertions, 828 deletions
diff --git a/include/linux/a.out.h b/include/linux/a.out.h index 220f14338895..ee884168989f 100644 --- a/include/linux/a.out.h +++ b/include/linux/a.out.h @@ -4,44 +4,6 @@ #include <uapi/linux/a.out.h> #ifndef __ASSEMBLY__ -#if defined (M_OLDSUN2) -#else -#endif -#if defined (M_68010) -#else -#endif -#if defined (M_68020) -#else -#endif -#if defined (M_SPARC) -#else -#endif -#if !defined (N_MAGIC) -#endif -#if !defined (N_BADMAG) -#endif -#if !defined (N_TXTOFF) -#endif -#if !defined (N_DATOFF) -#endif -#if !defined (N_TRELOFF) -#endif -#if !defined (N_DRELOFF) -#endif -#if !defined (N_SYMOFF) -#endif -#if !defined (N_STROFF) -#endif -#if !defined (N_TXTADDR) -#endif -#if defined(vax) || defined(hp300) || defined(pyr) -#endif -#ifdef sony -#endif /* Sony. */ -#ifdef is68k -#endif -#if defined(m68k) && defined(PORTAR) -#endif #ifdef linux #include <asm/page.h> #if defined(__i386__) || defined(__mc68000__) @@ -51,34 +13,5 @@ #endif #endif #endif -#ifndef N_DATADDR -#endif -#if !defined (N_BSSADDR) -#endif -#if !defined (N_NLIST_DECLARED) -#endif /* no N_NLIST_DECLARED. */ -#if !defined (N_UNDF) -#endif -#if !defined (N_ABS) -#endif -#if !defined (N_TEXT) -#endif -#if !defined (N_DATA) -#endif -#if !defined (N_BSS) -#endif -#if !defined (N_FN) -#endif -#if !defined (N_EXT) -#endif -#if !defined (N_TYPE) -#endif -#if !defined (N_STAB) -#endif -#if !defined (N_RELOCATION_INFO_DECLARED) -#ifdef NS32K -#else -#endif -#endif /* no N_RELOCATION_INFO_DECLARED. */ #endif /*__ASSEMBLY__ */ #endif /* __A_OUT_GNU_H__ */ diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 994739da827f..e34f906647d3 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -434,6 +434,27 @@ static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus, return bcma_find_core_unit(bus, coreid, 0); } +#ifdef CONFIG_BCMA_HOST_PCI +extern void bcma_host_pci_up(struct bcma_bus *bus); +extern void bcma_host_pci_down(struct bcma_bus *bus); +extern int bcma_host_pci_irq_ctl(struct bcma_bus *bus, + struct bcma_device *core, bool enable); +#else +static inline void bcma_host_pci_up(struct bcma_bus *bus) +{ +} +static inline void bcma_host_pci_down(struct bcma_bus *bus) +{ +} +static inline int bcma_host_pci_irq_ctl(struct bcma_bus *bus, + struct bcma_device *core, bool enable) +{ + if (bus->hosttype == BCMA_HOSTTYPE_PCI) + return -ENOTSUPP; + return 0; +} +#endif + extern bool bcma_core_is_enabled(struct bcma_device *core); extern void bcma_core_disable(struct bcma_device *core, u32 flags); extern int bcma_core_enable(struct bcma_device *core, u32 flags); diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index db6fa217f98b..6cceedf65ca2 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -663,14 +663,6 @@ struct bcma_drv_cc_b { #define bcma_cc_maskset32(cc, offset, mask, set) \ bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set)) -extern void bcma_core_chipcommon_init(struct bcma_drv_cc *cc); -extern void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc); - -extern void bcma_chipco_suspend(struct bcma_drv_cc *cc); -extern void bcma_chipco_resume(struct bcma_drv_cc *cc); - -void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable); - extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks); extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc); @@ -690,9 +682,6 @@ u32 bcma_chipco_gpio_pullup(struct 
bcma_drv_cc *cc, u32 mask, u32 value); u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value); /* PMU support */ -extern void bcma_pmu_init(struct bcma_drv_cc *cc); -extern void bcma_pmu_early_init(struct bcma_drv_cc *cc); - extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value); extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h index 4dd1f33e36a2..4354d4ea6713 100644 --- a/include/linux/bcma/bcma_driver_gmac_cmn.h +++ b/include/linux/bcma/bcma_driver_gmac_cmn.h @@ -91,10 +91,4 @@ struct bcma_drv_gmac_cmn { #define gmac_cmn_write16(gc, offset, val) bcma_write16((gc)->core, offset, val) #define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val) -#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN -extern void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc); -#else -static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { } -#endif - #endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */ diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h index 0b3b32aeeb8a..8eea7f9e33b4 100644 --- a/include/linux/bcma/bcma_driver_mips.h +++ b/include/linux/bcma/bcma_driver_mips.h @@ -39,21 +39,6 @@ struct bcma_drv_mips { u8 early_setup_done:1; }; -#ifdef CONFIG_BCMA_DRIVER_MIPS -extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); -extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); - -extern unsigned int bcma_core_mips_irq(struct bcma_device *dev); -#else -static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } -static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } - -static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev) -{ - return 0; -} -#endif - extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore); #endif /* LINUX_BCMA_DRIVER_MIPS_H_ */ diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 3f809ae372c4..5ba6918ca20b 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -238,13 +238,13 @@ struct bcma_drv_pci { #define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val) #define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) -extern void bcma_core_pci_early_init(struct bcma_drv_pci *pc); -extern void bcma_core_pci_init(struct bcma_drv_pci *pc); -extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, - struct bcma_device *core, bool enable); -extern void bcma_core_pci_up(struct bcma_bus *bus); -extern void bcma_core_pci_down(struct bcma_bus *bus); +#ifdef CONFIG_BCMA_DRIVER_PCI extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up); +#else +static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up) +{ +} +#endif extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h index 5988b05781c3..31e6d17ab798 100644 --- a/include/linux/bcma/bcma_driver_pcie2.h +++ b/include/linux/bcma/bcma_driver_pcie2.h @@ -143,6 +143,8 @@ struct bcma_drv_pcie2 { struct bcma_device *core; + + u16 reqsize; }; #define pcie2_read16(pcie2, offset) bcma_read16((pcie2)->core, offset) @@ -153,6 +155,4 @@ struct bcma_drv_pcie2 { #define pcie2_set32(pcie2, offset, set) bcma_set32((pcie2)->core, offset, set) #define 
pcie2_mask32(pcie2, offset, mask) bcma_mask32((pcie2)->core, offset, mask) -void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2); - #endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index dbfbf4990005..ea17cca9e685 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -172,12 +172,8 @@ extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int extern int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits); -#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) -#define BITMAP_LAST_WORD_MASK(nbits) \ -( \ - ((nbits) % BITS_PER_LONG) ? \ - (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ -) +#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) +#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) #define small_const_nbits(nbits) \ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) @@ -287,16 +283,16 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits) { if (small_const_nbits(nbits)) return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); - else - return __bitmap_empty(src, nbits); + + return find_first_bit(src, nbits) == nbits; } static inline int bitmap_full(const unsigned long *src, unsigned int nbits) { if (small_const_nbits(nbits)) return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); - else - return __bitmap_full(src, nbits); + + return find_first_zero_bit(src, nbits) == nbits; } static inline int bitmap_weight(const unsigned long *src, unsigned int nbits) diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 5d858e02997f..297f5bda4fdf 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -218,9 +218,9 @@ static inline unsigned long __ffs64(u64 word) /** * find_last_bit - find the last set bit in a memory region * @addr: The address to start the search at - * @size: The maximum size to search + * @size: The number of bits to search * - * Returns the bit number of the first set bit, or size. + * Returns the bit number of the last set bit, or size. */ extern unsigned long find_last_bit(const unsigned long *addr, unsigned long size); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 7aec86127335..8210e8797c12 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -164,6 +164,8 @@ enum { << BLK_MQ_F_ALLOC_POLICY_START_BIT) struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); +struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, + struct request_queue *q); void blk_mq_finish_init(struct request_queue *q); int blk_mq_register_disk(struct gendisk *); void blk_mq_unregister_disk(struct gendisk *); @@ -218,6 +220,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_stop_hw_queues(struct request_queue *q); void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); +void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, void *priv); @@ -227,7 +230,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q); /* * Driver command data is immediately after the request. So subtract request - * size to get back to the original request. + * size to get back to the original request, add request size to get the PDU. 
*/ static inline struct request *blk_mq_rq_from_pdu(void *pdu) { @@ -235,7 +238,7 @@ static inline struct request *blk_mq_rq_from_pdu(void *pdu) } static inline void *blk_mq_rq_to_pdu(struct request *rq) { - return (void *) rq + sizeof(*rq); + return rq + 1; } #define queue_for_each_hw_ctx(q, hctx, i) \ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c2e21113ecc0..d5cda067115a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -32,23 +32,19 @@ struct bpf_map { u32 key_size; u32 value_size; u32 max_entries; - struct bpf_map_ops *ops; + const struct bpf_map_ops *ops; struct work_struct work; }; struct bpf_map_type_list { struct list_head list_node; - struct bpf_map_ops *ops; + const struct bpf_map_ops *ops; enum bpf_map_type type; }; -void bpf_register_map_type(struct bpf_map_type_list *tl); -void bpf_map_put(struct bpf_map *map); -struct bpf_map *bpf_map_get(struct fd f); - /* function argument constraints */ enum bpf_arg_type { - ARG_ANYTHING = 0, /* any argument is ok */ + ARG_DONTCARE = 0, /* unused argument in helper function */ /* the following constraints used to prototype * bpf_map_lookup/update/delete_elem() functions @@ -62,6 +58,9 @@ enum bpf_arg_type { */ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ + + ARG_PTR_TO_CTX, /* pointer to context */ + ARG_ANYTHING, /* any (initialized) argument is ok */ }; /* type of values returned from helper functions */ @@ -105,11 +104,14 @@ struct bpf_verifier_ops { * with 'type' (read or write) is allowed */ bool (*is_valid_access)(int off, int size, enum bpf_access_type type); + + u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off, + struct bpf_insn *insn); }; struct bpf_prog_type_list { struct list_head list_node; - struct bpf_verifier_ops *ops; + const struct bpf_verifier_ops *ops; enum bpf_prog_type type; }; @@ -117,20 +119,25 @@ struct bpf_prog; struct bpf_prog_aux { atomic_t refcnt; - bool is_gpl_compatible; - enum bpf_prog_type prog_type; - struct bpf_verifier_ops *ops; - struct bpf_map **used_maps; u32 used_map_cnt; + const struct bpf_verifier_ops *ops; + struct bpf_map **used_maps; struct bpf_prog *prog; struct work_struct work; }; #ifdef CONFIG_BPF_SYSCALL void bpf_register_prog_type(struct bpf_prog_type_list *tl); +void bpf_register_map_type(struct bpf_map_type_list *tl); -void bpf_prog_put(struct bpf_prog *prog); struct bpf_prog *bpf_prog_get(u32 ufd); +void bpf_prog_put(struct bpf_prog *prog); + +struct bpf_map *bpf_map_get(struct fd f); +void bpf_map_put(struct bpf_map *map); + +/* verify correctness of eBPF program */ +int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); #else static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl) { @@ -144,14 +151,14 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd) static inline void bpf_prog_put(struct bpf_prog *prog) { } -#endif - -/* verify correctness of eBPF program */ -int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); +#endif /* CONFIG_BPF_SYSCALL */ /* verifier prototypes for helper functions called from eBPF programs */ -extern struct bpf_func_proto bpf_map_lookup_elem_proto; -extern struct bpf_func_proto bpf_map_update_elem_proto; -extern struct bpf_func_proto bpf_map_delete_elem_proto; +extern const struct bpf_func_proto bpf_map_lookup_elem_proto; +extern const struct bpf_func_proto bpf_map_update_elem_proto; +extern const struct bpf_func_proto bpf_map_delete_elem_proto; + +extern const struct bpf_func_proto bpf_get_prandom_u32_proto; 
+extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; #endif /* _LINUX_BPF_H */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 7ccd928cc1f2..ae2982c0f7a6 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -11,6 +11,7 @@ #define PHY_ID_BCM5421 0x002060e0 #define PHY_ID_BCM5464 0x002060b0 #define PHY_ID_BCM5461 0x002060c0 +#define PHY_ID_BCM54616S 0x03625d10 #define PHY_ID_BCM57780 0x03625d90 #define PHY_ID_BCM7250 0xae025280 @@ -19,6 +20,7 @@ #define PHY_ID_BCM7425 0x03625e60 #define PHY_ID_BCM7429 0x600d8730 #define PHY_ID_BCM7439 0x600d8480 +#define PHY_ID_BCM7439_2 0xae025080 #define PHY_ID_BCM7445 0x600d8510 #define PHY_BCM_OUI_MASK 0xfffffc00 diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index c05ff0f9f9a5..c3a9c8fc60fa 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -61,6 +61,8 @@ struct can_priv { char tx_led_trig_name[CAN_LED_NAME_SZ]; struct led_trigger *rx_led_trig; char rx_led_trig_name[CAN_LED_NAME_SZ]; + struct led_trigger *rxtx_led_trig; + char rxtx_led_trig_name[CAN_LED_NAME_SZ]; #endif }; diff --git a/include/linux/can/led.h b/include/linux/can/led.h index e0475c5cbb92..146de4506d21 100644 --- a/include/linux/can/led.h +++ b/include/linux/can/led.h @@ -21,8 +21,10 @@ enum can_led_event { #ifdef CONFIG_CAN_LEDS -/* keep space for interface name + "-tx"/"-rx" suffix and null terminator */ -#define CAN_LED_NAME_SZ (IFNAMSIZ + 4) +/* keep space for interface name + "-tx"/"-rx"/"-rxtx" + * suffix and null terminator + */ +#define CAN_LED_NAME_SZ (IFNAMSIZ + 6) void can_led_event(struct net_device *netdev, enum can_led_event event); void devm_can_led_init(struct net_device *netdev); diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index cc00d15c6107..b6a52a4b457a 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -44,16 +44,11 @@ static inline void can_skb_reserve(struct sk_buff *skb) skb_reserve(skb, sizeof(struct can_skb_priv)); } -static inline void can_skb_destructor(struct sk_buff *skb) -{ - sock_put(skb->sk); -} - static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) { if (sk) { sock_hold(sk); - skb->destructor = can_skb_destructor; + skb->destructor = sock_efree; skb->sk = sk; } } diff --git a/include/linux/capability.h b/include/linux/capability.h index aa93e5ef594c..af9f0b9e80e6 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -205,6 +205,7 @@ static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a, cap_intersect(permitted, __cap_nfsd_set)); } +#ifdef CONFIG_MULTIUSER extern bool has_capability(struct task_struct *t, int cap); extern bool has_ns_capability(struct task_struct *t, struct user_namespace *ns, int cap); @@ -213,6 +214,34 @@ extern bool has_ns_capability_noaudit(struct task_struct *t, struct user_namespace *ns, int cap); extern bool capable(int cap); extern bool ns_capable(struct user_namespace *ns, int cap); +#else +static inline bool has_capability(struct task_struct *t, int cap) +{ + return true; +} +static inline bool has_ns_capability(struct task_struct *t, + struct user_namespace *ns, int cap) +{ + return true; +} +static inline bool has_capability_noaudit(struct task_struct *t, int cap) +{ + return true; +} +static inline bool has_ns_capability_noaudit(struct task_struct *t, + struct user_namespace *ns, int cap) +{ + return true; +} +static inline bool capable(int cap) +{ + return true; +} +static inline bool ns_capable(struct user_namespace *ns, int cap) +{ + return 
true; +} +#endif /* CONFIG_MULTIUSER */ extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); diff --git a/include/linux/compaction.h b/include/linux/compaction.h index a014559e4a49..aa8f61cf3a19 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -34,6 +34,7 @@ extern int sysctl_compaction_handler(struct ctl_table *table, int write, extern int sysctl_extfrag_threshold; extern int sysctl_extfrag_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); +extern int sysctl_compact_unevictable_allowed; extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order, diff --git a/include/linux/cred.h b/include/linux/cred.h index 2fb2ca2127ed..8b6c083e68a7 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -62,9 +62,27 @@ do { \ groups_free(group_info); \ } while (0) -extern struct group_info *groups_alloc(int); extern struct group_info init_groups; +#ifdef CONFIG_MULTIUSER +extern struct group_info *groups_alloc(int); extern void groups_free(struct group_info *); + +extern int in_group_p(kgid_t); +extern int in_egroup_p(kgid_t); +#else +static inline void groups_free(struct group_info *group_info) +{ +} + +static inline int in_group_p(kgid_t grp) +{ + return 1; +} +static inline int in_egroup_p(kgid_t grp) +{ + return 1; +} +#endif extern int set_current_groups(struct group_info *); extern void set_groups(struct cred *, struct group_info *); extern int groups_search(const struct group_info *, kgid_t); @@ -74,9 +92,6 @@ extern bool may_setgroups(void); #define GROUP_AT(gi, i) \ ((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK]) -extern int in_group_p(kgid_t); -extern int in_egroup_p(kgid_t); - /* * The security context of a task * diff --git a/include/linux/crypto.h b/include/linux/crypto.h index fb5ef16d6a12..10df5d2d093a 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -95,6 +95,12 @@ #define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000 /* + * Mark a cipher as a service implementation only usable by another + * cipher and never by a normal user of the kernel crypto API + */ +#define CRYPTO_ALG_INTERNAL 0x00002000 + +/* * Transform masks and values (for crt_flags). */ #define CRYPTO_TFM_REQ_MASK 0x000fff00 diff --git a/include/linux/dcache.h b/include/linux/dcache.h index d8358799c594..df334cbacc6d 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -404,26 +404,11 @@ static inline bool d_mountpoint(const struct dentry *dentry) /* * Directory cache entry type accessor functions. 
*/ -static inline void __d_set_type(struct dentry *dentry, unsigned type) -{ - dentry->d_flags = (dentry->d_flags & ~DCACHE_ENTRY_TYPE) | type; -} - -static inline void __d_clear_type(struct dentry *dentry) -{ - __d_set_type(dentry, DCACHE_MISS_TYPE); -} - -static inline void d_set_type(struct dentry *dentry, unsigned type) -{ - spin_lock(&dentry->d_lock); - __d_set_type(dentry, type); - spin_unlock(&dentry->d_lock); -} - static inline unsigned __d_entry_type(const struct dentry *dentry) { - return dentry->d_flags & DCACHE_ENTRY_TYPE; + unsigned type = READ_ONCE(dentry->d_flags); + smp_rmb(); + return type & DCACHE_ENTRY_TYPE; } static inline bool d_is_miss(const struct dentry *dentry) @@ -482,6 +467,44 @@ static inline bool d_is_positive(const struct dentry *dentry) return !d_is_negative(dentry); } +/** + * d_really_is_negative - Determine if a dentry is really negative (ignoring fallthroughs) + * @dentry: The dentry in question + * + * Returns true if the dentry represents either an absent name or a name that + * doesn't map to an inode (ie. ->d_inode is NULL). The dentry could represent + * a true miss, a whiteout that isn't represented by a 0,0 chardev or a + * fallthrough marker in an opaque directory. + * + * Note! (1) This should be used *only* by a filesystem to examine its own + * dentries. It should not be used to look at some other filesystem's + * dentries. (2) It should also be used in combination with d_inode() to get + * the inode. (3) The dentry may have something attached to ->d_lower and the + * type field of the flags may be set to something other than miss or whiteout. + */ +static inline bool d_really_is_negative(const struct dentry *dentry) +{ + return dentry->d_inode == NULL; +} + +/** + * d_really_is_positive - Determine if a dentry is really positive (ignoring fallthroughs) + * @dentry: The dentry in question + * + * Returns true if the dentry represents a name that maps to an inode + * (ie. ->d_inode is not NULL). The dentry might still represent a whiteout if + * that is represented on medium as a 0,0 chardev. + * + * Note! (1) This should be used *only* by a filesystem to examine its own + * dentries. It should not be used to look at some other filesystem's + * dentries. (2) It should also be used in combination with d_inode() to get + * the inode. 
+ */ +static inline bool d_really_is_positive(const struct dentry *dentry) +{ + return dentry->d_inode != NULL; +} + extern void d_set_fallthru(struct dentry *dentry); static inline bool d_is_fallthru(const struct dentry *dentry) diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 439ff698000a..221025423e6c 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -43,6 +43,7 @@ enum dccp_state { DCCP_CLOSING = TCP_CLOSING, DCCP_TIME_WAIT = TCP_TIME_WAIT, DCCP_CLOSED = TCP_CLOSE, + DCCP_NEW_SYN_RECV = TCP_NEW_SYN_RECV, DCCP_PARTOPEN = TCP_MAX_STATES, DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */ DCCP_MAX_STATES @@ -57,6 +58,7 @@ enum { DCCPF_CLOSING = TCPF_CLOSING, DCCPF_TIME_WAIT = TCPF_TIME_WAIT, DCCPF_CLOSED = TCPF_CLOSE, + DCCPF_NEW_SYN_RECV = TCPF_NEW_SYN_RECV, DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN), }; @@ -317,6 +319,6 @@ static inline const char *dccp_role(const struct sock *sk) return NULL; } -extern void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req); +extern void dccp_syn_ack_timeout(const struct request_sock *req); #endif /* _LINUX_DCCP_H */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index c3007cb4bfa6..ac07ff090919 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -34,6 +34,10 @@ struct dma_map_ops { void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); + /* + * map_sg returns 0 on error and a value > 0 on success. + * It should never return a value < 0. + */ int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs); diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 1d869d185a0d..606563ef8a72 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -35,7 +35,6 @@ extern const struct header_ops eth_header_ops; int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len); -int eth_rebuild_header(struct sk_buff *skb); int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); diff --git a/include/linux/filter.h b/include/linux/filter.h index caac2087a4d5..fa11b3a367be 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -145,8 +145,6 @@ struct bpf_prog_aux; .off = 0, \ .imm = ((__u64) (IMM)) >> 32 }) -#define BPF_PSEUDO_MAP_FD 1 - /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ #define BPF_LD_MAP_FD(DST, MAP_FD) \ BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) @@ -310,9 +308,11 @@ struct bpf_binary_header { struct bpf_prog { u16 pages; /* Number of allocated pages */ bool jited; /* Is our filter JIT'ed? */ + bool gpl_compatible; /* Is our filter GPL compatible? */ u32 len; /* Number of filter blocks */ - struct sock_fprog_kern *orig_prog; /* Original BPF program */ + enum bpf_prog_type type; /* Type of BPF program */ struct bpf_prog_aux *aux; /* Auxiliary fields */ + struct sock_fprog_kern *orig_prog; /* Original BPF program */ unsigned int (*bpf_func)(const struct sk_buff *skb, const struct bpf_insn *filter); /* Instructions for interpreter */ @@ -454,6 +454,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest) BPF_ANCILLARY(VLAN_TAG_PRESENT); BPF_ANCILLARY(PAY_OFFSET); BPF_ANCILLARY(RANDOM); + BPF_ANCILLARY(VLAN_TPID); } /* Fallthrough. 
*/ default: diff --git a/include/linux/fs.h b/include/linux/fs.h index d502e5436c84..c7496f263860 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -315,6 +315,8 @@ struct address_space; struct writeback_control; #define IOCB_EVENTFD (1 << 0) +#define IOCB_APPEND (1 << 1) +#define IOCB_DIRECT (1 << 2) struct kiocb { struct file *ki_filp; @@ -329,10 +331,13 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb) return kiocb->ki_complete == NULL; } +static inline int iocb_flags(struct file *file); + static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) { *kiocb = (struct kiocb) { .ki_filp = filp, + .ki_flags = iocb_flags(filp), }; } @@ -383,7 +388,7 @@ struct address_space_operations { void (*invalidatepage) (struct page *, unsigned int, unsigned int); int (*releasepage) (struct page *, gfp_t); void (*freepage)(struct page *); - ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset); /* * migrate the contents of a page to the specified target. If * migrate_mode is MIGRATE_ASYNC, it must not block. @@ -870,6 +875,7 @@ static inline struct file *get_file(struct file *f) atomic_long_inc(&f->f_count); return f; } +#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count) #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) #define file_count(x) atomic_long_read(&(x)->f_count) @@ -915,8 +921,8 @@ struct file_lock_operations { struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long (*lm_owner_key)(struct file_lock *); - void (*lm_get_owner)(struct file_lock *, struct file_lock *); - void (*lm_put_owner)(struct file_lock *); + fl_owner_t (*lm_get_owner)(fl_owner_t); + void (*lm_put_owner)(fl_owner_t); void (*lm_notify)(struct file_lock *); /* unblock callback */ int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); @@ -1041,6 +1047,9 @@ extern void lease_get_mtime(struct inode *, struct timespec *time); extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); extern int vfs_setlease(struct file *, long, struct file_lock **, void **); extern int lease_modify(struct file_lock *, int, struct list_head *); +struct files_struct; +extern void show_fd_locks(struct seq_file *f, + struct file *filp, struct files_struct *files); #else /* !CONFIG_FILE_LOCKING */ static inline int fcntl_getlk(struct file *file, unsigned int cmd, struct flock __user *user) @@ -1177,6 +1186,10 @@ static inline int lease_modify(struct file_lock *fl, int arg, { return -EINVAL; } + +struct files_struct; +static inline void show_fd_locks(struct seq_file *f, + struct file *filp, struct files_struct *files) {} #endif /* !CONFIG_FILE_LOCKING */ @@ -1562,8 +1575,6 @@ struct file_operations { loff_t (*llseek) (struct file *, loff_t, int); ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); - ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t); - ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t); ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); int (*iterate) (struct file *, struct dir_context *); @@ -1639,6 +1650,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, struct iovec **ret_pointer); extern ssize_t __vfs_read(struct file *, 
char __user *, size_t, loff_t *); +extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t vfs_readv(struct file *, const struct iovec __user *, @@ -2567,16 +2579,12 @@ extern int sb_min_blocksize(struct super_block *, int); extern int generic_file_mmap(struct file *, struct vm_area_struct *); extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); -int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); +extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t); extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); -extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos); -extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); -extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos); -extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos); ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos); @@ -2614,12 +2622,13 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset, extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); -ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *, - loff_t, get_block_t, dio_iodone_t, int flags); +ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t, + get_block_t, dio_iodone_t, int flags); int dax_clear_blocks(struct inode *, sector_t block, long size); int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); int dax_truncate_page(struct inode *, loff_t from, get_block_t); int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); +int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); #define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) #ifdef CONFIG_BLOCK @@ -2639,16 +2648,18 @@ enum { void dio_end_io(struct bio *bio, int error); -ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, - struct block_device *bdev, struct iov_iter *iter, loff_t offset, - get_block_t get_block, dio_iodone_t end_io, - dio_submit_t submit_io, int flags); +ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, + struct block_device *bdev, struct iov_iter *iter, + loff_t offset, get_block_t get_block, + dio_iodone_t end_io, dio_submit_t submit_io, + int flags); -static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, - struct inode *inode, struct iov_iter *iter, loff_t offset, - get_block_t get_block) +static inline ssize_t blockdev_direct_IO(struct kiocb *iocb, + struct inode *inode, + struct iov_iter *iter, loff_t offset, + get_block_t get_block) { - return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter, + return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, 
offset, get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES); } @@ -2684,7 +2695,6 @@ void inode_sub_bytes(struct inode *inode, loff_t bytes); loff_t inode_get_bytes(struct inode *inode); void inode_set_bytes(struct inode *inode, loff_t bytes); -extern int vfs_readdir(struct file *, filldir_t, void *); extern int iterate_dir(struct file *, struct dir_context *); extern int vfs_stat(const char __user *, struct kstat *); @@ -2782,6 +2792,16 @@ static inline bool io_is_direct(struct file *filp) return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp)); } +static inline int iocb_flags(struct file *file) +{ + int res = 0; + if (file->f_flags & O_APPEND) + res |= IOCB_APPEND; + if (io_is_direct(file)) + res |= IOCB_DIRECT; + return res; +} + static inline ino_t parent_ino(struct dentry *dentry) { ino_t res; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 7b5785032049..205026175c42 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -22,7 +22,13 @@ struct mmu_gather; struct hugepage_subpool { spinlock_t lock; long count; - long max_hpages, used_hpages; + long max_hpages; /* Maximum huge pages or -1 if no maximum. */ + long used_hpages; /* Used count against maximum, includes */ + /* both alloced and reserved pages. */ + struct hstate *hstate; + long min_hpages; /* Minimum huge pages or -1 if no minimum. */ + long rsv_hpages; /* Pages reserved against global pool to */ + /* sasitfy minimum size. */ }; struct resv_map { @@ -38,11 +44,10 @@ extern int hugetlb_max_hstate __read_mostly; #define for_each_hstate(h) \ for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++) -struct hugepage_subpool *hugepage_new_subpool(long nr_blocks); +struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, + long min_hpages); void hugepage_put_subpool(struct hugepage_subpool *spool); -int PageHuge(struct page *page); - void reset_vma_resv_huge_pages(struct vm_area_struct *vma); int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); @@ -79,7 +84,6 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); int dequeue_hwpoisoned_huge_page(struct page *page); bool isolate_huge_page(struct page *page, struct list_head *list); void putback_active_hugepage(struct page *page); -bool is_hugepage_active(struct page *page); void free_huge_page(struct page *page); #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE @@ -109,11 +113,6 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, #else /* !CONFIG_HUGETLB_PAGE */ -static inline int PageHuge(struct page *page) -{ - return 0; -} - static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) { } @@ -152,7 +151,6 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list) return false; } #define putback_active_hugepage(p) do {} while (0) -#define is_hugepage_active(x) false static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot) diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index eb7b414d232b..4f7d8f4b1e9a 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -50,10 +50,14 @@ struct hwrng { struct completion cleanup_done; }; +struct device; + /** Register a new Hardware Random Number Generator driver. 
*/ extern int hwrng_register(struct hwrng *rng); +extern int devm_hwrng_register(struct device *dev, struct hwrng *rng); /** Unregister a Hardware Random Number Generator driver. */ extern void hwrng_unregister(struct hwrng *rng); +extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng); /** Feed random bits into the pool. */ extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy); diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index 6e82d888287c..8872ca103d06 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h @@ -28,7 +28,9 @@ #include <asm/byteorder.h> #define IEEE802154_MTU 127 -#define IEEE802154_MIN_PSDU_LEN 5 +#define IEEE802154_ACK_PSDU_LEN 5 +#define IEEE802154_MIN_PSDU_LEN 9 +#define IEEE802154_FCS_LEN 2 #define IEEE802154_PAN_ID_BROADCAST 0xffff #define IEEE802154_ADDR_SHORT_BROADCAST 0xffff @@ -38,6 +40,7 @@ #define IEEE802154_LIFS_PERIOD 40 #define IEEE802154_SIFS_PERIOD 12 +#define IEEE802154_MAX_SIFS_FRAME_SIZE 18 #define IEEE802154_MAX_CHANNEL 26 #define IEEE802154_MAX_PAGE 31 @@ -204,11 +207,18 @@ enum { /** * ieee802154_is_valid_psdu_len - check if psdu len is valid + * available lengths: + * 0-4 Reserved + * 5 MPDU (Acknowledgment) + * 6-8 Reserved + * 9-127 MPDU + * * @len: psdu len with (MHR + payload + MFR) */ static inline bool ieee802154_is_valid_psdu_len(const u8 len) { - return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU); + return (len == IEEE802154_ACK_PSDU_LEN || + (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU)); } /** diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index a57bca2ea97e..dad8b00beed2 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -44,6 +44,7 @@ struct br_ip_list { #define BR_PROMISC BIT(7) #define BR_PROXYARP BIT(8) #define BR_LEARNING_SYNC BIT(9) +#define BR_PROXYARP_WIFI BIT(10) extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 119130e9298b..da4929927f69 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -14,5 +14,6 @@ struct ifla_vf_info { __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; + __u32 rss_query_en; }; #endif /* _LINUX_IF_LINK_H */ diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index aff7ad8a4ea3..66a7d7600f43 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -19,6 +19,7 @@ #include <linux/netdevice.h> #include <linux/ppp_channel.h> #include <linux/skbuff.h> +#include <linux/workqueue.h> #include <uapi/linux/if_pppox.h> static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb) @@ -32,6 +33,7 @@ struct pppoe_opt { struct pppoe_addr pa; /* what this socket is bound to*/ struct sockaddr_pppox relay; /* what socket data will be relayed to (PPPoE relaying) */ + struct work_struct padt_work;/* Work item for handling PADT */ }; struct pptp_opt { diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index b11b28a30b9e..920e4457ce6e 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -561,4 +561,71 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb, skb->protocol = htons(ETH_P_802_2); } +/** + * skb_vlan_tagged - check if skb is vlan tagged. + * @skb: skbuff to query + * + * Returns true if the skb is tagged, regardless of whether it is hardware + * accelerated or not. 
+ */ +static inline bool skb_vlan_tagged(const struct sk_buff *skb) +{ + if (!skb_vlan_tag_present(skb) && + likely(skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD))) + return false; + + return true; +} + +/** + * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers. + * @skb: skbuff to query + * + * Returns true if the skb is tagged with multiple vlan headers, regardless + * of whether it is hardware accelerated or not. + */ +static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) +{ + __be16 protocol = skb->protocol; + + if (!skb_vlan_tag_present(skb)) { + struct vlan_ethhdr *veh; + + if (likely(protocol != htons(ETH_P_8021Q) && + protocol != htons(ETH_P_8021AD))) + return false; + + veh = (struct vlan_ethhdr *)skb->data; + protocol = veh->h_vlan_encapsulated_proto; + } + + if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) + return false; + + return true; +} + +/** + * vlan_features_check - drop unsafe features for skb with multiple tags. + * @skb: skbuff to query + * @features: features to be checked + * + * Returns features without unsafe ones if the skb has multiple tags. + */ +static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, + netdev_features_t features) +{ + if (skb_vlan_tagged_multi(skb)) + features = netdev_intersect_features(features, + NETIF_F_SG | + NETIF_F_HIGHDMA | + NETIF_F_FRAGLIST | + NETIF_F_GEN_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + return features; +} + #endif /* !(_LINUX_IF_VLAN_H_) */ diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index 46da02410a09..ac48b10c9395 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -11,33 +11,34 @@ struct sk_buff; struct netlink_callback; struct inet_diag_handler { - void (*dump)(struct sk_buff *skb, - struct netlink_callback *cb, - struct inet_diag_req_v2 *r, - struct nlattr *bc); - - int (*dump_one)(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, - struct inet_diag_req_v2 *req); - - void (*idiag_get_info)(struct sock *sk, - struct inet_diag_msg *r, - void *info); - __u16 idiag_type; + void (*dump)(struct sk_buff *skb, + struct netlink_callback *cb, + const struct inet_diag_req_v2 *r, + struct nlattr *bc); + + int (*dump_one)(struct sk_buff *in_skb, + const struct nlmsghdr *nlh, + const struct inet_diag_req_v2 *req); + + void (*idiag_get_info)(struct sock *sk, + struct inet_diag_msg *r, + void *info); + __u16 idiag_type; }; struct inet_connection_sock; int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, - struct sk_buff *skb, struct inet_diag_req_v2 *req, - struct user_namespace *user_ns, - u32 pid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh); + struct sk_buff *skb, const struct inet_diag_req_v2 *req, + struct user_namespace *user_ns, + u32 pid, u32 seq, u16 nlmsg_flags, + const struct nlmsghdr *unlh); void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, - struct netlink_callback *cb, struct inet_diag_req_v2 *r, - struct nlattr *bc); + struct netlink_callback *cb, + const struct inet_diag_req_v2 *r, + struct nlattr *bc); int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, - struct sk_buff *in_skb, const struct nlmsghdr *nlh, - struct inet_diag_req_v2 *req); + struct sk_buff *in_skb, const struct nlmsghdr *nlh, + const struct inet_diag_req_v2 *req); int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 
2c5250222278..388e3ae94f7a 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -196,10 +196,8 @@ extern struct resource * __request_region(struct resource *, /* Compatibility cruft */ #define release_region(start,n) __release_region(&ioport_resource, (start), (n)) -#define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n)) #define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n)) -extern int __check_region(struct resource *, resource_size_t, resource_size_t); extern void __release_region(struct resource *, resource_size_t, resource_size_t); #ifdef CONFIG_MEMORY_HOTREMOVE @@ -207,12 +205,6 @@ extern int release_mem_region_adjustable(struct resource *, resource_size_t, resource_size_t); #endif -static inline int __deprecated check_region(resource_size_t s, - resource_size_t n) -{ - return __check_region(&ioport_resource, s, n); -} - /* Wrappers for managed devices */ struct device; diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 4d5169f5d7d1..82806c60aa42 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -53,6 +53,10 @@ struct ipv6_devconf { __s32 ndisc_notify; __s32 suppress_frag_ndisc; __s32 accept_ra_mtu; + struct ipv6_stable_secret { + bool initialized; + struct in6_addr secret; + } stable_secret; void *sysctl; }; diff --git a/include/linux/jhash.h b/include/linux/jhash.h index 47cb09edec1a..348c6f47e4cc 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -145,11 +145,11 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval) } -/* jhash_3words - hash exactly 3, 2 or 1 word(s) */ -static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) +/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */ +static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) { - a += JHASH_INITVAL; - b += JHASH_INITVAL; + a += initval; + b += initval; c += initval; __jhash_final(a, b, c); @@ -157,14 +157,19 @@ static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) return c; } +static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) +{ + return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2)); +} + static inline u32 jhash_2words(u32 a, u32 b, u32 initval) { - return jhash_3words(a, b, 0, initval); + return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); } static inline u32 jhash_1word(u32 a, u32 initval) { - return jhash_3words(a, 0, 0, initval); + return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2)); } #endif /* _LINUX_JHASH_H */ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 5bb074431eb0..5486d777b706 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -44,6 +44,7 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object); void kasan_kmalloc_large(const void *ptr, size_t size); void kasan_kfree_large(const void *ptr); +void kasan_kfree(void *ptr); void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size); void kasan_krealloc(const void *object, size_t new_size); @@ -71,6 +72,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache, static inline void kasan_kmalloc_large(void *ptr, size_t size) {} static inline void kasan_kfree_large(const void *ptr) {} +static inline void kasan_kfree(void *ptr) {} static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size) {} static inline void kasan_krealloc(const void *object, size_t new_size) {} diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index 
be342b94c640..63ca8dacec59 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -23,14 +23,6 @@ #define ___config_enabled(__ignored, val, ...) val /* - * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', - * 0 otherwise. - * - */ -#define IS_ENABLED(option) \ - (config_enabled(option) || config_enabled(option##_MODULE)) - -/* * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0 * otherwise. For boolean options, this is equivalent to * IS_ENABLED(CONFIG_FOO). @@ -43,4 +35,11 @@ */ #define IS_MODULE(option) config_enabled(option##_MODULE) +/* + * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', + * 0 otherwise. + */ +#define IS_ENABLED(option) \ + (IS_BUILTIN(option) || IS_MODULE(option)) + #endif /* __LINUX_KCONFIG_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index d6d630d31ef3..3a5b48e52a9e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -103,6 +103,18 @@ (((__x) - ((__d) / 2)) / (__d)); \ } \ ) +/* + * Same as above but for u64 dividends. divisor must be a 32-bit + * number. + */ +#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \ +{ \ + typeof(divisor) __d = divisor; \ + unsigned long long _tmp = (x) + (__d) / 2; \ + do_div(_tmp, __d); \ + _tmp; \ +} \ +) /* * Multiplies an integer by a fraction, while avoiding unnecessary diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 3be6bb18562d..7ae216a39c9e 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -35,18 +35,6 @@ static inline void ksm_exit(struct mm_struct *mm) __ksm_exit(mm); } -/* - * A KSM page is one of those write-protected "shared pages" or "merged pages" - * which KSM maps into multiple mms, wherever identical anonymous page content - * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any - * anon_vma, but to that page's node of the stable tree. - */ -static inline int PageKsm(struct page *page) -{ - return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == - (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); -} - static inline struct stable_node *page_stable_node(struct page *page) { return PageKsm(page) ? page_rmapping(page) : NULL; @@ -87,11 +75,6 @@ static inline void ksm_exit(struct mm_struct *mm) { } -static inline int PageKsm(struct page *page) -{ - return 0; -} - #ifdef CONFIG_MMU static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags) diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h index 5ba2facd7a51..e97966d1fb8d 100644 --- a/include/linux/led-class-flash.h +++ b/include/linux/led-class-flash.h @@ -13,7 +13,6 @@ #define __LINUX_FLASH_LEDS_H_INCLUDED #include <linux/leds.h> -#include <uapi/linux/v4l2-controls.h> struct device_node; struct led_classdev_flash; @@ -33,7 +32,7 @@ struct led_classdev_flash; #define LED_FAULT_LED_OVER_TEMPERATURE (1 << 8) #define LED_NUM_FLASH_FAULTS 9 -#define LED_FLASH_MAX_SYSFS_GROUPS 7 +#define LED_FLASH_SYSFS_GROUPS_SIZE 5 struct led_flash_ops { /* set flash brightness */ @@ -81,21 +80,7 @@ struct led_classdev_flash { struct led_flash_setting timeout; /* LED Flash class sysfs groups */ - const struct attribute_group *sysfs_groups[LED_FLASH_MAX_SYSFS_GROUPS]; - - /* LEDs available for flash strobe synchronization */ - struct led_classdev_flash **sync_leds; - - /* Number of LEDs available for flash strobe synchronization */ - int num_sync_leds; - - /* - * The identifier of the sub-led to synchronize the flash strobe with. 
- * Identifiers start from 1, which reflects the first element from the - * sync_leds array. 0 means that the flash strobe should not be - * synchronized. - */ - u32 sync_led_id; + const struct attribute_group *sysfs_groups[LED_FLASH_SYSFS_GROUPS_SIZE]; }; static inline struct led_classdev_flash *lcdev_to_flcdev( diff --git a/include/linux/leds.h b/include/linux/leds.h index f70f84f35674..9a2b000094cf 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -47,7 +47,6 @@ struct led_classdev { #define SET_BRIGHTNESS_ASYNC (1 << 21) #define SET_BRIGHTNESS_SYNC (1 << 22) #define LED_DEV_CAP_FLASH (1 << 23) -#define LED_DEV_CAP_SYNC_STROBE (1 << 24) /* Set LED brightness level */ /* Must not sleep, use a workqueue if needed */ @@ -105,7 +104,11 @@ struct led_classdev { extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); +extern int devm_led_classdev_register(struct device *parent, + struct led_classdev *led_cdev); extern void led_classdev_unregister(struct led_classdev *led_cdev); +extern void devm_led_classdev_unregister(struct device *parent, + struct led_classdev *led_cdev); extern void led_classdev_suspend(struct led_classdev *led_cdev); extern void led_classdev_resume(struct led_classdev *led_cdev); diff --git a/include/linux/mempool.h b/include/linux/mempool.h index b19b3023c880..69b6951e8fd2 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -36,7 +36,8 @@ extern void mempool_free(void *element, mempool_t *pool); /* * A mempool_alloc_t and mempool_free_t that get the memory from - * a slab that is passed in through pool_data. + * a slab cache that is passed in through pool_data. + * Note: the slab cache may not have a ctor function. */ void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); void mempool_free_slab(void *element, void *pool_data); diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h index b6401e7661c7..29c30ac36020 100644 --- a/include/linux/mfd/samsung/rtc.h +++ b/include/linux/mfd/samsung/rtc.h @@ -105,6 +105,8 @@ enum s2mps_rtc_reg { #define S5M_RTC_UDR_MASK (1 << S5M_RTC_UDR_SHIFT) #define S2MPS_RTC_WUDR_SHIFT 4 #define S2MPS_RTC_WUDR_MASK (1 << S2MPS_RTC_WUDR_SHIFT) +#define S2MPS13_RTC_AUDR_SHIFT 1 +#define S2MPS13_RTC_AUDR_MASK (1 << S2MPS13_RTC_AUDR_SHIFT) #define S2MPS_RTC_RUDR_SHIFT 0 #define S2MPS_RTC_RUDR_MASK (1 << S2MPS_RTC_RUDR_SHIFT) #define RTC_TCON_SHIFT 1 diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 7b6d4e9ff603..f62e7cf227c6 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -68,6 +68,8 @@ enum { MLX4_CMD_UNMAP_ICM_AUX = 0xffb, MLX4_CMD_SET_ICM_SIZE = 0xffd, MLX4_CMD_ACCESS_REG = 0x3b, + MLX4_CMD_ALLOCATE_VPP = 0x80, + MLX4_CMD_SET_VPORT_QOS = 0x81, /*master notify fw on finish for slave's flr*/ MLX4_CMD_INFORM_FLR_DONE = 0x5b, @@ -163,6 +165,9 @@ enum { MLX4_QP_FLOW_STEERING_ATTACH = 0x65, MLX4_QP_FLOW_STEERING_DETACH = 0x66, MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64, + + /* Update and read QCN parameters */ + MLX4_CMD_CONGESTION_CTRL_OPCODE = 0x68, }; enum { @@ -183,7 +188,14 @@ enum { }; enum { - /* set port opcode modifiers */ + /* Set port opcode modifiers */ + MLX4_SET_PORT_IB_OPCODE = 0x0, + MLX4_SET_PORT_ETH_OPCODE = 0x1, + MLX4_SET_PORT_BEACON_OPCODE = 0x4, +}; + +enum { + /* Set port Ethernet input modifiers */ MLX4_SET_PORT_GENERAL = 0x0, MLX4_SET_PORT_RQP_CALC = 0x1, MLX4_SET_PORT_MAC_TABLE = 0x2, @@ -233,6 +245,16 @@ struct mlx4_config_dev_params { u8 rx_csum_flags_port_2; }; +enum 
mlx4_en_congestion_control_algorithm { + MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT = 0, +}; + +enum mlx4_en_congestion_control_opmod { + MLX4_CONGESTION_CONTROL_GET_PARAMS, + MLX4_CONGESTION_CONTROL_GET_STATISTICS, + MLX4_CONGESTION_CONTROL_SET_PARAMS = 4, +}; + struct mlx4_dev; struct mlx4_cmd_mailbox { @@ -281,6 +303,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo u32 mlx4_comm_get_version(void); int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); +int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate, + int max_tx_rate); int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index e4ebff7e9d02..f9ce34bec45b 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -49,8 +49,6 @@ #define MSIX_LEGACY_SZ 4 #define MIN_MSIX_P_PORT 5 -#define MLX4_NUM_UP 8 -#define MLX4_NUM_TC 8 #define MLX4_MAX_100M_UNITS_VAL 255 /* * work around: can't set values * greater then this value when @@ -174,6 +172,7 @@ enum { MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, + MLX4_DEV_CAP_FLAG_RSS_IP_FRAG = 1LL << 52, MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53, MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55, MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59, @@ -203,7 +202,14 @@ enum { MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19, MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20, - MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21 + MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21, + MLX4_DEV_CAP_FLAG2_QCN = 1LL << 22, + MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT = 1LL << 23, + MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN = 1LL << 24, + MLX4_DEV_CAP_FLAG2_QOS_VPP = 1LL << 25, + MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26, + MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27, + MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28, }; enum { @@ -449,6 +455,21 @@ enum mlx4_module_id { MLX4_MODULE_ID_QSFP28 = 0x11, }; +enum { /* rl */ + MLX4_QP_RATE_LIMIT_NONE = 0, + MLX4_QP_RATE_LIMIT_KBS = 1, + MLX4_QP_RATE_LIMIT_MBS = 2, + MLX4_QP_RATE_LIMIT_GBS = 3 +}; + +struct mlx4_rate_limit_caps { + u16 num_rates; /* Number of different rates */ + u8 min_unit; + u16 min_val; + u8 max_unit; + u16 max_val; +}; + static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) { return (major << 32) | (minor << 16) | subminor; @@ -564,6 +585,7 @@ struct mlx4_caps { u32 dmfs_high_rate_qpn_base; u32 dmfs_high_rate_qpn_range; u32 vf_caps; + struct mlx4_rate_limit_caps rl_caps; }; struct mlx4_buf_list { @@ -982,6 +1004,11 @@ static inline int mlx4_is_slave(struct mlx4_dev *dev) return dev->flags & MLX4_FLAG_SLAVE; } +static inline int mlx4_is_eth(struct mlx4_dev *dev, int port) +{ + return dev->caps.port_type[port] == MLX4_PORT_TYPE_IB ? 
0 : 1; +} + int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, struct mlx4_buf *buf, gfp_t gfp); void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); @@ -1282,14 +1309,13 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port); int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); -void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap); int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, u8 promisc); -int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc); -int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, - u8 *pg, u16 *ratelimit); +int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time); +int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, + u8 ignore_fcs_value); int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable); int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx); int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 551f85456c11..6fed539e5456 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -207,14 +207,17 @@ struct mlx4_qp_context { __be32 msn; __be16 rq_wqe_counter; __be16 sq_wqe_counter; - u32 reserved3[2]; + u32 reserved3; + __be16 rate_limit_params; + u8 reserved4; + u8 qos_vport; __be32 param3; __be32 nummmcpeers_basemkey; u8 log_page_size; - u8 reserved4[2]; + u8 reserved5[2]; u8 mtt_base_addr_h; __be32 mtt_base_addr_l; - u32 reserved5[10]; + u32 reserved6[10]; }; struct mlx4_update_qp_context { @@ -229,6 +232,8 @@ struct mlx4_update_qp_context { enum { MLX4_UPD_QP_MASK_PM_STATE = 32, MLX4_UPD_QP_MASK_VSD = 33, + MLX4_UPD_QP_MASK_QOS_VPP = 34, + MLX4_UPD_QP_MASK_RATE_LIMIT = 35, }; enum { @@ -428,7 +433,9 @@ struct mlx4_wqe_inline_seg { enum mlx4_update_qp_attr { MLX4_UPDATE_QP_SMAC = 1 << 0, MLX4_UPDATE_QP_VSD = 1 << 1, - MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1 + MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2, + MLX4_UPDATE_QP_QOS_VPORT = 1 << 3, + MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 4) - 1 }; enum mlx4_update_qp_params_flags { @@ -437,7 +444,10 @@ enum mlx4_update_qp_params_flags { struct mlx4_update_qp_params { u8 smac_index; + u8 qos_vport; u32 flags; + u16 rate_unit; + u16 rate_val; }; int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h index 2826a4b6071e..68cd08f02c2f 100644 --- a/include/linux/mlx5/cmd.h +++ b/include/linux/mlx5/cmd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index f6b17ac601bd..2695ced222df 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -137,14 +137,15 @@ enum { static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, void __iomem *uar_page, - spinlock_t *doorbell_lock) + spinlock_t *doorbell_lock, + u32 cons_index) { __be32 doorbell[2]; u32 sn; u32 ci; sn = cq->arm_sn & 3; - ci = cq->cons_index & 0xffffff; + ci = cons_index & 0xffffff; *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 4e5bd813bb9a..abf65c790421 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h index 163a818411e7..afc78a3f4462 100644 --- a/include/linux/mlx5/doorbell.h +++ b/include/linux/mlx5/doorbell.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 166d9315fe4b..9a90e7523dc2 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -232,6 +232,9 @@ struct mlx5_cmd_stats { }; struct mlx5_cmd { + void *cmd_alloc_buf; + dma_addr_t alloc_dma; + int alloc_size; void *cmd_buf; dma_addr_t dma; u16 cmdif_rev; @@ -407,7 +410,7 @@ struct mlx5_core_srq { struct mlx5_eq_table { void __iomem *update_ci; void __iomem *update_arm_ci; - struct list_head *comp_eq_head; + struct list_head comp_eqs_list; struct mlx5_eq pages_eq; struct mlx5_eq async_eq; struct mlx5_eq cmd_eq; @@ -722,6 +725,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_start_eqs(struct mlx5_core_dev *dev); int mlx5_stop_eqs(struct mlx5_core_dev *dev); +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); @@ -777,14 +781,22 @@ enum { MAX_MR_CACHE_ENTRIES = 16, }; +enum { + MLX5_INTERFACE_PROTOCOL_IB = 0, + MLX5_INTERFACE_PROTOCOL_ETH = 1, +}; + struct mlx5_interface { void * (*add)(struct mlx5_core_dev *dev); void (*remove)(struct mlx5_core_dev *dev, void *context); void (*event)(struct mlx5_core_dev *dev, void *context, enum mlx5_dev_event event, unsigned long param); + void * (*get_dev)(void *context); + int protocol; struct list_head list; }; +void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 5f48b8f592c5..cb3ad17edd1f 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 61f7a342d1bf..310b5f7fd6ae 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h index e1a363a33663..f43ed054a3e0 100644 --- a/include/linux/mlx5/srq.h +++ b/include/linux/mlx5/srq.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU diff --git a/include/linux/mm.h b/include/linux/mm.h index 6571dd78e984..8b086070c3a5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -251,6 +251,9 @@ struct vm_operations_struct { * writable, if an error is returned it will cause a SIGBUS */ int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); + /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ + int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); + /* called by access_process_vm when get_user_pages() fails, typically * for use by special VMAs that can switch between memory and hardware */ @@ -494,18 +497,9 @@ static inline int page_count(struct page *page) return atomic_read(&compound_head(page)->_count); } -#ifdef CONFIG_HUGETLB_PAGE -extern int PageHeadHuge(struct page *page_head); -#else /* CONFIG_HUGETLB_PAGE */ -static inline int PageHeadHuge(struct page *page_head) -{ - return 0; -} -#endif /* CONFIG_HUGETLB_PAGE */ - static inline bool __compound_tail_refcounted(struct page *page) { - return !PageSlab(page) && !PageHeadHuge(page); + return PageAnon(page) && !PageSlab(page) && !PageHeadHuge(page); } /* @@ -571,53 +565,6 @@ static inline void init_page_count(struct page *page) atomic_set(&page->_count, 1); } -/* - * PageBuddy() indicate that the page is free and in the buddy system - * (see mm/page_alloc.c). - * - * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to - * -2 so that an underflow of the page_mapcount() won't be mistaken - * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very - * efficiently by most CPU architectures. - */ -#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) - -static inline int PageBuddy(struct page *page) -{ - return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; -} - -static inline void __SetPageBuddy(struct page *page) -{ - VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); - atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); -} - -static inline void __ClearPageBuddy(struct page *page) -{ - VM_BUG_ON_PAGE(!PageBuddy(page), page); - atomic_set(&page->_mapcount, -1); -} - -#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) - -static inline int PageBalloon(struct page *page) -{ - return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE; -} - -static inline void __SetPageBalloon(struct page *page) -{ - VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); - atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE); -} - -static inline void __ClearPageBalloon(struct page *page) -{ - VM_BUG_ON_PAGE(!PageBalloon(page), page); - atomic_set(&page->_mapcount, -1); -} - void put_page(struct page *page); void put_pages_list(struct list_head *pages); @@ -1006,34 +953,10 @@ void page_address_init(void); #define page_address_init() do { } while(0) #endif -/* - * On an anonymous page mapped into a user virtual memory area, - * page->mapping points to its anon_vma, not to a struct address_space; - * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. - * - * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, - * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit; - * and then page->mapping points, not to an anon_vma, but to a private - * structure which KSM associates with that merged page. See ksm.h. - * - * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used. 
- * - * Please note that, confusingly, "page_mapping" refers to the inode - * address_space which maps the page from disk; whereas "page_mapped" - * refers to user virtual address space into which the page is mapped. - */ -#define PAGE_MAPPING_ANON 1 -#define PAGE_MAPPING_KSM 2 -#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) - +extern void *page_rmapping(struct page *page); +extern struct anon_vma *page_anon_vma(struct page *page); extern struct address_space *page_mapping(struct page *page); -/* Neutral page->mapping pointer to address_space or anon_vma or other */ -static inline void *page_rmapping(struct page *page) -{ - return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); -} - extern struct address_space *__page_file_mapping(struct page *); static inline @@ -1045,11 +968,6 @@ struct address_space *page_file_mapping(struct page *page) return page->mapping; } -static inline int PageAnon(struct page *page) -{ - return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; -} - /* * Return the pagecache index of the passed page. Regular pagecache pages * use ->index whereas swapcache pages use ->private @@ -1975,10 +1893,10 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); static inline unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info) { - if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN)) - return unmapped_area(info); - else + if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) return unmapped_area_topdown(info); + else + return unmapped_area(info); } /* truncate.c */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 590630eb59ba..8d37e26a1007 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -429,7 +429,7 @@ struct mm_struct { #endif /* store ref to file /proc/<pid>/exe symlink points to */ - struct file *exe_file; + struct file __rcu *exe_file; #ifdef CONFIG_MMU_NOTIFIER struct mmu_notifier_mm *mmu_notifier_mm; #endif diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 996807963716..83430f2ea757 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -33,6 +33,8 @@ #define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 #define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 +#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 +#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 #define SDIO_VENDOR_ID_INTEL 0x0089 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 2782df47101e..54d74f6eb233 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -842,16 +842,16 @@ static inline int populated_zone(struct zone *zone) extern int movable_zone; +#ifdef CONFIG_HIGHMEM static inline int zone_movable_is_highmem(void) { -#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP return movable_zone == ZONE_HIGHMEM; -#elif defined(CONFIG_HIGHMEM) - return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM; #else - return 0; + return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM; #endif } +#endif static inline int is_highmem_idx(enum zone_type idx) { diff --git a/include/linux/nbd.h b/include/linux/nbd.h deleted file mode 100644 index f62f78aef4ac..000000000000 --- a/include/linux/nbd.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * 1999 Copyright (C) Pavel Machek, pavel@ucw.cz. This code is GPL. - * 1999/11/04 Copyright (C) 1999 VMware, Inc. 
(Regis "HPReg" Duchesne) - * Made nbd_end_request() use the io_request_lock - * 2001 Copyright (C) Steven Whitehouse - * New nbd_end_request() for compatibility with new linux block - * layer code. - * 2003/06/24 Louis D. Langholtz <ldl@aros.net> - * Removed unneeded blksize_bits field from nbd_device struct. - * Cleanup PARANOIA usage & code. - * 2004/02/19 Paul Clements - * Removed PARANOIA, plus various cleanup and comments - */ -#ifndef LINUX_NBD_H -#define LINUX_NBD_H - - -#include <linux/wait.h> -#include <linux/mutex.h> -#include <uapi/linux/nbd.h> - -struct request; - -struct nbd_device { - int flags; - int harderror; /* Code of hard error */ - struct socket * sock; /* If == NULL, device is not ready, yet */ - int magic; - - spinlock_t queue_lock; - struct list_head queue_head; /* Requests waiting result */ - struct request *active_req; - wait_queue_head_t active_wq; - struct list_head waiting_queue; /* Requests to be sent */ - wait_queue_head_t waiting_wq; - - struct mutex tx_lock; - struct gendisk *disk; - int blksize; - u64 bytesize; - pid_t pid; /* pid of nbd-client, if attached */ - int xmit_timeout; - int disconnect; /* a disconnect has been requested by user */ -}; - -#endif diff --git a/include/linux/net.h b/include/linux/net.h index 17d83393afcc..738ea48be889 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -120,7 +120,6 @@ struct socket { struct vm_area_struct; struct page; -struct kiocb; struct sockaddr; struct msghdr; struct module; @@ -162,8 +161,8 @@ struct proto_ops { int (*compat_getsockopt)(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); #endif - int (*sendmsg) (struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len); + int (*sendmsg) (struct socket *sock, struct msghdr *m, + size_t total_len); /* Notes for implementing recvmsg: * =============================== * msg->msg_namelen should get updated by the recvmsg handlers @@ -172,9 +171,8 @@ struct proto_ops { * handlers can assume that msg.msg_name is either NULL or has * a minimum size of sizeof(struct sockaddr_storage). 
*/ - int (*recvmsg) (struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len, - int flags); + int (*recvmsg) (struct socket *sock, struct msghdr *m, + size_t total_len, int flags); int (*mmap) (struct file *file, struct socket *sock, struct vm_area_struct * vma); ssize_t (*sendpage) (struct socket *sock, struct page *page, @@ -213,7 +211,7 @@ int sock_create(int family, int type, int proto, struct socket **res); int sock_create_kern(int family, int type, int proto, struct socket **res); int sock_create_lite(int family, int type, int proto, struct socket **res); void sock_release(struct socket *sock); -int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len); +int sock_sendmsg(struct socket *sock, struct msghdr *msg); int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 278738873703..b5679aed660b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -261,7 +261,6 @@ struct header_ops { unsigned short type, const void *daddr, const void *saddr, unsigned int len); int (*parse)(const struct sk_buff *skb, unsigned char *haddr); - int (*rebuild)(struct sk_buff *skb); int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); void (*cache_update)(struct hh_cache *hh, const struct net_device *dev, @@ -588,6 +587,7 @@ struct netdev_queue { #ifdef CONFIG_BQL struct dql dql; #endif + unsigned long tx_maxrate; } ____cacheline_aligned_in_smp; static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) @@ -795,7 +795,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, * struct net_device *dev); * Called when a packet needs to be transmitted. - * Must return NETDEV_TX_OK , NETDEV_TX_BUSY. + * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop + * the queue before that can happen; it's for obsolete devices and weird + * corner cases, but the stack really does a non-trivial amount + * of useless work if you return NETDEV_TX_BUSY. * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) * Required can not be NULL. * @@ -875,6 +878,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); * int (*ndo_set_vf_port)(struct net_device *dev, int vf, * struct nlattr *port[]); + * + * Enable or disable the VF ability to query its RSS Redirection Table and + * Hash Key. This is needed since on some devices VF share this information + * with PF and querying it may adduce a theoretical security risk. + * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) * Called to setup 'tc' number of traffic classes in the net device. This @@ -1026,15 +1034,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * be otherwise expressed by feature flags. The check is called with * the set of features that the stack has calculated and it returns * those the driver believes to be appropriate. - * - * int (*ndo_switch_parent_id_get)(struct net_device *dev, - * struct netdev_phys_item_id *psid); - * Called to get an ID of the switch chip this port is part of. 
- * If driver implements this, it indicates that it represents a port - * of a switch chip. - * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state); - * Called to notify switch device port of bridge port STP - * state change. + * int (*ndo_set_tx_maxrate)(struct net_device *dev, + * int queue_index, u32 maxrate); + * Called when a user wants to set a max-rate limitation of specific + * TX queue. + * int (*ndo_get_iflink)(const struct net_device *dev); + * Called to get the iflink value of this device. */ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1099,6 +1104,9 @@ struct net_device_ops { struct nlattr *port[]); int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); + int (*ndo_set_vf_rss_query_en)( + struct net_device *dev, + int vf, bool setting); int (*ndo_setup_tc)(struct net_device *dev, u8 tc); #if IS_ENABLED(CONFIG_FCOE) int (*ndo_fcoe_enable)(struct net_device *dev); @@ -1172,6 +1180,8 @@ struct net_device_ops { bool new_carrier); int (*ndo_get_phys_port_id)(struct net_device *dev, struct netdev_phys_item_id *ppid); + int (*ndo_get_phys_port_name)(struct net_device *dev, + char *name, size_t len); void (*ndo_add_vxlan_port)(struct net_device *dev, sa_family_t sa_family, __be16 port); @@ -1191,12 +1201,10 @@ struct net_device_ops { netdev_features_t (*ndo_features_check) (struct sk_buff *skb, struct net_device *dev, netdev_features_t features); -#ifdef CONFIG_NET_SWITCHDEV - int (*ndo_switch_parent_id_get)(struct net_device *dev, - struct netdev_phys_item_id *psid); - int (*ndo_switch_port_stp_update)(struct net_device *dev, - u8 state); -#endif + int (*ndo_set_tx_maxrate)(struct net_device *dev, + int queue_index, + u32 maxrate); + int (*ndo_get_iflink)(const struct net_device *dev); }; /** @@ -1305,6 +1313,8 @@ enum netdev_priv_flags { * @base_addr: Device I/O address * @irq: Device IRQ number * + * @carrier_changes: Stats to monitor carrier on<->off transitions + * * @state: Generic network queuing layer state, see netdev_state_t * @dev_list: The global list of network devices * @napi_list: List entry, that is used for polling napi devices @@ -1328,7 +1338,7 @@ enum netdev_priv_flags { * @mpls_features: Mask of features inheritable by MPLS * * @ifindex: interface index - * @iflink: unique device identifier + * @group: The group, that the device belongs to * * @stats: Statistics struct, which was left as a legacy, use * rtnl_link_stats64 instead @@ -1338,8 +1348,6 @@ enum netdev_priv_flags { * @tx_dropped: Dropped packets by core network, * do not use this in drivers * - * @carrier_changes: Stats to monitor carrier on<->off transitions - * * @wireless_handlers: List of functions to handle Wireless Extensions, * instead of ioctl, * see <net/iw_handler.h> for details. @@ -1348,8 +1356,7 @@ enum netdev_priv_flags { * @netdev_ops: Includes several pointers to callbacks, * if one wants to override the ndo_*() functions * @ethtool_ops: Management operations - * @fwd_ops: Management operations - * @header_ops: Includes callbacks for creating,parsing,rebuilding,etc + * @header_ops: Includes callbacks for creating,parsing,caching,etc * of Layer 2 headers. 
* * @flags: Interface flags (a la BSD) @@ -1383,14 +1390,14 @@ enum netdev_priv_flags { * @dev_port: Used to differentiate devices that share * the same function * @addr_list_lock: XXX: need comments on this one - * @uc: unicast mac addresses - * @mc: multicast mac addresses - * @dev_addrs: list of device hw addresses - * @queues_kset: Group of all Kobjects in the Tx and RX queues * @uc_promisc: Counter, that indicates, that promiscuous mode * has been enabled due to the need to listen to * additional unicast addresses in a device that * does not implement ndo_set_rx_mode() + * @uc: unicast mac addresses + * @mc: multicast mac addresses + * @dev_addrs: list of device hw addresses + * @queues_kset: Group of all Kobjects in the Tx and RX queues * @promiscuity: Number of times, the NIC is told to work in * Promiscuous mode, if it becomes 0 the NIC will * exit from working in Promiscuous mode @@ -1420,6 +1427,12 @@ enum netdev_priv_flags { * @ingress_queue: XXX: need comments on this one * @broadcast: hw bcast address * + * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, + * indexed by RX queue number. Assigned by driver. + * This must only be set if the ndo_rx_flow_steer + * operation is defined + * @index_hlist: Device index hash chain + * * @_tx: Array of TX queues * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time * @real_num_tx_queues: Number of TX queues currently active in device @@ -1429,11 +1442,6 @@ enum netdev_priv_flags { * * @xps_maps: XXX: need comments on this one * - * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, - * indexed by RX queue number. Assigned by driver. - * This must only be set if the ndo_rx_flow_steer - * operation is defined - * * @trans_start: Time (in jiffies) of last Tx * @watchdog_timeo: Represents the timeout that is used by * the watchdog ( see dev_watchdog() ) @@ -1441,7 +1449,6 @@ enum netdev_priv_flags { * * @pcpu_refcnt: Number of references to this device * @todo_list: Delayed register/unregister - * @index_hlist: Device index hash chain * @link_watch_list: XXX: need comments on this one * * @reg_state: Register/unregister state machine @@ -1489,7 +1496,6 @@ enum netdev_priv_flags { * * @qdisc_tx_busylock: XXX: need comments on this one * - * @group: The group, that the device belongs to * @pm_qos_req: Power Management QoS object * * FIXME: cleanup struct net_device such that network protocol info @@ -1509,6 +1515,8 @@ struct net_device { unsigned long base_addr; int irq; + atomic_t carrier_changes; + /* * Some hardware also needs these fields (state,dev_list, * napi_list,unreg_list,close_list) but they are not @@ -1542,22 +1550,22 @@ struct net_device { netdev_features_t mpls_features; int ifindex; - int iflink; + int group; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; - atomic_t carrier_changes; - #ifdef CONFIG_WIRELESS_EXT const struct iw_handler_def * wireless_handlers; struct iw_public_data * wireless_data; #endif const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; - const struct forwarding_accel_ops *fwd_ops; +#ifdef CONFIG_NET_SWITCHDEV + const struct swdev_ops *swdev_ops; +#endif const struct header_ops *header_ops; @@ -1588,6 +1596,8 @@ struct net_device { unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; + unsigned char name_assign_type; + bool uc_promisc; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; @@ -1595,10 +1605,6 @@ struct net_device { 
#ifdef CONFIG_SYSFS struct kset *queues_kset; #endif - - unsigned char name_assign_type; - - bool uc_promisc; unsigned int promiscuity; unsigned int allmulti; @@ -1645,7 +1651,10 @@ struct net_device { struct netdev_queue __rcu *ingress_queue; unsigned char broadcast[MAX_ADDR_LEN]; - +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rx_cpu_rmap; +#endif + struct hlist_node index_hlist; /* * Cache lines mostly used on transmit path @@ -1656,13 +1665,11 @@ struct net_device { struct Qdisc *qdisc; unsigned long tx_queue_len; spinlock_t tx_global_lock; + int watchdog_timeo; #ifdef CONFIG_XPS struct xps_dev_maps __rcu *xps_maps; #endif -#ifdef CONFIG_RFS_ACCEL - struct cpu_rmap *rx_cpu_rmap; -#endif /* These may be needed for future network-power-down code. */ @@ -1672,13 +1679,11 @@ struct net_device { */ unsigned long trans_start; - int watchdog_timeo; struct timer_list watchdog_timer; int __percpu *pcpu_refcnt; struct list_head todo_list; - struct hlist_node index_hlist; struct list_head link_watch_list; enum { NETREG_UNINITIALIZED=0, @@ -1702,9 +1707,7 @@ struct net_device { struct netpoll_info __rcu *npinfo; #endif -#ifdef CONFIG_NET_NS - struct net *nd_net; -#endif + possible_net_t nd_net; /* mid-layer private */ union { @@ -1745,8 +1748,6 @@ struct net_device { #endif struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; - int group; - struct pm_qos_request pm_qos_req; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -1844,10 +1845,7 @@ struct net *dev_net(const struct net_device *dev) static inline void dev_net_set(struct net_device *dev, struct net *net) { -#ifdef CONFIG_NET_NS - release_net(dev->nd_net); - dev->nd_net = hold_net(net); -#endif + write_pnet(&dev->nd_net, net); } static inline bool netdev_uses_dsa(struct net_device *dev) @@ -2159,6 +2157,7 @@ void __dev_remove_pack(struct packet_type *pt); void dev_add_offload(struct packet_offload *po); void dev_remove_offload(struct packet_offload *po); +int dev_get_iflink(const struct net_device *dev); struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, unsigned short mask); struct net_device *dev_get_by_name(struct net *net, const char *name); @@ -2167,9 +2166,14 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name); int dev_alloc_name(struct net_device *dev, const char *name); int dev_open(struct net_device *dev); int dev_close(struct net_device *dev); +int dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); -int dev_loopback_xmit(struct sk_buff *newskb); -int dev_queue_xmit(struct sk_buff *skb); +int dev_loopback_xmit(struct sock *sk, struct sk_buff *newskb); +int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb); +static inline int dev_queue_xmit(struct sk_buff *skb) +{ + return dev_queue_xmit_sk(skb->sk, skb); +} int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); int register_netdevice(struct net_device *dev); void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); @@ -2409,15 +2413,6 @@ static inline int dev_parse_header(const struct sk_buff *skb, return dev->header_ops->parse(skb, haddr); } -static inline int dev_rebuild_header(struct sk_buff *skb) -{ - const struct net_device *dev = skb->dev; - - if (!dev->header_ops || !dev->header_ops->rebuild) - return 0; - return dev->header_ops->rebuild(skb); -} - typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); int register_gifconf(unsigned int family, gifconf_func_t *gifconf); static 
inline int unregister_gifconf(unsigned int family) @@ -2939,7 +2934,11 @@ static inline void dev_consume_skb_any(struct sk_buff *skb) int netif_rx(struct sk_buff *skb); int netif_rx_ni(struct sk_buff *skb); -int netif_receive_skb(struct sk_buff *skb); +int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb); +static inline int netif_receive_skb(struct sk_buff *skb) +{ + return netif_receive_skb_sk(skb->sk, skb); +} gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); void napi_gro_flush(struct napi_struct *napi, bool flush_old); struct sk_buff *napi_get_frags(struct napi_struct *napi); @@ -2975,6 +2974,8 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid); +int dev_get_phys_port_name(struct net_device *dev, + char *name, size_t len); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@ -3679,6 +3680,9 @@ void netdev_change_features(struct net_device *dev); void netif_stacked_transfer_operstate(const struct net_device *rootdev, struct net_device *dev); +netdev_features_t passthru_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); netdev_features_t netif_skb_features(struct sk_buff *skb); static inline bool net_gso_ok(netdev_features_t features, int gso_type) diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 2517ece98820..63560d0a8dfe 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -44,11 +44,39 @@ int netfilter_init(void); struct sk_buff; struct nf_hook_ops; + +struct sock; + +struct nf_hook_state { + unsigned int hook; + int thresh; + u_int8_t pf; + struct net_device *in; + struct net_device *out; + struct sock *sk; + int (*okfn)(struct sock *, struct sk_buff *); +}; + +static inline void nf_hook_state_init(struct nf_hook_state *p, + unsigned int hook, + int thresh, u_int8_t pf, + struct net_device *indev, + struct net_device *outdev, + struct sock *sk, + int (*okfn)(struct sock *, struct sk_buff *)) +{ + p->hook = hook; + p->thresh = thresh; + p->pf = pf; + p->in = indev; + p->out = outdev; + p->sk = sk; + p->okfn = okfn; +} + typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)); + const struct nf_hook_state *state); struct nf_hook_ops { struct list_head list; @@ -118,9 +146,7 @@ static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) } #endif -int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, - struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sk_buff *), int thresh); +int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); /** * nf_hook_thresh - call a netfilter hook @@ -130,21 +156,29 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, * value indicates the packet has been consumed by the hook. 
*/ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, + struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sk_buff *), int thresh) + int (*okfn)(struct sock *, struct sk_buff *), + int thresh) { - if (nf_hooks_active(pf, hook)) - return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh); + if (nf_hooks_active(pf, hook)) { + struct nf_hook_state state; + + nf_hook_state_init(&state, hook, thresh, pf, + indev, outdev, sk, okfn); + return nf_hook_slow(skb, &state); + } return 1; } -static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, - struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sk_buff *)) +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, + struct sk_buff *skb, struct net_device *indev, + struct net_device *outdev, + int (*okfn)(struct sock *, struct sk_buff *)) { - return nf_hook_thresh(pf, hook, skb, indev, outdev, okfn, INT_MIN); + return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN); } /* Activate hook; either okfn or kfree_skb called, unless a hook @@ -165,35 +199,36 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, */ static inline int -NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sk_buff *skb, - struct net_device *in, struct net_device *out, - int (*okfn)(struct sk_buff *), int thresh) +NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk, + struct sk_buff *skb, struct net_device *in, + struct net_device *out, + int (*okfn)(struct sock *, struct sk_buff *), int thresh) { - int ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, thresh); + int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh); if (ret == 1) - ret = okfn(skb); + ret = okfn(sk, skb); return ret; } static inline int -NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb, - struct net_device *in, struct net_device *out, - int (*okfn)(struct sk_buff *), bool cond) +NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk, + struct sk_buff *skb, struct net_device *in, struct net_device *out, + int (*okfn)(struct sock *, struct sk_buff *), bool cond) { int ret; if (!cond || - ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1)) - ret = okfn(skb); + ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1)) + ret = okfn(sk, skb); return ret; } static inline int -NF_HOOK(uint8_t pf, unsigned int hook, struct sk_buff *skb, +NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, - int (*okfn)(struct sk_buff *)) + int (*okfn)(struct sock *, struct sk_buff *)) { - return NF_HOOK_THRESH(pf, hook, skb, in, out, okfn, INT_MIN); + return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN); } /* Call setsockopt() */ @@ -293,19 +328,21 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) } #else /* !CONFIG_NETFILTER */ -#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb) -#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb) +#define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb) +#define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb) static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, + struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sk_buff *), int thresh) + int (*okfn)(struct sock *sk, struct sk_buff 
*), int thresh) { - return okfn(skb); + return okfn(sk, skb); } -static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, - struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sk_buff *)) +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, + struct sk_buff *skb, struct net_device *indev, + struct net_device *outdev, + int (*okfn)(struct sock *, struct sk_buff *)) { return 1; } diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index f1606fa6132d..34b172301558 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -483,7 +483,7 @@ static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr) if (!__nested) return -EMSGSIZE; - ret = nla_put_net32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); + ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); if (!ret) ipset_nest_end(skb, __nested); return ret; @@ -497,8 +497,7 @@ static inline int nla_put_ipaddr6(struct sk_buff *skb, int type, if (!__nested) return -EMSGSIZE; - ret = nla_put(skb, IPSET_ATTR_IPADDR_IPV6, - sizeof(struct in6_addr), ipaddrptr); + ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr); if (!ret) ipset_nest_end(skb, __nested); return ret; diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index cfb7191e6efa..c22a7fb8d0df 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -54,8 +54,7 @@ extern struct xt_table *arpt_register_table(struct net *net, extern void arpt_unregister_table(struct xt_table *table); extern unsigned int arpt_do_table(struct sk_buff *skb, unsigned int hook, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, struct xt_table *table); #ifdef CONFIG_COMPAT diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index c755e4971fa3..ab8f76dba668 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -2,7 +2,7 @@ #define __LINUX_BRIDGE_NETFILTER_H #include <uapi/linux/netfilter_bridge.h> - +#include <linux/skbuff.h> enum nf_br_hook_priorities { NF_BR_PRI_FIRST = INT_MIN, @@ -17,110 +17,48 @@ enum nf_br_hook_priorities { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) -#define BRNF_PKT_TYPE 0x01 #define BRNF_BRIDGED_DNAT 0x02 -#define BRNF_BRIDGED 0x04 #define BRNF_NF_BRIDGE_PREROUTING 0x08 -#define BRNF_8021Q 0x10 -#define BRNF_PPPoE 0x20 -static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) +static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) { - switch (skb->protocol) { - case __cpu_to_be16(ETH_P_8021Q): - return VLAN_HLEN; - case __cpu_to_be16(ETH_P_PPP_SES): + if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE) return PPPOE_SES_HLEN; - default: - return 0; - } + return 0; } -static inline void nf_bridge_update_protocol(struct sk_buff *skb) -{ - if (skb->nf_bridge->mask & BRNF_8021Q) - skb->protocol = htons(ETH_P_8021Q); - else if (skb->nf_bridge->mask & BRNF_PPPoE) - skb->protocol = htons(ETH_P_PPP_SES); -} +int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb); -/* Fill in the header for fragmented IP packets handled by - * the IPv4 connection tracking code. 
- * - * Only used in br_forward.c - */ -static inline int nf_bridge_copy_header(struct sk_buff *skb) +static inline void br_drop_fake_rtable(struct sk_buff *skb) { - int err; - unsigned int header_size; - - nf_bridge_update_protocol(skb); - header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); - err = skb_cow_head(skb, header_size); - if (err) - return err; + struct dst_entry *dst = skb_dst(skb); - skb_copy_to_linear_data_offset(skb, -header_size, - skb->nf_bridge->data, header_size); - __skb_push(skb, nf_bridge_encap_header_len(skb)); - return 0; + if (dst && (dst->flags & DST_FAKE_RTABLE)) + skb_dst_drop(skb); } -static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb) +static inline int nf_bridge_get_physinif(const struct sk_buff *skb) { - if (skb->nf_bridge && - skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)) - return nf_bridge_copy_header(skb); - return 0; + return skb->nf_bridge ? skb->nf_bridge->physindev->ifindex : 0; } -static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) +static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) { - if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE)) - return PPPOE_SES_HLEN; - return 0; + return skb->nf_bridge ? skb->nf_bridge->physoutdev->ifindex : 0; } -int br_handle_frame_finish(struct sk_buff *skb); -/* Only used in br_device.c */ -static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb) +static inline struct net_device * +nf_bridge_get_physindev(const struct sk_buff *skb) { - struct nf_bridge_info *nf_bridge = skb->nf_bridge; - - skb_pull(skb, ETH_HLEN); - nf_bridge->mask ^= BRNF_BRIDGED_DNAT; - skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), - skb->nf_bridge->data, ETH_HLEN-ETH_ALEN); - skb->dev = nf_bridge->physindev; - return br_handle_frame_finish(skb); + return skb->nf_bridge ? skb->nf_bridge->physindev : NULL; } -/* This is called by the IP fragmenting code and it ensures there is - * enough room for the encapsulating header (if there is one). */ -static inline unsigned int nf_bridge_pad(const struct sk_buff *skb) +static inline struct net_device * +nf_bridge_get_physoutdev(const struct sk_buff *skb) { - if (skb->nf_bridge) - return nf_bridge_encap_header_len(skb); - return 0; + return skb->nf_bridge ? 
skb->nf_bridge->physoutdev : NULL; } - -struct bridge_skb_cb { - union { - __be32 ipv4; - } daddr; -}; - -static inline void br_drop_fake_rtable(struct sk_buff *skb) -{ - struct dst_entry *dst = skb_dst(skb); - - if (dst && (dst->flags & DST_FAKE_RTABLE)) - skb_dst_drop(skb); -} - #else -#define nf_bridge_maybe_copy_header(skb) (0) -#define nf_bridge_pad(skb) (0) #define br_drop_fake_rtable(skb) do { } while (0) #endif /* CONFIG_BRIDGE_NETFILTER */ diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 34e7a2b7f867..f1bd3962e6b6 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -12,9 +12,10 @@ #ifndef __LINUX_BRIDGE_EFF_H #define __LINUX_BRIDGE_EFF_H +#include <linux/if.h> +#include <linux/if_ether.h> #include <uapi/linux/netfilter_bridge/ebtables.h> - /* return values for match() functions */ #define EBT_MATCH 0 #define EBT_NOMATCH 1 diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index 901e84db847d..4073510da485 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -65,8 +65,7 @@ struct ipt_error { extern void *ipt_alloc_initial_table(const struct xt_table *); extern unsigned int ipt_do_table(struct sk_buff *skb, unsigned int hook, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, struct xt_table *table); #ifdef CONFIG_COMPAT diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 610208b18c05..b40d2b635778 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -31,8 +31,7 @@ extern struct xt_table *ip6t_register_table(struct net *net, extern void ip6t_unregister_table(struct net *net, struct xt_table *table); extern unsigned int ip6t_do_table(struct sk_buff *skb, unsigned int hook, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, struct xt_table *table); /* Check for an extension */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 02fc86d2348e..6835c1279df7 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -134,7 +134,7 @@ struct netlink_callback { struct netlink_notify { struct net *net; - int portid; + u32 portid; int protocol; }; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index b01ccf371fdc..410abd172feb 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -447,13 +447,12 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file) /* * linux/fs/nfs/direct.c */ -extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t); +extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *, loff_t); extern ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, loff_t pos); extern ssize_t nfs_file_direct_write(struct kiocb *iocb, - struct iov_iter *iter, - loff_t pos); + struct iov_iter *iter); /* * linux/fs/nfs/dir.c diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 0adad4a5419b..8dbd05e70f09 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -117,8 +117,9 @@ struct nvme_ns { unsigned ns_id; int lba_shift; - int ms; - int pi_type; + u16 ms; + bool ext; + u8 pi_type; u64 mode_select_num_blocks; u32 mode_select_block_len; }; diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index d449018d0726..8f2237eb3485 100644 --- a/include/linux/of_mdio.h 
+++ b/include/linux/of_mdio.h @@ -24,6 +24,7 @@ struct phy_device *of_phy_attach(struct net_device *dev, phy_interface_t iface); extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); +extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); #else /* CONFIG_OF */ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) @@ -60,6 +61,12 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np) { return NULL; } + +static inline int of_mdio_parse_addr(struct device *dev, + const struct device_node *np) +{ + return -ENOSYS; +} #endif /* CONFIG_OF */ #if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY) diff --git a/include/linux/of_net.h b/include/linux/of_net.h index 34597c8c1a4c..9cd72aab76fe 100644 --- a/include/linux/of_net.h +++ b/include/linux/of_net.h @@ -9,8 +9,11 @@ #ifdef CONFIG_OF_NET #include <linux/of.h> + +struct net_device; extern int of_get_phy_mode(struct device_node *np); extern const void *of_get_mac_address(struct device_node *np); +extern struct net_device *of_find_net_device_by_node(struct device_node *np); #else static inline int of_get_phy_mode(struct device_node *np) { @@ -21,6 +24,11 @@ static inline const void *of_get_mac_address(struct device_node *np) { return NULL; } + +static inline struct net_device *of_find_net_device_by_node(struct device_node *np) +{ + return NULL; +} #endif #endif /* __LINUX_OF_NET_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index c851ff92d5b3..f34e040b34e9 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -289,6 +289,47 @@ PAGEFLAG_FALSE(HWPoison) #define __PG_HWPOISON 0 #endif +/* + * On an anonymous page mapped into a user virtual memory area, + * page->mapping points to its anon_vma, not to a struct address_space; + * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. + * + * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, + * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit; + * and then page->mapping points, not to an anon_vma, but to a private + * structure which KSM associates with that merged page. See ksm.h. + * + * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used. + * + * Please note that, confusingly, "page_mapping" refers to the inode + * address_space which maps the page from disk; whereas "page_mapped" + * refers to user virtual address space into which the page is mapped. + */ +#define PAGE_MAPPING_ANON 1 +#define PAGE_MAPPING_KSM 2 +#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) + +static inline int PageAnon(struct page *page) +{ + return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; +} + +#ifdef CONFIG_KSM +/* + * A KSM page is one of those write-protected "shared pages" or "merged pages" + * which KSM maps into multiple mms, wherever identical anonymous page content + * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any + * anon_vma, but to that page's node of the stable tree. 
+ */ +static inline int PageKsm(struct page *page) +{ + return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == + (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); +} +#else +TESTPAGEFLAG_FALSE(Ksm) +#endif + u64 stable_page_flags(struct page *page); static inline int PageUptodate(struct page *page) @@ -426,6 +467,21 @@ static inline void ClearPageCompound(struct page *page) #endif /* !PAGEFLAGS_EXTENDED */ +#ifdef CONFIG_HUGETLB_PAGE +int PageHuge(struct page *page); +int PageHeadHuge(struct page *page); +bool page_huge_active(struct page *page); +#else +TESTPAGEFLAG_FALSE(Huge) +TESTPAGEFLAG_FALSE(HeadHuge) + +static inline bool page_huge_active(struct page *page) +{ + return 0; +} +#endif + + #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * PageHuge() only returns true for hugetlbfs pages, but not for @@ -480,6 +536,53 @@ static inline int PageTransTail(struct page *page) #endif /* + * PageBuddy() indicate that the page is free and in the buddy system + * (see mm/page_alloc.c). + * + * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to + * -2 so that an underflow of the page_mapcount() won't be mistaken + * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very + * efficiently by most CPU architectures. + */ +#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) + +static inline int PageBuddy(struct page *page) +{ + return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; +} + +static inline void __SetPageBuddy(struct page *page) +{ + VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); + atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); +} + +static inline void __ClearPageBuddy(struct page *page) +{ + VM_BUG_ON_PAGE(!PageBuddy(page), page); + atomic_set(&page->_mapcount, -1); +} + +#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) + +static inline int PageBalloon(struct page *page) +{ + return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE; +} + +static inline void __SetPageBalloon(struct page *page) +{ + VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); + atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE); +} + +static inline void __ClearPageBalloon(struct page *page) +{ + VM_BUG_ON_PAGE(!PageBalloon(page), page); + atomic_set(&page->_mapcount, -1); +} + +/* * If network-based swap is enabled, sl*b must keep track of whether pages * were allocated from pfmemalloc reserves. 
*/ diff --git a/include/linux/pci.h b/include/linux/pci.h index e63112fb55be..353db8dc4c6e 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1178,6 +1178,7 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus); void pci_setup_bridge(struct pci_bus *bus); resource_size_t pcibios_window_alignment(struct pci_bus *bus, unsigned long type); +resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) @@ -1673,13 +1674,25 @@ int pci_ext_cfg_avail(void); void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); #ifdef CONFIG_PCI_IOV +int pci_iov_virtfn_bus(struct pci_dev *dev, int id); +int pci_iov_virtfn_devfn(struct pci_dev *dev, int id); + int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); void pci_disable_sriov(struct pci_dev *dev); int pci_num_vf(struct pci_dev *dev); int pci_vfs_assigned(struct pci_dev *dev); int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); int pci_sriov_get_totalvfs(struct pci_dev *dev); +resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); #else +static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id) +{ + return -ENOSYS; +} +static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id) +{ + return -ENOSYS; +} static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) { return -ENODEV; } static inline void pci_disable_sriov(struct pci_dev *dev) { } @@ -1690,6 +1703,8 @@ static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) { return 0; } static inline int pci_sriov_get_totalvfs(struct pci_dev *dev) { return 0; } +static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) +{ return 0; } #endif #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) diff --git a/include/linux/personality.h b/include/linux/personality.h index 646c0a7d50fa..aeb7892b2468 100644 --- a/include/linux/personality.h +++ b/include/linux/personality.h @@ -3,52 +3,14 @@ #include <uapi/linux/personality.h> - -/* - * Handling of different ABIs (personalities). - */ - -struct exec_domain; -struct pt_regs; - -extern int register_exec_domain(struct exec_domain *); -extern int unregister_exec_domain(struct exec_domain *); -extern int __set_personality(unsigned int); - - -/* - * Description of an execution domain. - * - * The first two members are refernced from assembly source - * and should stay where they are unless explicitly needed. - */ -typedef void (*handler_t)(int, struct pt_regs *); - -struct exec_domain { - const char *name; /* name of the execdomain */ - handler_t handler; /* handler for syscalls */ - unsigned char pers_low; /* lowest personality */ - unsigned char pers_high; /* highest personality */ - unsigned long *signal_map; /* signal mapping */ - unsigned long *signal_invmap; /* reverse signal mapping */ - struct map_segment *err_map; /* error mapping */ - struct map_segment *socktype_map; /* socket type mapping */ - struct map_segment *sockopt_map; /* socket option mapping */ - struct map_segment *af_map; /* address family mapping */ - struct module *module; /* module context of the ed. */ - struct exec_domain *next; /* linked list (internal) */ -}; - /* * Return the base personality without flags. */ #define personality(pers) (pers & PER_MASK) - /* * Change personality of the currently running process. */ -#define set_personality(pers) \ - ((current->personality == (pers)) ? 
0 : __set_personality(pers)) +#define set_personality(pers) (current->personality = (pers)) #endif /* _LINUX_PERSONALITY_H */ diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 7e75bfe37cc7..fe5732d53eda 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -21,6 +21,9 @@ extern void fixed_phy_del(int phy_addr); extern int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, struct fixed_phy_status *)); +extern int fixed_phy_update_state(struct phy_device *phydev, + const struct fixed_phy_status *status, + const struct fixed_phy_status *changed); #else static inline int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status) @@ -43,6 +46,12 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev, { return -ENODEV; } +static inline int fixed_phy_update_state(struct phy_device *phydev, + const struct fixed_phy_status *status, + const struct fixed_phy_status *changed) +{ + return -ENODEV; +} #endif /* CONFIG_FIXED_PHY */ #endif /* __PHY_FIXED_H */ diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h new file mode 100644 index 000000000000..d6ed28679bb2 --- /dev/null +++ b/include/linux/platform_data/nxp-nci.h @@ -0,0 +1,27 @@ +/* + * Generic platform data for the NXP NCI NFC chips. + * + * Copyright (C) 2014 NXP Semiconductors All rights reserved. + * + * Authors: Clément Perrochaud <clement.perrochaud@nxp.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _NXP_NCI_H_ +#define _NXP_NCI_H_ + +struct nxp_nci_nfc_platform_data { + unsigned int gpio_en; + unsigned int gpio_fw; + unsigned int irq; +}; + +#endif /* _NXP_NCI_H_ */ diff --git a/include/linux/platform_data/tpm_stm_st33.h b/include/linux/platform_data/st33zp24.h index ff75310c0f47..817dfdb37885 100644 --- a/include/linux/platform_data/tpm_stm_st33.h +++ b/include/linux/platform_data/st33zp24.h @@ -1,6 +1,6 @@ /* - * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24 - * Copyright (C) 2009, 2010 STMicroelectronics + * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24 + * Copyright (C) 2009 - 2015 STMicroelectronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -14,20 +14,9 @@ * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. - * - * STMicroelectronics version 1.2.0, Copyright (C) 2010 - * STMicroelectronics comes with ABSOLUTELY NO WARRANTY. - * This is free software, and you are welcome to redistribute it - * under certain conditions. 
- * - * @Author: Christophe RICARD tpmsupport@st.com - * - * @File: stm_st33_tpm.h - * - * @Date: 09/15/2010 */ -#ifndef __STM_ST33_TPM_H__ -#define __STM_ST33_TPM_H__ +#ifndef __ST33ZP24_H__ +#define __ST33ZP24_H__ #define TPM_ST33_I2C "st33zp24-i2c" #define TPM_ST33_SPI "st33zp24-spi" @@ -36,4 +25,4 @@ struct st33zp24_platform_data { int io_lpcpd; }; -#endif /* __STM_ST33_TPM_H__ */ +#endif /* __ST33ZP24_H__ */ diff --git a/include/linux/printk.h b/include/linux/printk.h index baa3f97d8ce8..9b30871c9149 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -255,6 +255,11 @@ extern asmlinkage void dump_stack(void) __cold; printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) #define pr_info(fmt, ...) \ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) +/* + * Like KERN_CONT, pr_cont() should only be used when continuing + * a line with no newline ('\n') enclosed. Otherwise it defaults + * back to KERN_DEFAULT. + */ #define pr_cont(fmt, ...) \ printk(KERN_CONT fmt, ##__VA_ARGS__) diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 8884f6e507f7..8e7a25b068b0 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -40,6 +40,7 @@ enum pstore_type_id { PSTORE_TYPE_PPC_OF = 5, PSTORE_TYPE_PPC_COMMON = 6, PSTORE_TYPE_PMSG = 7, + PSTORE_TYPE_PPC_OPAL = 8, PSTORE_TYPE_UNKNOWN = 255 }; diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 0d8ff3fb84ba..b8b73066d137 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -64,11 +64,11 @@ struct ptp_clock_request { * @adjtime: Shifts the time of the hardware clock. * parameter delta: Desired change in nanoseconds. * - * @gettime: Reads the current time from the hardware clock. - * parameter ts: Holds the result. + * @gettime64: Reads the current time from the hardware clock. + * parameter ts: Holds the result. * - * @settime: Set the current time on the hardware clock. - * parameter ts: Time value to set. + * @settime64: Set the current time on the hardware clock. + * parameter ts: Time value to set. * * @enable: Request driver to enable or disable an ancillary feature. * parameter request: Desired resource to enable or disable. 
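A minimal sketch of the pr_cont() usage pattern described in the printk.h comment above; example_print_row() is invented for illustration:

#include <linux/printk.h>

static void example_print_row(const int *vals, int n)
{
        int i;

        pr_info("row:");                /* opens the line, no '\n' yet */
        for (i = 0; i < n; i++)
                pr_cont(" %d", vals[i]);        /* continues the same line */
        pr_cont("\n");                  /* closes the line */
}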
@@ -104,8 +104,8 @@ struct ptp_clock_info { struct ptp_pin_desc *pin_config; int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); - int (*gettime)(struct ptp_clock_info *ptp, struct timespec *ts); - int (*settime)(struct ptp_clock_info *ptp, const struct timespec *ts); + int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); + int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts); int (*enable)(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on); int (*verify)(struct ptp_clock_info *ptp, unsigned int pin, diff --git a/include/linux/quota.h b/include/linux/quota.h index d534e8ed308a..b2505acfd3c0 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -50,6 +50,7 @@ #undef USRQUOTA #undef GRPQUOTA +#undef PRJQUOTA enum quota_type { USRQUOTA = 0, /* element used for user quotas */ GRPQUOTA = 1, /* element used for group quotas */ @@ -319,6 +320,7 @@ struct dquot_operations { /* get reserved quota for delayed alloc, value returned is managed by * quota code only */ qsize_t *(*get_reserved_space) (struct inode *); + int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */ }; struct path; @@ -344,7 +346,10 @@ struct qc_dqblk { int d_rt_spc_warns; /* # warnings issued wrt RT space */ }; -/* Field specifiers for ->set_dqblk() in struct qc_dqblk */ +/* + * Field specifiers for ->set_dqblk() in struct qc_dqblk and also for + * ->set_info() in struct qc_info + */ #define QC_INO_SOFT (1<<0) #define QC_INO_HARD (1<<1) #define QC_SPC_SOFT (1<<2) @@ -365,6 +370,51 @@ struct qc_dqblk { #define QC_INO_COUNT (1<<13) #define QC_RT_SPACE (1<<14) #define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE) +#define QC_FLAGS (1<<15) + +#define QCI_SYSFILE (1 << 0) /* Quota file is hidden from userspace */ +#define QCI_ROOT_SQUASH (1 << 1) /* Root squash turned on */ +#define QCI_ACCT_ENABLED (1 << 2) /* Quota accounting enabled */ +#define QCI_LIMITS_ENFORCED (1 << 3) /* Quota limits enforced */ + +/* Structures for communicating via ->get_state */ +struct qc_type_state { + unsigned int flags; /* Flags QCI_* */ + unsigned int spc_timelimit; /* Time after which space softlimit is + * enforced */ + unsigned int ino_timelimit; /* Ditto for inode softlimit */ + unsigned int rt_spc_timelimit; /* Ditto for real-time space */ + unsigned int spc_warnlimit; /* Limit for number of space warnings */ + unsigned int ino_warnlimit; /* Ditto for inodes */ + unsigned int rt_spc_warnlimit; /* Ditto for real-time space */ + unsigned long long ino; /* Inode number of quota file */ + blkcnt_t blocks; /* Number of 512-byte blocks in the file */ + blkcnt_t nextents; /* Number of extents in the file */ +}; + +struct qc_state { + unsigned int s_incoredqs; /* Number of dquots in core */ + /* + * Per quota type information. The array should really have + * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in + * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. 
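For the switch of struct ptp_clock_info to gettime64/settime64 above, a driver callback might look roughly like this; foo_read_ns() stands in for a hypothetical hardware counter read and is not a real API:

#include <linux/ptp_clock_kernel.h>
#include <linux/time64.h>

u64 foo_read_ns(struct ptp_clock_info *ptp);    /* hypothetical device accessor */

static int foo_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
        /* 64-bit safe: no struct timespec involved any more */
        *ts = ns_to_timespec64(foo_read_ns(ptp));
        return 0;
}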
Once VFS + * supports project quotas, this can be changed to MAXQUOTAS + */ + struct qc_type_state s_state[XQM_MAXQUOTAS]; +}; + +/* Structure for communicating via ->set_info */ +struct qc_info { + int i_fieldmask; /* mask of fields to change in ->set_info() */ + unsigned int i_flags; /* Flags QCI_* */ + unsigned int i_spc_timelimit; /* Time after which space softlimit is + * enforced */ + unsigned int i_ino_timelimit; /* Ditto for inode softlimit */ + unsigned int i_rt_spc_timelimit;/* Ditto for real-time space */ + unsigned int i_spc_warnlimit; /* Limit for number of space warnings */ + unsigned int i_ino_warnlimit; /* Limit for number of inode warnings */ + unsigned int i_rt_spc_warnlimit; /* Ditto for real-time space */ +}; /* Operations handling requests from userspace */ struct quotactl_ops { @@ -373,12 +423,10 @@ struct quotactl_ops { int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); - int (*get_info)(struct super_block *, int, struct if_dqinfo *); - int (*set_info)(struct super_block *, int, struct if_dqinfo *); + int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); - int (*get_xstate)(struct super_block *, struct fs_quota_stat *); - int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); + int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); }; @@ -389,7 +437,19 @@ struct quota_format_type { struct quota_format_type *qf_next; }; -/* Quota state flags - they actually come in two flavors - for users and groups */ +/** + * Quota state flags - they actually come in two flavors - for users and groups. 
+ * + * Actual typed flags layout: + * USRQUOTA GRPQUOTA + * DQUOT_USAGE_ENABLED 0x0001 0x0002 + * DQUOT_LIMITS_ENABLED 0x0004 0x0008 + * DQUOT_SUSPENDED 0x0010 0x0020 + * + * Following bits are used for non-typed flags: + * DQUOT_QUOTA_SYS_FILE 0x0040 + * DQUOT_NEGATIVE_USAGE 0x0080 + */ enum { _DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */ _DQUOT_LIMITS_ENABLED, /* Enforce quota limits for users */ @@ -398,9 +458,9 @@ enum { * memory to turn them on */ _DQUOT_STATE_FLAGS }; -#define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED) -#define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED) -#define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED) +#define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED * MAXQUOTAS) +#define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED * MAXQUOTAS) +#define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED * MAXQUOTAS) #define DQUOT_STATE_FLAGS (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED | \ DQUOT_SUSPENDED) /* Other quota flags */ @@ -414,15 +474,21 @@ enum { */ #define DQUOT_NEGATIVE_USAGE (1 << (DQUOT_STATE_LAST + 1)) /* Allow negative quota usage */ - static inline unsigned int dquot_state_flag(unsigned int flags, int type) { - return flags << _DQUOT_STATE_FLAGS * type; + return flags << type; } static inline unsigned int dquot_generic_flag(unsigned int flags, int type) { - return (flags >> _DQUOT_STATE_FLAGS * type) & DQUOT_STATE_FLAGS; + return (flags >> type) & DQUOT_STATE_FLAGS; +} + +/* Bitmap of quota types where flag is set in flags */ +static __always_inline unsigned dquot_state_types(unsigned flags, unsigned flag) +{ + BUILD_BUG_ON_NOT_POWER_OF_2(flag); + return (flags / flag) & ((1 << MAXQUOTAS) - 1); } #ifdef CONFIG_QUOTA_NETLINK_INTERFACE diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index df73258cca47..77ca6601ff25 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -95,8 +95,8 @@ int dquot_quota_on_mount(struct super_block *sb, char *qf_name, int dquot_quota_off(struct super_block *sb, int type); int dquot_writeback_dquots(struct super_block *sb, int type); int dquot_quota_sync(struct super_block *sb, int type); -int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); -int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); +int dquot_get_state(struct super_block *sb, struct qc_state *state); +int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii); int dquot_get_dqblk(struct super_block *sb, struct kqid id, struct qc_dqblk *di); int dquot_set_dqblk(struct super_block *sb, struct kqid id, @@ -134,10 +134,7 @@ static inline bool sb_has_quota_suspended(struct super_block *sb, int type) static inline unsigned sb_any_quota_suspended(struct super_block *sb) { - unsigned type, tmsk = 0; - for (type = 0; type < MAXQUOTAS; type++) - tmsk |= sb_has_quota_suspended(sb, type) << type; - return tmsk; + return dquot_state_types(sb_dqopt(sb)->flags, DQUOT_SUSPENDED); } /* Does kernel know about any quota information for given sb + type? 
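To make the new flag packing above concrete (assuming MAXQUOTAS == 2, matching the layout table): suspending group quota sets dquot_state_flag(DQUOT_SUSPENDED, GRPQUOTA) == 0x0010 << 1 == 0x0020, and dquot_state_types() recovers the per-type bitmap. A small sketch, not part of the patch:

#include <linux/quota.h>
#include <linux/quotaops.h>

/* Sketch only: bitmap with bit 'type' set for every suspended quota type. */
static unsigned int example_suspended_types(struct super_block *sb)
{
        /* e.g. flags == 0x0020 -> (0x0020 / 0x0010) & 0x3 == 0x2 (GRPQUOTA) */
        return dquot_state_types(sb_dqopt(sb)->flags, DQUOT_SUSPENDED);
}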
*/ @@ -149,10 +146,7 @@ static inline bool sb_has_quota_loaded(struct super_block *sb, int type) static inline unsigned sb_any_quota_loaded(struct super_block *sb) { - unsigned type, tmsk = 0; - for (type = 0; type < MAXQUOTAS; type++) - tmsk |= sb_has_quota_loaded(sb, type) << type; - return tmsk; + return dquot_state_types(sb_dqopt(sb)->flags, DQUOT_USAGE_ENABLED); } static inline bool sb_has_quota_active(struct super_block *sb, int type) diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 67fc8fcdc4b0..a7ff409f386d 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -70,7 +70,8 @@ void ctrl_alt_del(void); #define POWEROFF_CMD_PATH_LEN 256 extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN]; -extern int orderly_poweroff(bool force); +extern void orderly_poweroff(bool force); +extern void orderly_reboot(void); /* * Emergency restart, callable from an interrupt handler. diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index d438eeb08bff..e23d242d1230 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -1,14 +1,13 @@ /* * Resizable, Scalable, Concurrent Hash Table * - * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch> + * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> + * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> * - * Based on the following paper by Josh Triplett, Paul E. McKenney - * and Jonathan Walpole: - * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf - * * Code partially derived from nft_hash + * Rewritten with rehash code from br_multicast plus single list + * pointer as suggested by Josh Triplett * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -19,9 +18,12 @@ #define _LINUX_RHASHTABLE_H #include <linux/compiler.h> +#include <linux/errno.h> +#include <linux/jhash.h> #include <linux/list_nulls.h> #include <linux/workqueue.h> #include <linux/mutex.h> +#include <linux/rcupdate.h> /* * The end of the chain is marked with a special nulls marks which has @@ -42,6 +44,9 @@ #define RHT_HASH_BITS 27 #define RHT_BASE_SHIFT RHT_HASH_BITS +/* Base bits plus 1 bit for nulls marker */ +#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) + struct rhash_head { struct rhash_head __rcu *next; }; @@ -49,20 +54,43 @@ struct rhash_head { /** * struct bucket_table - Table of hash buckets * @size: Number of hash buckets + * @rehash: Current bucket being rehashed + * @hash_rnd: Random seed to fold into hash * @locks_mask: Mask to apply before accessing locks[] * @locks: Array of spinlocks protecting individual buckets + * @walkers: List of active walkers + * @rcu: RCU structure for freeing the table + * @future_tbl: Table under construction during rehashing * @buckets: size * hash buckets */ struct bucket_table { - size_t size; + unsigned int size; + unsigned int rehash; + u32 hash_rnd; unsigned int locks_mask; spinlock_t *locks; + struct list_head walkers; + struct rcu_head rcu; + + struct bucket_table __rcu *future_tbl; struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; +/** + * struct rhashtable_compare_arg - Key for the function rhashtable_compare + * @ht: Hash table + * @key: Key to compare against + */ +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); -typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed); 
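Since orderly_poweroff() above now returns void and orderly_reboot() is new, callers simply fire and forget; a sketch with an invented example_handle_overtemp():

#include <linux/reboot.h>

static void example_handle_overtemp(bool can_recover)
{
        if (can_recover)
                orderly_reboot();       /* ask userspace for a clean reboot */
        else
                orderly_poweroff(true); /* fall back to forced poweroff if the command fails */
}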
+typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed); +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, + const void *obj); struct rhashtable; @@ -72,60 +100,62 @@ struct rhashtable; * @key_len: Length of key * @key_offset: Offset of key in struct to be hashed * @head_offset: Offset of rhash_head in struct to be hashed - * @hash_rnd: Seed to use while hashing - * @max_shift: Maximum number of shifts while expanding - * @min_shift: Minimum number of shifts while shrinking + * @max_size: Maximum size while expanding + * @min_size: Minimum size while shrinking * @nulls_base: Base value to generate nulls marker + * @insecure_elasticity: Set to true to disable chain length checks + * @automatic_shrinking: Enable automatic shrinking of tables * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) - * @hashfn: Function to hash key + * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) * @obj_hashfn: Function to hash object + * @obj_cmpfn: Function to compare key with object */ struct rhashtable_params { size_t nelem_hint; size_t key_len; size_t key_offset; size_t head_offset; - u32 hash_rnd; - size_t max_shift; - size_t min_shift; + unsigned int max_size; + unsigned int min_size; u32 nulls_base; + bool insecure_elasticity; + bool automatic_shrinking; size_t locks_mul; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; }; /** * struct rhashtable - Hash table handle * @tbl: Bucket table - * @future_tbl: Table under construction during expansion/shrinking * @nelems: Number of elements in table - * @shift: Current size (1 << shift) + * @key_len: Key length for hashfn + * @elasticity: Maximum chain length before rehash * @p: Configuration parameters * @run_work: Deferred worker to expand/shrink asynchronously * @mutex: Mutex to protect current/future table swapping - * @walkers: List of active walkers - * @being_destroyed: True if table is set up for destruction + * @lock: Spin lock to protect walker list */ struct rhashtable { struct bucket_table __rcu *tbl; - struct bucket_table __rcu *future_tbl; atomic_t nelems; - atomic_t shift; + unsigned int key_len; + unsigned int elasticity; struct rhashtable_params p; struct work_struct run_work; struct mutex mutex; - struct list_head walkers; - bool being_destroyed; + spinlock_t lock; }; /** * struct rhashtable_walker - Hash table walker * @list: List entry on list of walkers - * @resize: Resize event occured + * @tbl: The table that we were walking over */ struct rhashtable_walker { struct list_head list; - bool resize; + struct bucket_table *tbl; }; /** @@ -162,6 +192,118 @@ static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr) return ((unsigned long) ptr) >> 1; } +static inline void *rht_obj(const struct rhashtable *ht, + const struct rhash_head *he) +{ + return (char *)he - ht->p.head_offset; +} + +static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, + unsigned int hash) +{ + return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); +} + +static inline unsigned int rht_key_hashfn( + struct rhashtable *ht, const struct bucket_table *tbl, + const void *key, const struct rhashtable_params params) +{ + unsigned int hash; + + /* params must be equal to ht->p if it isn't constant. 
*/ + if (!__builtin_constant_p(params.key_len)) + hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd); + else if (params.key_len) { + unsigned int key_len = params.key_len; + + if (params.hashfn) + hash = params.hashfn(key, key_len, tbl->hash_rnd); + else if (key_len & (sizeof(u32) - 1)) + hash = jhash(key, key_len, tbl->hash_rnd); + else + hash = jhash2(key, key_len / sizeof(u32), + tbl->hash_rnd); + } else { + unsigned int key_len = ht->p.key_len; + + if (params.hashfn) + hash = params.hashfn(key, key_len, tbl->hash_rnd); + else + hash = jhash(key, key_len, tbl->hash_rnd); + } + + return rht_bucket_index(tbl, hash); +} + +static inline unsigned int rht_head_hashfn( + struct rhashtable *ht, const struct bucket_table *tbl, + const struct rhash_head *he, const struct rhashtable_params params) +{ + const char *ptr = rht_obj(ht, he); + + return likely(params.obj_hashfn) ? + rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?: + ht->p.key_len, + tbl->hash_rnd)) : + rht_key_hashfn(ht, tbl, ptr + params.key_offset, params); +} + +/** + * rht_grow_above_75 - returns true if nelems > 0.75 * table-size + * @ht: hash table + * @tbl: current table + */ +static inline bool rht_grow_above_75(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + /* Expand table when exceeding 75% load */ + return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) && + (!ht->p.max_size || tbl->size < ht->p.max_size); +} + +/** + * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size + * @ht: hash table + * @tbl: current table + */ +static inline bool rht_shrink_below_30(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + /* Shrink table beneath 30% load */ + return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) && + tbl->size > ht->p.min_size; +} + +/** + * rht_grow_above_100 - returns true if nelems > table-size + * @ht: hash table + * @tbl: current table + */ +static inline bool rht_grow_above_100(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + return atomic_read(&ht->nelems) > tbl->size; +} + +/* The bucket lock is selected based on the hash and protects mutations + * on a group of hash buckets. + * + * A maximum of tbl->size/2 bucket locks is allocated. This ensures that + * a single lock always covers both buckets which may both contains + * entries which link to the same bucket of the old table during resizing. + * This allows to simplify the locking as locking the bucket in both + * tables during resize always guarantee protection. + * + * IMPORTANT: When holding the bucket lock of both the old and new table + * during expansions and shrinking, the old bucket lock must always be + * acquired first. 
+ */ +static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl, + unsigned int hash) +{ + return &tbl->locks[hash & tbl->locks_mask]; +} + #ifdef CONFIG_PROVE_LOCKING int lockdep_rht_mutex_is_held(struct rhashtable *ht); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); @@ -178,23 +320,13 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, } #endif /* CONFIG_PROVE_LOCKING */ -int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); - -void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); -bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); - -int rhashtable_expand(struct rhashtable *ht); -int rhashtable_shrink(struct rhashtable *ht); +int rhashtable_init(struct rhashtable *ht, + const struct rhashtable_params *params); -void *rhashtable_lookup(struct rhashtable *ht, const void *key); -void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key, - bool (*compare)(void *, void *), void *arg); - -bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj); -bool rhashtable_lookup_compare_insert(struct rhashtable *ht, - struct rhash_head *obj, - bool (*compare)(void *, void *), - void *arg); +int rhashtable_insert_slow(struct rhashtable *ht, const void *key, + struct rhash_head *obj, + struct bucket_table *old_tbl); +int rhashtable_insert_rehash(struct rhashtable *ht); int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); void rhashtable_walk_exit(struct rhashtable_iter *iter); @@ -202,6 +334,9 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); void *rhashtable_walk_next(struct rhashtable_iter *iter); void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); +void rhashtable_free_and_destroy(struct rhashtable *ht, + void (*free_fn)(void *ptr, void *arg), + void *arg); void rhashtable_destroy(struct rhashtable *ht); #define rht_dereference(p, ht) \ @@ -352,4 +487,316 @@ void rhashtable_destroy(struct rhashtable *ht); rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\ tbl, hash, member) +static inline int rhashtable_compare(struct rhashtable_compare_arg *arg, + const void *obj) +{ + struct rhashtable *ht = arg->ht; + const char *ptr = obj; + + return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len); +} + +/** + * rhashtable_lookup_fast - search hash table, inlined version + * @ht: hash table + * @key: the pointer to the key + * @params: hash table parameters + * + * Computes the hash value for the key and traverses the bucket chain looking + * for a entry with an identical key. The first matching entry is returned. + * + * Returns the first entry on which the compare function returned true. + */ +static inline void *rhashtable_lookup_fast( + struct rhashtable *ht, const void *key, + const struct rhashtable_params params) +{ + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = key, + }; + const struct bucket_table *tbl; + struct rhash_head *he; + unsigned int hash; + + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); +restart: + hash = rht_key_hashfn(ht, tbl, key, params); + rht_for_each_rcu(he, tbl, hash) { + if (params.obj_cmpfn ? + params.obj_cmpfn(&arg, rht_obj(ht, he)) : + rhashtable_compare(&arg, rht_obj(ht, he))) + continue; + rcu_read_unlock(); + return rht_obj(ht, he); + } + + /* Ensure we see any new tables. 
*/ + smp_rmb(); + + tbl = rht_dereference_rcu(tbl->future_tbl, ht); + if (unlikely(tbl)) + goto restart; + rcu_read_unlock(); + + return NULL; +} + +/* Internal function, please use rhashtable_insert_fast() instead */ +static inline int __rhashtable_insert_fast( + struct rhashtable *ht, const void *key, struct rhash_head *obj, + const struct rhashtable_params params) +{ + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = key, + }; + struct bucket_table *tbl, *new_tbl; + struct rhash_head *head; + spinlock_t *lock; + unsigned int elasticity; + unsigned int hash; + int err; + +restart: + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); + + /* All insertions must grab the oldest table containing + * the hashed bucket that is yet to be rehashed. + */ + for (;;) { + hash = rht_head_hashfn(ht, tbl, obj, params); + lock = rht_bucket_lock(tbl, hash); + spin_lock_bh(lock); + + if (tbl->rehash <= hash) + break; + + spin_unlock_bh(lock); + tbl = rht_dereference_rcu(tbl->future_tbl, ht); + } + + new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); + if (unlikely(new_tbl)) { + err = rhashtable_insert_slow(ht, key, obj, new_tbl); + if (err == -EAGAIN) + goto slow_path; + goto out; + } + + if (unlikely(rht_grow_above_100(ht, tbl))) { +slow_path: + spin_unlock_bh(lock); + err = rhashtable_insert_rehash(ht); + rcu_read_unlock(); + if (err) + return err; + + goto restart; + } + + err = -EEXIST; + elasticity = ht->elasticity; + rht_for_each(head, tbl, hash) { + if (key && + unlikely(!(params.obj_cmpfn ? + params.obj_cmpfn(&arg, rht_obj(ht, head)) : + rhashtable_compare(&arg, rht_obj(ht, head))))) + goto out; + if (!--elasticity) + goto slow_path; + } + + err = 0; + + head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); + + RCU_INIT_POINTER(obj->next, head); + + rcu_assign_pointer(tbl->buckets[hash], obj); + + atomic_inc(&ht->nelems); + if (rht_grow_above_75(ht, tbl)) + schedule_work(&ht->run_work); + +out: + spin_unlock_bh(lock); + rcu_read_unlock(); + + return err; +} + +/** + * rhashtable_insert_fast - insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Will take a per bucket spinlock to protect against mutual mutations + * on the same bucket. Multiple insertions may occur in parallel unless + * they map to the same bucket lock. + * + * It is safe to call this function from atomic context. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). + */ +static inline int rhashtable_insert_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + return __rhashtable_insert_fast(ht, NULL, obj, params); +} + +/** + * rhashtable_lookup_insert_fast - lookup and insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * + * This lookup function may only be used for fixed key hash table (key_len + * parameter set). It will BUG() if used inappropriately. + * + * It is safe to call this function from atomic context. 
+ * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). + */ +static inline int rhashtable_lookup_insert_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + const char *key = rht_obj(ht, obj); + + BUG_ON(ht->p.obj_hashfn); + + return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, + params); +} + +/** + * rhashtable_lookup_insert_key - search and insert object to hash table + * with explicit key + * @ht: hash table + * @key: key + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * + * Lookups may occur in parallel with hashtable mutations and resizing. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). + * + * Returns zero on success. + */ +static inline int rhashtable_lookup_insert_key( + struct rhashtable *ht, const void *key, struct rhash_head *obj, + const struct rhashtable_params params) +{ + BUG_ON(!ht->p.obj_hashfn || !key); + + return __rhashtable_insert_fast(ht, key, obj, params); +} + +/* Internal function, please use rhashtable_remove_fast() instead */ +static inline int __rhashtable_remove_fast( + struct rhashtable *ht, struct bucket_table *tbl, + struct rhash_head *obj, const struct rhashtable_params params) +{ + struct rhash_head __rcu **pprev; + struct rhash_head *he; + spinlock_t * lock; + unsigned int hash; + int err = -ENOENT; + + hash = rht_head_hashfn(ht, tbl, obj, params); + lock = rht_bucket_lock(tbl, hash); + + spin_lock_bh(lock); + + pprev = &tbl->buckets[hash]; + rht_for_each(he, tbl, hash) { + if (he != obj) { + pprev = &he->next; + continue; + } + + rcu_assign_pointer(*pprev, obj->next); + err = 0; + break; + } + + spin_unlock_bh(lock); + + return err; +} + +/** + * rhashtable_remove_fast - remove object from hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Since the hash chain is single linked, the removal operation needs to + * walk the bucket chain upon removal. The removal operation is thus + * considerable slow if the hash table is not correctly sized. + * + * Will automatically shrink the table via rhashtable_expand() if the + * shrink_decision function specified at rhashtable_init() returns true. + * + * Returns zero on success, -ENOENT if the entry could not be found. + */ +static inline int rhashtable_remove_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + struct bucket_table *tbl; + int err; + + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); + + /* Because we have already taken (and released) the bucket + * lock in old_tbl, if we find that future_tbl is not yet + * visible then that guarantees the entry to still be in + * the old tbl if it exists. 
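Putting the reworked rhashtable API in this header together, a typical user of the inlined fast paths might look roughly like the sketch below; struct test_obj and test_params are invented, and the params are kept compile-time constant so rht_key_hashfn() can pick jhash2 at build time:

#include <linux/rhashtable.h>
#include <linux/stddef.h>

struct test_obj {
        u32                     key;
        struct rhash_head       node;
};

static const struct rhashtable_params test_params = {
        .key_len                = sizeof(u32),
        .key_offset             = offsetof(struct test_obj, key),
        .head_offset            = offsetof(struct test_obj, node),
        .automatic_shrinking    = true,         /* allow deferred shrink */
};

/* ht is assumed to have been set up with rhashtable_init(ht, &test_params). */
static int example_use(struct rhashtable *ht, struct test_obj *obj)
{
        struct test_obj *found;
        int err;

        err = rhashtable_insert_fast(ht, &obj->node, test_params);
        if (err)
                return err;

        found = rhashtable_lookup_fast(ht, &obj->key, test_params);
        if (!found)
                return -ENOENT;

        return rhashtable_remove_fast(ht, &found->node, test_params);
}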
+ */ + while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) && + (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) + ; + + if (err) + goto out; + + atomic_dec(&ht->nelems); + if (unlikely(ht->p.automatic_shrinking && + rht_shrink_below_30(ht, tbl))) + schedule_work(&ht->run_work); + +out: + rcu_read_unlock(); + + return err; +} + #endif /* _LINUX_RHASHTABLE_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index c4c559a45dc8..c89c53a113a8 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -105,14 +105,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma) __put_anon_vma(anon_vma); } -static inline struct anon_vma *page_anon_vma(struct page *page) -{ - if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != - PAGE_MAPPING_ANON) - return NULL; - return page_rmapping(page); -} - static inline void vma_lock_anon_vma(struct vm_area_struct *vma) { struct anon_vma *anon_vma = vma->anon_vma; diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 5db76a32fcab..2da5d1081ad9 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -77,7 +77,20 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) return rtnl_dereference(dev->ingress_queue); } -extern struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); +struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); + +#ifdef CONFIG_NET_CLS_ACT +void net_inc_ingress_queue(void); +void net_dec_ingress_queue(void); +#else +static inline void net_inc_ingress_queue(void) +{ +} + +static inline void net_dec_ingress_queue(void) +{ +} +#endif extern void rtnetlink_init(void); extern void __rtnl_unlock(void); diff --git a/include/linux/sched.h b/include/linux/sched.h index f74d4cc3a3e5..8222ae40ecb0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -125,7 +125,6 @@ struct sched_attr { u64 sched_period; }; -struct exec_domain; struct futex_pi_state; struct robust_list_head; struct bio_list; @@ -2311,11 +2310,6 @@ extern void set_curr_task(int cpu, struct task_struct *p); void yield(void); -/* - * The default (Linux) execution domain. 
- */ -extern struct exec_domain default_exec_domain; - union thread_union { struct thread_info thread_info; unsigned long stack[THREAD_SIZE/sizeof(long)]; diff --git a/include/linux/security.h b/include/linux/security.h index 4e14e3d6309f..18264ea9e314 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1716,7 +1716,6 @@ struct security_operations { int (*tun_dev_attach_queue) (void *security); int (*tun_dev_attach) (struct sock *sk, void *security); int (*tun_dev_open) (void *security); - void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk); #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -2734,8 +2733,6 @@ int security_tun_dev_attach_queue(void *security); int security_tun_dev_attach(struct sock *sk, void *security); int security_tun_dev_open(void *security); -void security_skb_owned_by(struct sk_buff *skb, struct sock *sk); - #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct sock *sock, struct sock *other, @@ -2927,11 +2924,6 @@ static inline int security_tun_dev_open(void *security) { return 0; } - -static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) -{ -} - #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f54d6659713a..0991259643d6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -166,10 +166,16 @@ struct nf_conntrack { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info { atomic_t use; + enum { + BRNF_PROTO_UNCHANGED, + BRNF_PROTO_8021Q, + BRNF_PROTO_PPPOE + } orig_proto; + bool pkt_otherhost; unsigned int mask; struct net_device *physindev; struct net_device *physoutdev; - unsigned long data[32 / sizeof(unsigned long)]; + char neigh_header[8]; }; #endif @@ -492,7 +498,6 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark - * @dropcount: total number of sk_receive_queue overflows * @vlan_proto: vlan encapsulation protocol * @vlan_tci: vlan tag control information * @inner_protocol: Protocol (encapsulation) @@ -641,7 +646,6 @@ struct sk_buff { #endif union { __u32 mark; - __u32 dropcount; __u32 reserved_tailroom; }; @@ -870,8 +874,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data, void skb_abort_seq_read(struct skb_seq_state *st); unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, - unsigned int to, struct ts_config *config, - struct ts_state *state); + unsigned int to, struct ts_config *config); /* * Packet hash types specify the type of hash in skb_set_hash. 
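With the ts_state argument dropped from skb_find_text() above, callers only prepare a textsearch config; a rough sketch (the "kmp" algorithm and the search range are arbitrary choices):

#include <linux/skbuff.h>
#include <linux/textsearch.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/err.h>

static bool example_payload_contains(struct sk_buff *skb, const char *pattern)
{
        struct ts_config *ts;
        unsigned int pos;

        ts = textsearch_prepare("kmp", pattern, strlen(pattern),
                                GFP_KERNEL, TS_AUTOLOAD);
        if (IS_ERR(ts))
                return false;

        /* No struct ts_state to pass any more; state is handled internally. */
        pos = skb_find_text(skb, 0, skb->len, ts);
        textsearch_destroy(ts);

        return pos != UINT_MAX;
}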
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h index 46cca4c06848..083ac388098e 100644 --- a/include/linux/sock_diag.h +++ b/include/linux/sock_diag.h @@ -19,8 +19,8 @@ void sock_diag_unregister(const struct sock_diag_handler *h); void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); -int sock_diag_check_cookie(void *sk, __u32 *cookie); -void sock_diag_save_cookie(void *sk, __u32 *cookie); +int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie); +void sock_diag_save_cookie(struct sock *sk, __u32 *cookie); int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, diff --git a/include/linux/socket.h b/include/linux/socket.h index 5c19cba34dce..5bf59c8493b7 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -51,6 +51,7 @@ struct msghdr { void *msg_control; /* ancillary data */ __kernel_size_t msg_controllen; /* ancillary data buffer length */ unsigned int msg_flags; /* flags on received message */ + struct kiocb *msg_iocb; /* ptr to iocb for async requests */ }; struct user_msghdr { @@ -138,6 +139,11 @@ static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg); } +static inline size_t msg_data_left(struct msghdr *msg) +{ + return iov_iter_count(&msg->msg_iter); +} + /* "Socket"-level control message types: */ #define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */ @@ -181,6 +187,7 @@ struct ucred { #define AF_WANPIPE 25 /* Wanpipe API Sockets */ #define AF_LLC 26 /* Linux LLC */ #define AF_IB 27 /* Native InfiniBand address */ +#define AF_MPLS 28 /* MPLS */ #define AF_CAN 29 /* Controller Area Network */ #define AF_TIPC 30 /* TIPC sockets */ #define AF_BLUETOOTH 31 /* Bluetooth sockets */ @@ -226,6 +233,7 @@ struct ucred { #define PF_WANPIPE AF_WANPIPE #define PF_LLC AF_LLC #define PF_IB AF_IB +#define PF_MPLS AF_MPLS #define PF_CAN AF_CAN #define PF_TIPC AF_TIPC #define PF_BLUETOOTH AF_BLUETOOTH diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h index cd519a11c2c6..b63fe6f5fdc8 100644 --- a/include/linux/spi/at86rf230.h +++ b/include/linux/spi/at86rf230.h @@ -22,6 +22,7 @@ struct at86rf230_platform_data { int rstn; int slp_tr; int dig2; + u8 xtal_trim; }; #endif diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h index 85b8ee67e937..e741e8baad92 100644 --- a/include/linux/spi/cc2520.h +++ b/include/linux/spi/cc2520.h @@ -21,6 +21,7 @@ struct cc2520_platform_data { int sfd; int reset; int vreg; + bool amplified; }; #endif diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index 657571817260..71f711db4500 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -10,7 +10,7 @@ enum string_size_units { STRING_UNITS_2, /* use binary powers of 2^10 */ }; -void string_get_size(u64 size, enum string_size_units units, +void string_get_size(u64 size, u64 blk_size, enum string_size_units units, char *buf, int len); #define UNESCAPE_SPACE 0x01 @@ -47,22 +47,22 @@ static inline int string_unescape_any_inplace(char *buf) #define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP) #define ESCAPE_HEX 0x20 -int string_escape_mem(const char *src, size_t isz, char **dst, size_t osz, +int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, unsigned 
int flags, const char *esc); static inline int string_escape_mem_any_np(const char *src, size_t isz, - char **dst, size_t osz, const char *esc) + char *dst, size_t osz, const char *esc) { return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc); } -static inline int string_escape_str(const char *src, char **dst, size_t sz, +static inline int string_escape_str(const char *src, char *dst, size_t sz, unsigned int flags, const char *esc) { return string_escape_mem(src, strlen(src), dst, sz, flags, esc); } -static inline int string_escape_str_any_np(const char *src, char **dst, +static inline int string_escape_str_any_np(const char *src, char *dst, size_t sz, const char *esc) { return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc); diff --git a/include/linux/swap.h b/include/linux/swap.h index 7067eca501e2..cee108cbe2d5 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -307,7 +307,7 @@ extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_all(void); extern void rotate_reclaimable_page(struct page *page); -extern void deactivate_page(struct page *page); +extern void deactivate_file_page(struct page *page); extern void swap_setup(void); extern void add_page_to_unevictable_list(struct page *page); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index b7361f831226..795d5fea5697 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -212,4 +212,7 @@ static inline void setup_sysctl_set(struct ctl_table_set *p, #endif /* CONFIG_SYSCTL */ +int sysctl_max_threads(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + #endif /* _LINUX_SYSCTL_H */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 1a7adb411647..0caa3a2d4106 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -58,6 +58,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb) struct tcp_fastopen_cookie { s8 len; u8 val[TCP_FASTOPEN_COOKIE_MAX]; + bool exp; /* In RFC6994 experimental option format */ }; /* This defines a selective acknowledgement block. */ @@ -111,7 +112,7 @@ struct tcp_request_sock_ops; struct tcp_request_sock { struct inet_request_sock req; const struct tcp_request_sock_ops *af_specific; - struct sock *listener; /* needed for TFO */ + bool tfo_listener; u32 rcv_isn; u32 snt_isn; u32 snt_synack; /* synack sent time */ @@ -188,6 +189,7 @@ struct tcp_sock { u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ syn_data:1, /* SYN includes data */ syn_fastopen:1, /* SYN includes Fast Open option */ + syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */ syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */ u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. 
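Reflecting the new blk_size argument of string_get_size() above (and the escape helpers now writing into a plain char buffer rather than char **), a caller might do something like this sketch:

#include <linux/string_helpers.h>
#include <linux/printk.h>

static void example_show_capacity(u64 nr_sectors)
{
        char buf[16];

        /* Second argument is the block size the count is expressed in. */
        string_get_size(nr_sectors, 512, STRING_UNITS_2, buf, sizeof(buf));
        pr_info("capacity: %s\n", buf);
}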
*/ @@ -236,7 +238,6 @@ struct tcp_sock { u32 lost_out; /* Lost packets */ u32 sacked_out; /* SACK'd packets */ u32 fackets_out; /* FACK'd packets */ - u32 tso_deferred; /* from STCP, retrans queue hinting */ struct sk_buff* lost_skb_hint; diff --git a/include/linux/types.h b/include/linux/types.h index 6747247e3f9f..59698be03490 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -146,12 +146,6 @@ typedef u64 dma_addr_t; typedef u32 dma_addr_t; #endif /* dma_addr_t */ -#ifdef __CHECKER__ -#else -#endif -#ifdef __CHECK_ENDIAN__ -#else -#endif typedef unsigned __bitwise__ gfp_t; typedef unsigned __bitwise__ fmode_t; typedef unsigned __bitwise__ oom_flags_t; diff --git a/include/linux/udp.h b/include/linux/udp.h index 247cfdcc4b08..87c094961bd5 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -34,7 +34,7 @@ static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb) #define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256) -static inline int udp_hashfn(struct net *net, unsigned num, unsigned mask) +static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask) { return (num + net_hash_mix(net)) & mask; } diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h index 2d1f9b627f91..0ee05da38899 100644 --- a/include/linux/uidgid.h +++ b/include/linux/uidgid.h @@ -29,6 +29,7 @@ typedef struct { #define KUIDT_INIT(value) (kuid_t){ value } #define KGIDT_INIT(value) (kgid_t){ value } +#ifdef CONFIG_MULTIUSER static inline uid_t __kuid_val(kuid_t uid) { return uid.val; @@ -38,6 +39,17 @@ static inline gid_t __kgid_val(kgid_t gid) { return gid.val; } +#else +static inline uid_t __kuid_val(kuid_t uid) +{ + return 0; +} + +static inline gid_t __kgid_val(kgid_t gid) +{ + return 0; +} +#endif #define GLOBAL_ROOT_UID KUIDT_INIT(0) #define GLOBAL_ROOT_GID KGIDT_INIT(0) diff --git a/include/linux/uio.h b/include/linux/uio.h index 15f11fb9fff6..8b01e1c3c614 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -112,6 +112,14 @@ static inline bool iter_is_iovec(struct iov_iter *i) } /* + * Get one of READ or WRITE out of iter->type without any other flags OR'd in + * with it. + * + * The ?: is just for type safety. + */ +#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK) + +/* * Cap the iov_iter by given limit; note that the second argument is * *not* the new size - it's upper limit for such. Passing it a value * greater than the amount of data in iov_iter is fine - it'll just do diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h new file mode 100644 index 000000000000..d5f4fb69dba3 --- /dev/null +++ b/include/linux/util_macros.h @@ -0,0 +1,40 @@ +#ifndef _LINUX_HELPER_MACROS_H_ +#define _LINUX_HELPER_MACROS_H_ + +#define __find_closest(x, a, as, op) \ +({ \ + typeof(as) __fc_i, __fc_as = (as) - 1; \ + typeof(x) __fc_x = (x); \ + typeof(*a) *__fc_a = (a); \ + for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \ + if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \ + __fc_a[__fc_i + 1], 2)) \ + break; \ + } \ + (__fc_i); \ +}) + +/** + * find_closest - locate the closest element in a sorted array + * @x: The reference value. + * @a: The array in which to look for the closest element. Must be sorted + * in ascending order. + * @as: Size of 'a'. + * + * Returns the index of the element closest to 'x'. + */ +#define find_closest(x, a, as) __find_closest(x, a, as, <=) + +/** + * find_closest_descending - locate the closest element in a sorted array + * @x: The reference value. 
+ * @a: The array in which to look for the closest element. Must be sorted + * in descending order. + * @as: Size of 'a'. + * + * Similar to find_closest() but 'a' is expected to be sorted in descending + * order. + */ +#define find_closest_descending(x, a, as) __find_closest(x, a, as, >=) + +#endif diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 3283c6a55425..1338190b5478 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -47,5 +47,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, void zs_unmap_object(struct zs_pool *pool, unsigned long handle); unsigned long zs_get_total_pages(struct zs_pool *pool); +unsigned long zs_compact(struct zs_pool *pool); #endif
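As an illustration of the new find_closest() helper above: with a = {10, 20, 40, 80}, find_closest(31, a, ARRAY_SIZE(a)) evaluates to 2, since 31 is closer to 40 than to 20. A sketch of possible driver usage; example_rates and example_pick_rate() are invented:

#include <linux/kernel.h>
#include <linux/util_macros.h>

/* Hypothetical table of supported sampling rates, sorted ascending. */
static const int example_rates[] = { 10, 20, 40, 80 };

static int example_pick_rate(int requested)
{
        /* Index of the supported rate closest to 'requested'. */
        return find_closest(requested, example_rates, ARRAY_SIZE(example_rates));
}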