Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r-- | include/linux/skbuff.h | 269
1 file changed, 193 insertions(+), 76 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ba5583522d24..ca8806b69388 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -14,6 +14,7 @@
 #include <linux/compiler.h>
 #include <linux/time.h>
 #include <linux/bug.h>
+#include <linux/bvec.h>
 #include <linux/cache.h>
 #include <linux/rbtree.h>
 #include <linux/socket.h>
@@ -36,6 +37,9 @@
 #include <linux/in6.h>
 #include <linux/if_packet.h>
 #include <net/flow.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <linux/netfilter/nf_conntrack_common.h>
+#endif
 
 /* The interface for checksum offload between the stack and networking drivers
  * is as follows...
@@ -243,12 +247,6 @@ struct bpf_prog;
 union bpf_attr;
 struct skb_ext;
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-struct nf_conntrack {
-        atomic_t use;
-};
-#endif
-
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 struct nf_bridge_info {
         enum {
@@ -278,6 +276,16 @@ struct nf_bridge_info {
 };
 #endif
 
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+/* Chain in tc_skb_ext will be used to share the tc chain with
+ * ovs recirc_id. It will be set to the current chain by tc
+ * and read by ovs to recirc_id.
+ */
+struct tc_skb_ext {
+        __u32 chain;
+};
+#endif
+
 struct sk_buff_head {
         /* These two members must be first. */
         struct sk_buff *next;
@@ -308,58 +316,45 @@ extern int sysctl_max_skb_frags;
  */
 #define GSO_BY_FRAGS    0xFFFF
 
-typedef struct skb_frag_struct skb_frag_t;
-
-struct skb_frag_struct {
-        struct {
-                struct page *p;
-        } page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
-        __u32 page_offset;
-        __u32 size;
-#else
-        __u16 page_offset;
-        __u16 size;
-#endif
-};
+typedef struct bio_vec skb_frag_t;
 
 /**
- * skb_frag_size - Returns the size of a skb fragment
+ * skb_frag_size() - Returns the size of a skb fragment
  * @frag: skb fragment
  */
 static inline unsigned int skb_frag_size(const skb_frag_t *frag)
 {
-        return frag->size;
+        return frag->bv_len;
 }
 
 /**
- * skb_frag_size_set - Sets the size of a skb fragment
+ * skb_frag_size_set() - Sets the size of a skb fragment
  * @frag: skb fragment
  * @size: size of fragment
  */
 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
 {
-        frag->size = size;
+        frag->bv_len = size;
 }
 
 /**
- * skb_frag_size_add - Incrementes the size of a skb fragment by %delta
+ * skb_frag_size_add() - Increments the size of a skb fragment by @delta
  * @frag: skb fragment
  * @delta: value to add
  */
 static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
 {
-        frag->size += delta;
+        frag->bv_len += delta;
 }
 
 /**
- * skb_frag_size_sub - Decrements the size of a skb fragment by %delta
+ * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
  * @frag: skb fragment
  * @delta: value to subtract
  */
 static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
 {
-        frag->size -= delta;
+        frag->bv_len -= delta;
 }
 
 /**
@@ -379,7 +374,7 @@ static inline bool skb_frag_must_loop(struct page *p)
  * skb_frag_foreach_page - loop over pages in a fragment
  *
  * @f:          skb frag to operate on
- * @f_off:      offset from start of f->page.p
+ * @f_off:      offset from start of f->bv_page
  * @f_len:      length from f_off to loop over
  * @p:          (temp var) current page
  * @p_off:      (temp var) offset from start of current page,
@@ -597,6 +592,8 @@ enum {
         SKB_GSO_UDP = 1 << 16,
 
         SKB_GSO_UDP_L4 = 1 << 17,
+
+        SKB_GSO_FRAGLIST = 1 << 18,
 };
 
 #if BITS_PER_LONG > 32
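The hunks above replace struct skb_frag_struct with a typedef of struct bio_vec, so a fragment's page, offset and length are now reached only through the accessor helpers. A minimal sketch (not part of the patch) of what a consumer looks like under that rule; example_map_frags() and its dev argument are invented names:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Illustrative only: walk the paged fragments of an skb via the accessors,
 * which keep working unchanged now that skb_frag_t aliases struct bio_vec.
 * Error unwinding of earlier mappings is omitted for brevity.
 */
static int example_map_frags(struct device *dev, const struct sk_buff *skb)
{
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                dma_addr_t addr;

                /* page, offset and length come from the bio_vec fields,
                 * but only via the helpers, never through ->bv_* directly.
                 */
                addr = dma_map_page(dev, skb_frag_page(frag),
                                    skb_frag_off(frag), skb_frag_size(frag),
                                    DMA_TO_DEVICE);
                if (dma_mapping_error(dev, addr))
                        return -ENOMEM;
        }
        return 0;
}

skb_frag_dma_map(), which a later hunk in this diff also converts to skb_frag_off(), wraps the same pattern for a single fragment.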
@@ -916,7 +913,6 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb)
 #define SKB_DST_NOREF   1UL
 #define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
 
-#define SKB_NFCT_PTRMASK        ~(7UL)
 /**
  * skb_dst - returns skb dst_entry
  * @skb: buffer
@@ -1283,7 +1279,7 @@ static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
 
 struct bpf_flow_dissector;
 bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
-                      __be16 proto, int nhoff, int hlen);
+                      __be16 proto, int nhoff, int hlen, unsigned int flags);
 
 bool __skb_flow_dissect(const struct net *net,
                         const struct sk_buff *skb,
@@ -1360,7 +1356,8 @@ static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6
         return skb->hash;
 }
 
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+                           const siphash_key_t *perturb);
 
 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
 {
@@ -1483,6 +1480,11 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
         skb->next = NULL;
 }
 
+/* Iterate through singly-linked GSO fragments of an skb. */
+#define skb_list_walk_safe(first, skb, next_skb) \
+        for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
+             (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
+
 static inline void skb_list_del_init(struct sk_buff *skb)
 {
         __list_del_entry(&skb->list);
@@ -1501,6 +1503,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
 }
 
 /**
+ * skb_queue_empty_lockless - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ * This variant can be used in lockless contexts.
+ */
+static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+{
+        return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
+
+/**
  * skb_queue_is_last - check if skb is the last entry in the queue
  * @list: queue head
  * @skb: buffer
@@ -1787,7 +1802,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
  */
 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
 {
-        struct sk_buff *skb = list_->prev;
+        struct sk_buff *skb = READ_ONCE(list_->prev);
 
         if (skb == (struct sk_buff *)list_)
                 skb = NULL;
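skb_list_walk_safe() caches the next pointer before running the loop body, so a segment may be freed or re-linked while walking a GSO segment list. A hedged sketch of the intended usage; example_send_segments() and example_xmit_one() are invented names, not helpers from this patch:

/* Illustrative only: consume each segment of a list produced by
 * skb_segment(); example_xmit_one() stands in for the real consumer.
 */
static void example_xmit_one(struct sk_buff *skb)
{
        consume_skb(skb);               /* placeholder consumer */
}

static void example_send_segments(struct sk_buff *segs)
{
        struct sk_buff *skb, *next;

        skb_list_walk_safe(segs, skb, next) {
                /* next is already cached, so the body may free skb */
                skb_mark_not_on_list(skb);
                example_xmit_one(skb);
        }
}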
@@ -1807,6 +1822,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 }
 
 /**
+ * skb_queue_len_lockless - get queue length
+ * @list_: list to measure
+ *
+ * Return the length of an &sk_buff queue.
+ * This variant can be used in lockless contexts.
+ */
+static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
+{
+        return READ_ONCE(list_->qlen);
+}
+
+/**
  * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
  * @list: queue to initialize
  *
@@ -1853,9 +1880,13 @@ static inline void __skb_insert(struct sk_buff *newsk,
                                 struct sk_buff *prev, struct sk_buff *next,
                                 struct sk_buff_head *list)
 {
-        newsk->next = next;
-        newsk->prev = prev;
-        next->prev  = prev->next = newsk;
+        /* See skb_queue_empty_lockless() and skb_peek_tail()
+         * for the opposite READ_ONCE()
+         */
+        WRITE_ONCE(newsk->next, next);
+        WRITE_ONCE(newsk->prev, prev);
+        WRITE_ONCE(next->prev, newsk);
+        WRITE_ONCE(prev->next, newsk);
         list->qlen++;
 }
 
@@ -1866,11 +1897,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
         struct sk_buff *first = list->next;
         struct sk_buff *last = list->prev;
 
-        first->prev = prev;
-        prev->next = first;
+        WRITE_ONCE(first->prev, prev);
+        WRITE_ONCE(prev->next, first);
 
-        last->next = next;
-        next->prev = last;
+        WRITE_ONCE(last->next, next);
+        WRITE_ONCE(next->prev, last);
 }
 
 /**
@@ -2007,12 +2038,12 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
         struct sk_buff *next, *prev;
 
-        list->qlen--;
+        WRITE_ONCE(list->qlen, list->qlen - 1);
         next       = skb->next;
         prev       = skb->prev;
         skb->next  = skb->prev = NULL;
-        next->prev = prev;
-        prev->next = next;
+        WRITE_ONCE(next->prev, prev);
+        WRITE_ONCE(prev->next, next);
 }
 
 /**
@@ -2097,8 +2128,8 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
          * that not all callers have unique ownership of the page but rely
          * on page_is_pfmemalloc doing the right thing(tm).
          */
-        frag->page.p      = page;
-        frag->page_offset = off;
+        frag->bv_page     = page;
+        frag->bv_offset   = off;
         skb_frag_size_set(frag, size);
 
         page = compound_head(page);
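skb_queue_empty_lockless() / skb_queue_len_lockless() and the WRITE_ONCE() stores added to __skb_insert(), __skb_queue_splice() and __skb_unlink() are two halves of one annotation: lockless readers use READ_ONCE(), while writers holding the queue lock use WRITE_ONCE(). A small sketch of the reader side, with invented names (not from this patch); the locked helper still performs the authoritative re-check:

/* Illustrative only: cheap lockless early exit before the locked dequeue. */
static struct sk_buff *example_try_dequeue(struct sk_buff_head *queue)
{
        if (skb_queue_empty_lockless(queue))
                return NULL;            /* fast path, no lock taken */

        /* A racing producer or consumer is fine: skb_dequeue() re-checks
         * under the queue lock and returns NULL if the queue drained.
         */
        return skb_dequeue(queue);
}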
@@ -2267,12 +2298,12 @@ static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
         return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
 }
 
-static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
 {
         if (likely(len <= skb_headlen(skb)))
-                return 1;
+                return true;
         if (unlikely(len > skb->len))
-                return 0;
+                return false;
         return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
 }
 
@@ -2878,6 +2909,46 @@ static inline void skb_propagate_pfmemalloc(struct page *page,
 }
 
 /**
+ * skb_frag_off() - Returns the offset of a skb fragment
+ * @frag: the paged fragment
+ */
+static inline unsigned int skb_frag_off(const skb_frag_t *frag)
+{
+        return frag->bv_offset;
+}
+
+/**
+ * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
+ * @frag: skb fragment
+ * @delta: value to add
+ */
+static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
+{
+        frag->bv_offset += delta;
+}
+
+/**
+ * skb_frag_off_set() - Sets the offset of a skb fragment
+ * @frag: skb fragment
+ * @offset: offset of fragment
+ */
+static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
+{
+        frag->bv_offset = offset;
+}
+
+/**
+ * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
+ * @fragto: skb fragment where offset is set
+ * @fragfrom: skb fragment offset is copied from
+ */
+static inline void skb_frag_off_copy(skb_frag_t *fragto,
+                                     const skb_frag_t *fragfrom)
+{
+        fragto->bv_offset = fragfrom->bv_offset;
+}
+
+/**
  * skb_frag_page - retrieve the page referred to by a paged fragment
  * @frag: the paged fragment
  *
@@ -2885,7 +2956,7 @@ static inline void skb_propagate_pfmemalloc(struct page *page,
  */
 static inline struct page *skb_frag_page(const skb_frag_t *frag)
 {
-        return frag->page.p;
+        return frag->bv_page;
 }
 
 /**
@@ -2943,7 +3014,7 @@ static inline void skb_frag_unref(struct sk_buff *skb, int f)
  */
 static inline void *skb_frag_address(const skb_frag_t *frag)
 {
-        return page_address(skb_frag_page(frag)) + frag->page_offset;
+        return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
 }
 
 /**
@@ -2959,7 +3030,18 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag)
         if (unlikely(!ptr))
                 return NULL;
 
-        return ptr + frag->page_offset;
+        return ptr + skb_frag_off(frag);
+}
+
+/**
+ * skb_frag_page_copy() - sets the page in a fragment from another fragment
+ * @fragto: skb fragment where page is set
+ * @fragfrom: skb fragment page is copied from
+ */
+static inline void skb_frag_page_copy(skb_frag_t *fragto,
+                                      const skb_frag_t *fragfrom)
+{
+        fragto->bv_page = fragfrom->bv_page;
 }
 
 /**
@@ -2971,7 +3053,7 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag)
  */
 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
 {
-        frag->page.p = page;
+        frag->bv_page = page;
 }
 
 /**
@@ -3007,7 +3089,7 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev,
                                            enum dma_data_direction dir)
 {
         return dma_map_page(dev, skb_frag_page(frag),
-                            frag->page_offset + offset, size, dir);
+                            skb_frag_off(frag) + offset, size, dir);
 }
 
 static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
@@ -3174,10 +3256,10 @@ static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
         if (skb_zcopy(skb))
                 return false;
         if (i) {
-                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
                 return page == skb_frag_page(frag) &&
-                       off == frag->page_offset + skb_frag_size(frag);
+                       off == skb_frag_off(frag) + skb_frag_size(frag);
         }
         return false;
 }
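With pskb_may_pull() now returning bool, header-parsing callers read as plain predicates. An illustrative helper (not from this patch); the UDP header is just an arbitrary example of data that may still sit in a paged fragment before the pull, and example_udp_len() is an invented name:

#include <linux/udp.h>

/* Illustrative only: make sure at least a UDP header is in the linear area,
 * then read it. Assumes skb->data currently points at the transport header.
 */
static int example_udp_len(struct sk_buff *skb)
{
        const struct udphdr *uh;

        if (!pskb_may_pull(skb, sizeof(*uh)))
                return -EINVAL;

        uh = (const struct udphdr *)skb->data;
        return ntohs(uh->len);
}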
@@ -3396,7 +3478,8 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
         for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 
 
-int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
+int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
+                                int *err, long *timeo_p,
                                 const struct sk_buff *skb);
 struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
                                           struct sk_buff_head *queue,
@@ -3405,12 +3488,16 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
                                                              struct sk_buff *skb),
                                           int *off, int *err,
                                           struct sk_buff **last);
-struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
+struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
+                                        struct sk_buff_head *queue,
+                                        unsigned int flags,
                                         void (*destructor)(struct sock *sk,
                                                            struct sk_buff *skb),
                                         int *off, int *err,
                                         struct sk_buff **last);
-struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+struct sk_buff *__skb_recv_datagram(struct sock *sk,
+                                    struct sk_buff_head *sk_queue,
+                                    unsigned int flags,
                                     void (*destructor)(struct sock *sk,
                                                        struct sk_buff *skb),
                                     int *off, int *err);
@@ -3460,13 +3547,17 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
+                                 unsigned int offset);
 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
 int skb_ensure_writable(struct sk_buff *skb, int write_len);
 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
 int skb_vlan_pop(struct sk_buff *skb);
 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto);
-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto);
+int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
+                  int mac_len, bool ethernet);
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
+                 bool ethernet);
 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
 int skb_mpls_dec_ttl(struct sk_buff *skb);
 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
@@ -3594,9 +3685,12 @@ static inline void skb_get_new_timestamp(const struct sk_buff *skb,
 }
 
 static inline void skb_get_timestampns(const struct sk_buff *skb,
-                                       struct timespec *stamp)
+                                       struct __kernel_old_timespec *stamp)
 {
-        *stamp = ktime_to_timespec(skb->tstamp);
+        struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
+
+        stamp->tv_sec = ts.tv_sec;
+        stamp->tv_nsec = ts.tv_nsec;
 }
 
 static inline void skb_get_new_timestampns(const struct sk_buff *skb,
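The datagram receive helpers above now take the queue as an explicit argument instead of assuming sk->sk_receive_queue. A hedged sketch of a common-case caller, using invented names and passing the default receive queue; per the prototype in this hunk, the destructor argument is still present and may be NULL:

#include <net/sock.h>

/* Illustrative only: the common case simply names sk->sk_receive_queue. */
static struct sk_buff *example_recv_one(struct sock *sk, unsigned int flags,
                                        int *err)
{
        int off = 0;

        return __skb_recv_datagram(sk, &sk->sk_receive_queue, flags,
                                   NULL /* destructor */, &off, err);
}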
@@ -3991,25 +4085,27 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
 static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-        return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
+        return (void *)(skb->_nfct & NFCT_PTRMASK);
 #else
         return NULL;
 #endif
 }
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-void nf_conntrack_destroy(struct nf_conntrack *nfct);
-static inline void nf_conntrack_put(struct nf_conntrack *nfct)
+static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
 {
-        if (nfct && atomic_dec_and_test(&nfct->use))
-                nf_conntrack_destroy(nfct);
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+        return skb->_nfct;
+#else
+        return 0UL;
+#endif
 }
-static inline void nf_conntrack_get(struct nf_conntrack *nfct)
+
+static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
 {
-        if (nfct)
-                atomic_inc(&nfct->use);
-}
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+        skb->_nfct = nfct;
 #endif
+}
 
 #ifdef CONFIG_SKB_EXTENSIONS
 enum skb_ext_id {
@@ -4019,6 +4115,12 @@ enum skb_ext_id {
 #ifdef CONFIG_XFRM
         SKB_EXT_SEC_PATH,
 #endif
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+        TC_SKB_EXT,
+#endif
+#if IS_ENABLED(CONFIG_MPTCP)
+        SKB_EXT_MPTCP,
+#endif
         SKB_EXT_NUM, /* must be last */
 };
 
@@ -4039,6 +4141,9 @@ struct skb_ext {
         char data[0] __aligned(8);
 };
 
+struct skb_ext *__skb_ext_alloc(void);
+void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
+                    struct skb_ext *ext);
 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
 void __skb_ext_put(struct skb_ext *ext);
@@ -4094,22 +4199,34 @@ static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
 
         return NULL;
 }
+
+static inline void skb_ext_reset(struct sk_buff *skb)
+{
+        if (unlikely(skb->active_extensions)) {
+                __skb_ext_put(skb->extensions);
+                skb->active_extensions = 0;
+        }
+}
+
+static inline bool skb_has_extensions(struct sk_buff *skb)
+{
+        return unlikely(skb->active_extensions);
+}
 #else
 static inline void skb_ext_put(struct sk_buff *skb) {}
+static inline void skb_ext_reset(struct sk_buff *skb) {}
 static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
 static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
+static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
 #endif /* CONFIG_SKB_EXTENSIONS */
 
-static inline void nf_reset(struct sk_buff *skb)
+static inline void nf_reset_ct(struct sk_buff *skb)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
         nf_conntrack_put(skb_nfct(skb));
         skb->_nfct = 0;
 #endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-        skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
-#endif
 }
 
 static inline void nf_reset_trace(struct sk_buff *skb)
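TC_SKB_EXT joins the skb extension ids here so tc can hand its chain index to openvswitch through the tc_skb_ext defined near the top of this patch (only when CONFIG_NET_TC_SKB_EXT is enabled). A sketch of the classifier-side write; example_record_chain() is an invented name, not an API from this patch:

/* Illustrative only: record the current tc chain in the new extension so
 * openvswitch can later map it to a recirc_id.
 */
static int example_record_chain(struct sk_buff *skb, u32 chain)
{
        struct tc_skb_ext *ext;

        ext = skb_ext_add(skb, TC_SKB_EXT);
        if (!ext)
                return -ENOMEM;

        ext->chain = chain;
        return 0;
}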