Diffstat (limited to 'include/net')
-rw-r--r--  include/net/9p/9p.h               |  14
-rw-r--r--  include/net/9p/client.h           |   8
-rw-r--r--  include/net/9p/transport.h        |  10
-rw-r--r--  include/net/caif/caif_hsi.h       |  37
-rw-r--r--  include/net/inet_ecn.h            |   8
-rw-r--r--  include/net/inet_timewait_sock.h  |   3
-rw-r--r--  include/net/ip.h                  |  12
-rw-r--r--  include/net/ip_vs.h               |   1
-rw-r--r--  include/net/sch_generic.h         |  24
-rw-r--r--  include/net/secure_seq.h          |   2
-rw-r--r--  include/net/tcp.h                 |  63
-rw-r--r--  include/net/udplite.h             |  63
12 files changed, 134 insertions(+), 111 deletions(-)
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index a6326ef8ade6..2d70b95b3b55 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -76,11 +76,8 @@ do { \
} \
} while (0)
-#define P9_DUMP_PKT(way, pdu) p9pdu_dump(way, pdu)
-
#else
#define P9_DPRINTK(level, format, arg...) do { } while (0)
-#define P9_DUMP_PKT(way, pdu) do { } while (0)
#endif
@@ -359,6 +356,9 @@ enum p9_qid_t {
/* Room for readdir header */
#define P9_READDIRHDRSZ 24
+/* size of header for zero copy read/write */
+#define P9_ZC_HDR_SZ 4096
+
/**
* struct p9_qid - file system entity information
* @type: 8-bit type &p9_qid_t
@@ -555,10 +555,6 @@ struct p9_rstatfs {
* @tag: transaction id of the request
* @offset: used by marshalling routines to track current position in buffer
* @capacity: used by marshalling routines to track total malloc'd capacity
- * @pubuf: Payload user buffer given by the caller
- * @pkbuf: Payload kernel buffer given by the caller
- * @pbuf_size: pubuf/pkbuf(only one will be !NULL) size to be read/write.
- * @private: For transport layer's use.
* @sdata: payload
*
* &p9_fcall represents the structure for all 9P RPC
@@ -575,10 +571,6 @@ struct p9_fcall {
size_t offset;
size_t capacity;
- char __user *pubuf;
- char *pkbuf;
- size_t pbuf_size;
- void *private;
u8 *sdata;
};
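
The pubuf/pkbuf/pbuf_size/private fields leave struct p9_fcall because zero-copy payloads are no longer smuggled through the PDU: they are handed to the transport via the new zc_request operation (see the transport.h hunk below), and the PDU itself only ever carries the marshalled protocol header, bounded by P9_ZC_HDR_SZ. A minimal sketch, with a hypothetical helper name not taken from the patch:

    #include <linux/slab.h>
    #include <net/9p/9p.h>

    /* Hypothetical sketch (zc_hdr_alloc is illustrative): size a
     * header-only buffer for a zero-copy RPC; the payload pages are
     * passed to the transport separately via zc_request. */
    static u8 *zc_hdr_alloc(gfp_t gfp)
    {
            /* P9_ZC_HDR_SZ (4096) caps the header, not the payload */
            return kmalloc(P9_ZC_HDR_SZ, gfp);
    }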
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 55ce72ce9861..fc9b90b0c052 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -151,7 +151,7 @@ struct p9_req_t {
struct p9_client {
spinlock_t lock; /* protect client structure */
- int msize;
+ unsigned int msize;
unsigned char proto_version;
struct p9_trans_module *trans_mod;
enum p9_trans_status status;
@@ -240,8 +240,8 @@ int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
u64 offset, u32 count);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
-int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
- int proto_version);
+int p9dirent_read(struct p9_client *clnt, char *buf, int len,
+ struct p9_dirent *dirent);
struct p9_wstat *p9_client_stat(struct p9_fid *fid);
int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst);
int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *attr);
@@ -259,7 +259,7 @@ struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
void p9_client_cb(struct p9_client *c, struct p9_req_t *req);
int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
-int p9stat_read(char *, int, struct p9_wstat *, int);
+int p9stat_read(struct p9_client *, char *, int, struct p9_wstat *);
void p9stat_free(struct p9_wstat *);
int p9_is_proto_dotu(struct p9_client *clnt);
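
p9stat_read() and p9dirent_read() now take the client as their first argument and derive the protocol version from it, rather than having every caller pass proto_version explicitly; msize also widens to unsigned int, since a negative message size is meaningless. A hedged sketch of walking a readdir buffer with the new signature, assuming fid, buf, count and offset come from the surrounding readdir code and that p9dirent_read() returns the number of bytes consumed:

    /* Sketch: parse dirents out of a p9_client_readdir() buffer. */
    struct p9_dirent dirent;
    int n, done = 0, err;

    n = p9_client_readdir(fid, buf, count, offset);
    while (done < n) {
            err = p9dirent_read(fid->clnt, buf + done, n - done, &dirent);
            if (err < 0)
                    break;          /* malformed entry */
            done += err;            /* advance by bytes consumed */
    }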
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 83531ebeee99..adcbb20f6511 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -26,13 +26,6 @@
#ifndef NET_9P_TRANSPORT_H
#define NET_9P_TRANSPORT_H
-#define P9_TRANS_PREF_PAYLOAD_MASK 0x1
-
-/* Default. Add Payload to PDU before sending it down to transport layer */
-#define P9_TRANS_PREF_PAYLOAD_DEF 0x0
-/* Send pay load separately to transport layer along with PDU.*/
-#define P9_TRANS_PREF_PAYLOAD_SEP 0x1
-
/**
* struct p9_trans_module - transport module interface
* @list: used to maintain a list of currently available transports
@@ -56,13 +49,14 @@ struct p9_trans_module {
struct list_head list;
char *name; /* name of transport */
int maxsize; /* max message size of transport */
- int pref; /* Preferences of this transport */
int def; /* this transport should be default */
struct module *owner;
int (*create)(struct p9_client *, const char *, char *);
void (*close) (struct p9_client *);
int (*request) (struct p9_client *, struct p9_req_t *req);
int (*cancel) (struct p9_client *, struct p9_req_t *req);
+ int (*zc_request)(struct p9_client *, struct p9_req_t *,
+ char *, char *, int, int, int, int);
};
void v9fs_register_trans(struct p9_trans_module *m);
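
With the P9_TRANS_PREF_PAYLOAD_* flags gone, a transport that supports zero copy simply implements the new zc_request operation. The unnamed parameters appear to be, going by the virtio transport of this era (an assumption, not stated in this header), the user input/output buffers, their lengths, the header length, and a kernel-buffer flag. A hedged sketch of a module advertising it:

    #include <linux/module.h>
    #include <net/9p/client.h>
    #include <net/9p/transport.h>

    /* Sketch: parameter names (uidata/uodata/inlen/outlen/in_hdr_len/
     * kern_buf) follow the virtio transport and are assumed here. */
    static int my_zc_request(struct p9_client *c, struct p9_req_t *req,
                             char *uidata, char *uodata, int inlen,
                             int outlen, int in_hdr_len, int kern_buf)
    {
            /* map uidata/uodata pages and queue req; no payload copy */
            return 0;
    }

    static struct p9_trans_module p9_my_trans = {
            .name       = "my_trans",
            .maxsize    = 64 * 1024,
            .def        = 0,
            .owner      = THIS_MODULE,
            .zc_request = my_zc_request,
            /* .create/.close/.request/.cancel omitted in this sketch */
    };

A transport registers this the usual way, with v9fs_register_trans(&p9_my_trans).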
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
index c5dedd87b4cb..8d552519ff67 100644
--- a/include/net/caif/caif_hsi.h
+++ b/include/net/caif/caif_hsi.h
@@ -52,8 +52,9 @@ struct cfhsi_desc {
/*
* Maximum bytes transferred in one transfer.
*/
-/* TODO: 4096 is temporary... */
-#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * 4096)
+#define CFHSI_MAX_CAIF_FRAME_SZ 4096
+
+#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ)
/* Size of the complete HSI TX buffer. */
#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ)
@@ -75,18 +76,21 @@ struct cfhsi_desc {
#define CFHSI_WAKE_UP_ACK 1
#define CFHSI_WAKE_DOWN_ACK 2
#define CFHSI_AWAKE 3
-#define CFHSI_PENDING_RX 4
-#define CFHSI_SHUTDOWN 6
-#define CFHSI_FLUSH_FIFO 7
+#define CFHSI_WAKELOCK_HELD 4
+#define CFHSI_SHUTDOWN 5
+#define CFHSI_FLUSH_FIFO 6
#ifndef CFHSI_INACTIVITY_TOUT
#define CFHSI_INACTIVITY_TOUT (1 * HZ)
#endif /* CFHSI_INACTIVITY_TOUT */
-#ifndef CFHSI_WAKEUP_TOUT
-#define CFHSI_WAKEUP_TOUT (3 * HZ)
-#endif /* CFHSI_WAKEUP_TOUT */
+#ifndef CFHSI_WAKE_TOUT
+#define CFHSI_WAKE_TOUT (3 * HZ)
+#endif /* CFHSI_WAKE_TOUT */
+#ifndef CFHSI_MAX_RX_RETRIES
+#define CFHSI_MAX_RX_RETRIES (10 * HZ)
+#endif
/* Structure implemented by the CAIF HSI driver. */
struct cfhsi_drv {
@@ -104,11 +108,21 @@ struct cfhsi_dev {
int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_dev *dev);
int (*cfhsi_wake_up) (struct cfhsi_dev *dev);
int (*cfhsi_wake_down) (struct cfhsi_dev *dev);
+ int (*cfhsi_get_peer_wake) (struct cfhsi_dev *dev, bool *status);
int (*cfhsi_fifo_occupancy)(struct cfhsi_dev *dev, size_t *occupancy);
int (*cfhsi_rx_cancel)(struct cfhsi_dev *dev);
struct cfhsi_drv *drv;
};
+/* Structure holds status of received CAIF frames processing */
+struct cfhsi_rx_state {
+ int state;
+ int nfrms;
+ int pld_len;
+ int retries;
+ bool piggy_desc;
+};
+
/* Structure implemented by CAIF HSI drivers. */
struct cfhsi {
struct caif_dev_common cfdev;
@@ -118,7 +132,8 @@ struct cfhsi {
struct cfhsi_drv drv;
struct cfhsi_dev *dev;
int tx_state;
- int rx_state;
+ struct cfhsi_rx_state rx_state;
+ unsigned long inactivity_timeout;
int rx_len;
u8 *rx_ptr;
u8 *tx_buf;
@@ -130,13 +145,13 @@ struct cfhsi {
struct list_head list;
struct work_struct wake_up_work;
struct work_struct wake_down_work;
- struct work_struct rx_done_work;
- struct work_struct tx_done_work;
+ struct work_struct out_of_sync_work;
struct workqueue_struct *wq;
wait_queue_head_t wake_up_wait;
wait_queue_head_t wake_down_wait;
wait_queue_head_t flush_fifo_wait;
struct timer_list timer;
+ struct timer_list rx_slowpath_timer;
unsigned long bits;
};
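
rx_state grows from a plain int into a structure tracking the frame count, payload length, retry count and piggybacked descriptors, and a dedicated rx_slowpath_timer replaces the old rx/tx work items. A minimal sketch of wiring it up during device setup, assuming the setup_timer() API of this kernel generation and an illustrative callback name:

    /* Hedged sketch: cfhsi_rx_slowpath() is illustrative here. */
    static void cfhsi_rx_slowpath(unsigned long arg)
    {
            struct cfhsi *cfhsi = (struct cfhsi *)arg;
            /* retry RX processing; give up after CFHSI_MAX_RX_RETRIES */
    }

    /* ... in probe/setup ... */
    setup_timer(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath,
                (unsigned long)cfhsi);
    memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));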
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 2fa8d1341a0a..2fa14691869c 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -30,6 +30,14 @@ static inline int INET_ECN_is_capable(__u8 dsfield)
return dsfield & INET_ECN_ECT_0;
}
+/*
+ * RFC 3168 9.1.1
+ * The full-functionality option for ECN encapsulation is to copy the
+ * ECN codepoint of the inside header to the outside header on
+ * encapsulation if the inside header is not-ECT or ECT, and to set the
+ * ECN codepoint of the outside header to ECT(0) if the ECN codepoint of
+ * the inside header is CE.
+ */
static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
{
outer &= ~INET_ECN_MASK;
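
The new comment pins the function to the full-functionality rule of RFC 3168 section 9.1.1: copy the inner codepoint outward, except that CE becomes ECT(0). A standalone, runnable rendering of that mapping, with the constant values mirroring the INET_ECN_* definitions in inet_ecn.h:

    #include <stdio.h>

    enum { NOT_ECT = 0, ECT_1 = 1, ECT_0 = 2, CE = 3, ECN_MASK = 3 };

    static unsigned char ecn_encapsulate(unsigned char outer,
                                         unsigned char inner)
    {
            outer &= ~ECN_MASK;
            /* copy inner codepoint; CE maps to ECT(0) on the outside */
            outer |= (inner & ECN_MASK) == CE ? ECT_0 : (inner & ECN_MASK);
            return outer;
    }

    int main(void)
    {
            for (int in = 0; in <= 3; in++)
                    printf("inner %d -> outer %d\n", in,
                           ecn_encapsulate(0, in));
            return 0;
    }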
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index f1a770977c4f..180231c5bbbe 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -126,7 +126,8 @@ struct inet_timewait_sock {
/* And these are ours. */
unsigned int tw_ipv6only : 1,
tw_transparent : 1,
- tw_pad : 14, /* 14 bits hole */
+ tw_pad : 6, /* 6 bits hole */
+ tw_tos : 8,
tw_ipv6_offset : 16;
kmemcheck_bitfield_end(flags);
unsigned long tw_ttd;
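
The arithmetic works out because the old 14-bit hole is split into an 8-bit tw_tos plus a 6-bit pad, keeping the bitfield at 1 + 1 + 6 + 8 + 16 = 32 bits. A hedged one-line sketch of where the field would be filled in; the placement (when the socket enters TIME_WAIT) is an assumption, not shown in this hunk:

    /* Sketch: carry the connection's TOS into the timewait sock so
     * segments sent from TIME_WAIT keep the right DSCP/ECN bits. */
    tw->tw_tos = inet_sk(sk)->tos;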
diff --git a/include/net/ip.h b/include/net/ip.h
index aa76c7a4d9c3..eca0ef7a495e 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -165,6 +165,7 @@ struct ip_reply_arg {
int csumoffset; /* u16 offset of csum in iov[0].iov_base */
/* -1 if not needed */
int bound_dev_if;
+ u8 tos;
};
#define IP_REPLY_ARG_NOSRCCHECK 1
@@ -175,7 +176,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
}
void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
- struct ip_reply_arg *arg, unsigned int len);
+ const struct ip_reply_arg *arg, unsigned int len);
struct ipv4_config {
int log_martians;
@@ -406,9 +407,18 @@ enum ip_defrag_users {
IP_DEFRAG_VS_OUT,
IP_DEFRAG_VS_FWD,
IP_DEFRAG_AF_PACKET,
+ IP_DEFRAG_MACVLAN,
};
int ip_defrag(struct sk_buff *skb, u32 user);
+#ifdef CONFIG_INET
+struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
+#else
+static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+{
+ return skb;
+}
+#endif
int ip_frag_mem(struct net *net);
int ip_frag_nqueues(struct net *net);
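
ip_check_defrag() lets packet taps such as the new IP_DEFRAG_MACVLAN user reassemble only when the skb is actually a fragment, while the !CONFIG_INET stub hands the skb straight back. A hedged sketch of the calling pattern in an rx handler; the NULL-means-queued convention is assumed here rather than stated in the header:

    /* Sketch: NULL means the fragment was queued for reassembly and
     * the caller must stop processing this skb. */
    skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
    if (!skb)
            return RX_HANDLER_CONSUMED;
    /* skb is now either non-fragmented or fully reassembled */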
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 1aaf915656f3..8fa4430f99c1 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -900,6 +900,7 @@ struct netns_ipvs {
volatile int sync_state;
volatile int master_syncid;
volatile int backup_syncid;
+ struct mutex sync_mutex;
/* multicast interface name */
char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
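
The per-netns sync_mutex serializes starting and stopping the sync daemons without holding the global IPVS configuration lock. A hedged sketch of the locking pattern; the start_sync_thread() signature here follows the ip_vs core of this era and is an assumption:

    /* Sketch: serialize sync-daemon start/stop per network namespace. */
    mutex_lock(&ipvs->sync_mutex);
    ret = start_sync_thread(net, IP_VS_STATE_MASTER,
                            ipvs->master_mcast_ifn, ipvs->master_syncid);
    mutex_unlock(&ipvs->sync_mutex);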
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 4fc88f3ccd5f..2eb207ea4eaf 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -46,14 +46,14 @@ struct qdisc_size_table {
struct Qdisc {
int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
struct sk_buff * (*dequeue)(struct Qdisc *dev);
- unsigned flags;
+ unsigned int flags;
#define TCQ_F_BUILTIN 1
#define TCQ_F_INGRESS 2
#define TCQ_F_CAN_BYPASS 4
#define TCQ_F_MQROOT 8
#define TCQ_F_WARN_NONWC (1 << 16)
int padded;
- struct Qdisc_ops *ops;
+ const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
struct list_head list;
u32 handle;
@@ -224,7 +224,7 @@ struct qdisc_skb_cb {
long data[];
};
-static inline int qdisc_qlen(struct Qdisc *q)
+static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
}
@@ -239,12 +239,12 @@ static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
return &qdisc->q.lock;
}
-static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
+static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc;
}
-static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
+static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc_sleeping;
}
@@ -260,7 +260,7 @@ static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
* root. This is enforced by holding the RTNL semaphore, which
* all users of this lock accessor must do.
*/
-static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
+static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root(qdisc);
@@ -268,7 +268,7 @@ static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
return qdisc_lock(root);
}
-static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
+static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root_sleeping(qdisc);
@@ -276,17 +276,17 @@ static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
return qdisc_lock(root);
}
-static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
+static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->dev;
}
-static inline void sch_tree_lock(struct Qdisc *q)
+static inline void sch_tree_lock(const struct Qdisc *q)
{
spin_lock_bh(qdisc_root_sleeping_lock(q));
}
-static inline void sch_tree_unlock(struct Qdisc *q)
+static inline void sch_tree_unlock(const struct Qdisc *q)
{
spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
@@ -319,7 +319,7 @@ static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
}
static inline struct Qdisc_class_common *
-qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
+qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
struct Qdisc_class_common *cl;
struct hlist_node *n;
@@ -393,7 +393,7 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev)
}
/* Are any of the TX qdiscs changing? */
-static inline bool qdisc_tx_changing(struct net_device *dev)
+static inline bool qdisc_tx_changing(const struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
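
This sweep const-qualifies the read-only qdisc accessors so that inspection paths compile without casts. An illustrative helper, not taken from the patch, showing what the stricter types permit:

    /* Illustrative: a read-only statistics path can now take a
     * const qdisc and still use the accessors directly. */
    static bool my_qdisc_is_idle(const struct Qdisc *q)
    {
            return qdisc_qlen(q) == 0 && !qdisc_tx_changing(qdisc_dev(q));
    }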
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index d97f6892c019..c2e542b27a5a 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -10,7 +10,7 @@ extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
__be16 dport);
extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
-extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+extern __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
__be16 sport, __be16 dport);
extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0113d306fcb0..e147f42d643d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -18,7 +18,6 @@
#ifndef _TCP_H
#define _TCP_H
-#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1
#include <linux/list.h>
@@ -327,9 +326,9 @@ extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len);
+ const struct tcphdr *th, unsigned int len);
extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len);
+ const struct tcphdr *th, unsigned int len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
@@ -401,10 +400,10 @@ extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int nonblock, int flags, int *addr_len);
-extern void tcp_parse_options(struct sk_buff *skb,
- struct tcp_options_received *opt_rx, u8 **hvpp,
+extern void tcp_parse_options(const struct sk_buff *skb,
+ struct tcp_options_received *opt_rx, const u8 **hvpp,
int estab);
-extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
+extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
/*
* TCP v4 functions exported for the inet6 API
@@ -450,7 +449,7 @@ extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
-extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
+extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
__u16 *mss);
#else
static inline __u32 cookie_v6_init_sequence(struct sock *sk,
@@ -522,7 +521,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
}
/* tcp.c */
-extern void tcp_get_info(struct sock *, struct tcp_info *);
+extern void tcp_get_info(const struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
@@ -532,8 +531,8 @@ extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
extern void tcp_initialize_rcv_mss(struct sock *sk);
-extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
-extern int tcp_mss_to_mtu(struct sock *sk, int mss);
+extern int tcp_mtu_to_mss(const struct sock *sk, int pmtu);
+extern int tcp_mss_to_mtu(const struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
@@ -574,7 +573,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
- struct dst_entry *dst = __sk_dst_get(sk);
+ const struct dst_entry *dst = __sk_dst_get(sk);
u32 rto_min = TCP_RTO_MIN;
if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
@@ -820,6 +819,7 @@ static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
+
if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
return tp->snd_ssthresh;
else
@@ -832,7 +832,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
-extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
+extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
/* Slow start with delack produces 3 packets of burst, so that
* it is safe "de facto". This will be the default - same as
@@ -861,7 +861,7 @@ static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
static inline void tcp_check_probe_timer(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
if (!tp->packets_out && !icsk->icsk_pending)
@@ -1184,8 +1184,9 @@ struct tcp_md5sig_pool {
/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
- struct sock *sk, struct request_sock *req,
- struct sk_buff *skb);
+ const struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb);
extern struct tcp_md5sig_key * tcp_v4_md5_lookup(struct sock *sk,
struct sock *addr_sk);
extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey,
@@ -1208,11 +1209,11 @@ extern void tcp_free_md5sig_pool(void);
extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
extern void tcp_put_md5sig_pool(void);
-extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
-extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
+extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
+extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
- struct tcp_md5sig_key *key);
+ const struct tcp_md5sig_key *key);
/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
@@ -1225,22 +1226,24 @@ static inline void tcp_write_queue_purge(struct sock *sk)
tcp_clear_all_retrans_hints(tcp_sk(sk));
}
-static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
+static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
return skb_peek(&sk->sk_write_queue);
}
-static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
+static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
return skb_peek_tail(&sk->sk_write_queue);
}
-static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
+static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
+ const struct sk_buff *skb)
{
return skb_queue_next(&sk->sk_write_queue, skb);
}
-static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
+static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
+ const struct sk_buff *skb)
{
return skb_queue_prev(&sk->sk_write_queue, skb);
}
@@ -1254,7 +1257,7 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu
#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
-static inline struct sk_buff *tcp_send_head(struct sock *sk)
+static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
return sk->sk_send_head;
}
@@ -1265,7 +1268,7 @@ static inline bool tcp_skb_is_last(const struct sock *sk,
return skb_queue_is_last(&sk->sk_write_queue, skb);
}
-static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
+static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
if (tcp_skb_is_last(sk, skb))
sk->sk_send_head = NULL;
@@ -1445,9 +1448,9 @@ struct tcp_sock_af_ops {
struct sock *addr_sk);
int (*calc_md5_hash) (char *location,
struct tcp_md5sig_key *md5,
- struct sock *sk,
- struct request_sock *req,
- struct sk_buff *skb);
+ const struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb);
int (*md5_add) (struct sock *sk,
struct sock *addr_sk,
u8 *newkey,
@@ -1464,9 +1467,9 @@ struct tcp_request_sock_ops {
struct request_sock *req);
int (*calc_md5_hash) (char *location,
struct tcp_md5sig_key *md5,
- struct sock *sk,
- struct request_sock *req,
- struct sk_buff *skb);
+ const struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb);
#endif
};
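
The same const sweep reaches the TCP side: parsing helpers take const skbs and headers, and the MD5 option pointer they return is const as well. A hedged sketch of a caller with the new types, assuming skb comes from the surrounding receive path:

    /* Sketch: option parsing with the const-qualified signatures. */
    struct tcp_options_received tmp_opt;
    const u8 *hash_location;

    memset(&tmp_opt, 0, sizeof(tmp_opt));
    tcp_parse_options(skb, &tmp_opt, &hash_location, 0 /* not estab */);
    if (hash_location) {
            /* hash_location points into the (const) header: an MD5
             * signature option was present */
    }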
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 673a024c6b2a..5f097ca7d5c5 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -66,40 +66,34 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
return 0;
}
-static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh)
+/* Slow-path computation of checksum. Socket is locked. */
+static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
+ const struct udp_sock *up = udp_sk(skb->sk);
int cscov = up->len;
+ __wsum csum = 0;
- /*
- * Sender has set `partial coverage' option on UDP-Lite socket
- */
- if (up->pcflag & UDPLITE_SEND_CC) {
+ if (up->pcflag & UDPLITE_SEND_CC) {
+ /*
+ * Sender has set `partial coverage' option on UDP-Lite socket.
+ * The special case "up->pcslen == 0" signifies full coverage.
+ */
if (up->pcslen < up->len) {
- /* up->pcslen == 0 means that full coverage is required,
- * partial coverage only if 0 < up->pcslen < up->len */
- if (0 < up->pcslen) {
- cscov = up->pcslen;
- }
- uh->len = htons(up->pcslen);
+ if (0 < up->pcslen)
+ cscov = up->pcslen;
+ udp_hdr(skb)->len = htons(up->pcslen);
}
- /*
- * NOTE: Causes for the error case `up->pcslen > up->len':
- * (i) Application error (will not be penalized).
- * (ii) Payload too big for send buffer: data is split
- * into several packets, each with its own header.
- * In this case (e.g. last segment), coverage may
- * exceed packet length.
- * Since packets with coverage length > packet length are
- * illegal, we fall back to the defaults here.
- */
+ /*
+ * NOTE: Causes for the error case `up->pcslen > up->len':
+ * (i) Application error (will not be penalized).
+ * (ii) Payload too big for send buffer: data is split
+ * into several packets, each with its own header.
+ * In this case (e.g. last segment), coverage may
+ * exceed packet length.
+ * Since packets with coverage length > packet length are
+ * illegal, we fall back to the defaults here.
+ */
}
- return cscov;
-}
-
-static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
-{
- int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
- __wsum csum = 0;
skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
@@ -115,16 +109,21 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
return csum;
}
+/* Fast-path computation of checksum. Socket may not be locked. */
static inline __wsum udplite_csum(struct sk_buff *skb)
{
- struct sock *sk = skb->sk;
- int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
+ const struct udp_sock *up = udp_sk(skb->sk);
const int off = skb_transport_offset(skb);
- const int len = skb->len - off;
+ int len = skb->len - off;
+ if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
+ if (0 < up->pcslen)
+ len = up->pcslen;
+ udp_hdr(skb)->len = htons(up->pcslen);
+ }
skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
- return skb_checksum(skb, off, min(cscov, len), 0);
+ return skb_checksum(skb, off, len, 0);
}
extern void udplite4_register(void);
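
Both checksum paths now clamp coverage inline with the same three rules visible in the hunk: pcslen == 0 means full coverage, 0 < pcslen < len means partial coverage of pcslen bytes, and pcslen >= len (the illegal case described in the NOTE) falls back to full coverage. A standalone, runnable rendering of that clamp; the UDPLITE_SEND_CC value mirrors udplite.h:

    #include <stdio.h>

    #define UDPLITE_SEND_CC 0x2

    /* Standalone rendering of the sender-side coverage clamp shared by
     * udplite_csum_outgoing() and udplite_csum(). */
    static int udplite_cscov(int pcflag, int pcslen, int len)
    {
            if ((pcflag & UDPLITE_SEND_CC) && pcslen > 0 && pcslen < len)
                    return pcslen;  /* partial coverage */
            return len;             /* full coverage: pcslen == 0 or >= len */
    }

    int main(void)
    {
            printf("%d\n", udplite_cscov(UDPLITE_SEND_CC, 0, 100));   /* 100 */
            printf("%d\n", udplite_cscov(UDPLITE_SEND_CC, 20, 100));  /* 20  */
            printf("%d\n", udplite_cscov(UDPLITE_SEND_CC, 200, 100)); /* 100 */
            return 0;
    }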