author     Eliezer Tamir <eliezer.tamir@linux.intel.com>    2013-06-14 16:33:57 +0300
committer  David S. Miller <davem@davemloft.net>            2013-06-17 15:48:14 -0700
commit     dafcc4380deec21d160c31411f33c8813f67f517
tree       4d1984857eca0b470d229604a5634fee87da7073 /include/net
parent     89bf1b5a683df497c572c4d3bd3f9c9aa919d773
net: add socket option for low latency polling
Add a socket option for low latency polling.
This allows overriding the global sysctl value with a per-socket one.
Also unexport sysctl_net_ll_poll, since for now it is not needed in modules.
Signed-off-by: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
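
The change shown here covers only include/net, so the uapi side of the new option is not visible on this page. For context: the per-socket value stored in sk_ll_usec is set from userspace with setsockopt(); the option introduced by this series eventually landed in mainline under the name SO_BUSY_POLL. A minimal userspace sketch under that assumption (the 50 usec budget is an arbitrary example):

/* Sketch: override the global busy-poll budget for one socket.
 * Assumes a kernel exposing the per-socket knob as SO_BUSY_POLL;
 * the value is the number of microseconds to busy-poll for data.
 */
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_BUSY_POLL
#define SO_BUSY_POLL 46		/* value from asm-generic/socket.h */
#endif

int main(void)
{
	unsigned int ll_usec = 50;	/* arbitrary example budget */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Overrides the global sysctl default for this socket only;
	 * 0 disables busy polling for this socket. */
	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
		       &ll_usec, sizeof(ll_usec)) < 0)
		perror("setsockopt(SO_BUSY_POLL)");

	return 0;
}

The matching kernel-side check is the sk_valid_ll() change in the diff below: busy polling now triggers whenever sk->sk_ll_usec is non-zero, rather than consulting the global sysctl directly.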
Diffstat (limited to 'include/net')
-rw-r--r--   include/net/ll_poll.h   12
-rw-r--r--   include/net/sock.h       2
2 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h
index 6930cbd943e2..fcc7c365cee5 100644
--- a/include/net/ll_poll.h
+++ b/include/net/ll_poll.h
@@ -39,12 +39,12 @@ extern unsigned int sysctl_net_ll_poll __read_mostly;
 /* we can use sched_clock() because we don't care much about precision
  * we only care that the average is bounded
  */
-static inline u64 ll_end_time(void)
+static inline u64 ll_end_time(struct sock *sk)
 {
-	u64 end_time = ACCESS_ONCE(sysctl_net_ll_poll);
+	u64 end_time = ACCESS_ONCE(sk->sk_ll_usec);
 
 	/* we don't mind a ~2.5% imprecision
-	 * sysctl_net_ll_poll is a u_int so this can't overflow
+	 * sk->sk_ll_usec is a u_int so this can't overflow
 	 */
 	end_time = (end_time << 10) + sched_clock();
 
@@ -53,7 +53,7 @@ static inline u64 ll_end_time(void)
 
 static inline bool sk_valid_ll(struct sock *sk)
 {
-	return sysctl_net_ll_poll && sk->sk_napi_id &&
+	return sk->sk_ll_usec && sk->sk_napi_id &&
 	       !need_resched() && !signal_pending(current);
 }
 
@@ -65,7 +65,7 @@ static inline bool can_poll_ll(u64 end_time)
 static inline bool sk_poll_ll(struct sock *sk, int nonblock)
 {
 	const struct net_device_ops *ops;
-	u64 end_time = ll_end_time();
+	u64 end_time = ll_end_time(sk);
 	struct napi_struct *napi;
 	int rc = false;
 
@@ -118,7 +118,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
 
 #else /* CONFIG_NET_LL_RX_POLL */
 
-static inline u64 ll_end_time(void)
+static inline u64 ll_end_time(struct sock *sk)
 {
 	return 0;
 }
diff --git a/include/net/sock.h b/include/net/sock.h
index ac8e1818380c..21db792bffa5 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -230,6 +230,7 @@ struct cg_proto;
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
  *	@sk_napi_id: id of the last napi context to receive data for sk
+ *	@sk_ll_usec: usecs to busypoll when there is no data
  *	@sk_allocation: allocation mode
  *	@sk_sndbuf: size of send buffer in bytes
  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
@@ -328,6 +329,7 @@ struct sock {
 #endif
 #ifdef CONFIG_NET_LL_RX_POLL
 	unsigned int		sk_napi_id;
+	unsigned int		sk_ll_usec;
 #endif
 	atomic_t		sk_drops;
 	int			sk_rcvbuf;
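
A note on the arithmetic in ll_end_time(): sched_clock() returns nanoseconds, while sk_ll_usec is a microsecond budget, so an exact conversion would multiply by 1000. The patch instead shifts left by 10 (multiply by 1024), trading a 2.4% overestimate, the "~2.5% imprecision" the comment accepts, for a shift instead of a multiply; and because sk_ll_usec is a 32-bit u_int, shifting it by 10 bits into a u64 cannot overflow. A standalone sketch of the conversion (plain userspace C, not kernel code; 50 usec is an arbitrary example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t usec = 50;		/* example sk_ll_usec budget */

	/* Exact conversion: 1 usec = 1000 ns. */
	uint64_t exact = usec * 1000;

	/* What ll_end_time() does: usec << 10, i.e. usec * 1024.
	 * Overestimates by 24/1000 = 2.4%, within the "~2.5%
	 * imprecision" the patch comment accepts. */
	uint64_t approx = usec << 10;

	printf("exact %llu ns, approx %llu ns (+%.1f%%)\n",
	       (unsigned long long)exact, (unsigned long long)approx,
	       100.0 * (double)(approx - exact) / (double)exact);
	return 0;
}

The result is added to sched_clock() to form an absolute deadline, which can_poll_ll() then compares against the current sched_clock() value on each busy-poll iteration.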