author    stephen hemminger <stephen@networkplumber.org>  2013-12-08 12:15:44 -0800
committer David S. Miller <davem@davemloft.net>           2013-12-10 21:57:11 -0500
commit    8e3bff96afa67369008153f3326fa5ce985cabab (patch)
tree      ee0e38aa3b976362016a9a9727c60be63aed376e /net/ipv4
parent    22a93216140e5097e8d9d2f99784cfd1c6158ee6 (diff)
net: more spelling fixes
Various spelling fixes in networking stack

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/ip_sockglue.c   2
-rw-r--r--  net/ipv4/tcp_output.c   10
2 files changed, 6 insertions, 6 deletions
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ddf32a6bc415..a9fc435dc89f 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1051,7 +1051,7 @@ e_inval:
*
* To support IP_CMSG_PKTINFO option, we store rt_iif and specific
* destination in skb->cb[] before dst drop.
- * This way, receiver doesnt make cache line misses to read rtable.
+ * This way, receiver doesn't make cache line misses to read rtable.
*/
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
{
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 993da005e087..2a69f42e51ca 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -408,7 +408,7 @@ struct tcp_out_options {
* Beware: Something in the Internet is very sensitive to the ordering of
* TCP options, we learned this through the hard way, so be careful here.
* Luckily we can at least blame others for their non-compliance but from
- * inter-operatibility perspective it seems that we're somewhat stuck with
+ * inter-operability perspective it seems that we're somewhat stuck with
* the ordering which we have been using if we want to keep working with
* those broken things (not that it currently hurts anybody as there isn't
* particular reason why the ordering would need to be changed).
@@ -681,7 +681,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
*
* Its important tcp_wfree() can be replaced by sock_wfree() in the event skb
* needs to be reallocated in a driver.
- * The invariant being skb->truesize substracted from sk->sk_wmem_alloc
+ * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
*
* Since transmit from skb destructor is forbidden, we use a tasklet
* to process all sockets that eventually need to send more skbs.
@@ -701,9 +701,9 @@ static void tcp_tsq_handler(struct sock *sk)
tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
}
/*
- * One tasklest per cpu tries to send more skbs.
+ * One tasklet per cpu tries to send more skbs.
* We run in tasklet context but need to disable irqs when
- * transfering tsq->head because tcp_wfree() might
+ * transferring tsq->head because tcp_wfree() might
* interrupt us (non NAPI drivers)
*/
static void tcp_tasklet_func(unsigned long data)
@@ -797,7 +797,7 @@ void __init tcp_tasklet_init(void)
/*
* Write buffer destructor automatically called from kfree_skb.
- * We cant xmit new skbs from this context, as we might already
+ * We can't xmit new skbs from this context, as we might already
* hold qdisc lock.
*/
void tcp_wfree(struct sk_buff *skb)