author		Eric Dumazet <eric.dumazet@gmail.com>	2010-12-06 20:50:09 +0000
committer	David S. Miller <davem@davemloft.net>	2010-12-08 10:30:34 -0800
commit		62ab0812137ec4f9884dd7de346238841ac03283 (patch)
tree		da0807aee4597522b0ecabc51d2d9fc300895d98 /net/packet
parent		38f49e8801565674c424896c3dcb4228410b43a8 (diff)
filter: constify sk_run_filter()
sk_run_filter() doesn't write to the skb, so change its prototype to reflect this. Also fix two comments in af_packet.c.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
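The change is purely about the prototype: a function that only inspects a structure can take a pointer-to-const, which lets the compiler reject accidental writes while still accepting non-const objects from callers. A minimal standalone sketch of that idea (the toy_buf and filter_len names are hypothetical stand-ins, not the kernel's sk_buff or sk_run_filter API):

    #include <stdio.h>

    /* Toy stand-in for a packet buffer; purely illustrative. */
    struct toy_buf {
            unsigned char *data;
            unsigned int len;
    };

    /* Read-only inspection: the const qualifier documents (and enforces)
     * that this function never writes through 'buf'. */
    static unsigned int filter_len(const struct toy_buf *buf, unsigned int res)
    {
            /* e.g. clamp the verdict to the buffer length */
            return res < buf->len ? res : buf->len;
    }

    int main(void)
    {
            unsigned char payload[64] = { 0 };
            struct toy_buf buf = { .data = payload, .len = sizeof(payload) };

            /* A non-const object converts implicitly to const struct toy_buf *,
             * so existing callers need no change after the prototype tightens. */
            printf("verdict: %u\n", filter_len(&buf, 100));
            return 0;
    }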
Diffstat (limited to 'net/packet')
-rw-r--r--  net/packet/af_packet.c | 31 ++++++++++++++++---------------
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a11c731d2ee4..17eafe5b48c6 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -517,7 +517,8 @@ out_free:
 	return err;
 }
 
-static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
+static inline unsigned int run_filter(const struct sk_buff *skb,
+				      const struct sock *sk,
 				      unsigned int res)
 {
 	struct sk_filter *filter;
@@ -532,15 +533,15 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
 }
 
 /*
-   This function makes lazy skb cloning in hope that most of packets
-   are discarded by BPF.
-
-   Note tricky part: we DO mangle shared skb! skb->data, skb->len
-   and skb->cb are mangled. It works because (and until) packets
-   falling here are owned by current CPU. Output packets are cloned
-   by dev_queue_xmit_nit(), input packets are processed by net_bh
-   sequencially, so that if we return skb to original state on exit,
-   we will not harm anyone.
+ * This function makes lazy skb cloning in hope that most of packets
+ * are discarded by BPF.
+ *
+ * Note tricky part: we DO mangle shared skb! skb->data, skb->len
+ * and skb->cb are mangled. It works because (and until) packets
+ * falling here are owned by current CPU. Output packets are cloned
+ * by dev_queue_xmit_nit(), input packets are processed by net_bh
+ * sequencially, so that if we return skb to original state on exit,
+ * we will not harm anyone.
  */
 
 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -566,11 +567,11 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	if (dev->header_ops) {
 		/* The device has an explicit notion of ll header,
-		   exported to higher levels.
-
-		   Otherwise, the device hides datails of it frame
-		   structure, so that corresponding packet head
-		   never delivered to user.
+		 * exported to higher levels.
+		 *
+		 * Otherwise, the device hides details of its frame
+		 * structure, so that corresponding packet head is
+		 * never delivered to user.
 		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
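The reworded comment in the second hunk describes a save/restore pattern rather than cloning: packet_rcv() may adjust the shared skb's data pointer and length (e.g. pushing the link-layer header back on), and puts them back before returning so later taps see the packet unchanged. A rough, self-contained sketch of that pattern under hypothetical names (toy_buf, toy_rcv, deliver_copy); the real code operates on struct sk_buff and clones only when it actually keeps the packet:

    #include <stdio.h>

    struct toy_buf {
            unsigned char *data;   /* current start of the packet */
            unsigned int len;      /* current length */
    };

    /* Pretend consumer that only reads the (re-pointed) buffer. */
    static void deliver_copy(const struct toy_buf *buf)
    {
            printf("delivering %u bytes\n", buf->len);
    }

    /* Receive path: temporarily "mangle" the shared buffer to expose the
     * link-layer header, then restore it before returning so the next tap
     * sees the buffer exactly as it arrived. */
    static void toy_rcv(struct toy_buf *buf, unsigned int ll_header_len)
    {
            unsigned char *saved_data = buf->data;  /* save original state */
            unsigned int saved_len = buf->len;

            buf->data -= ll_header_len;             /* expose the ll header */
            buf->len += ll_header_len;

            deliver_copy(buf);

            buf->data = saved_data;                 /* restore on exit */
            buf->len = saved_len;
    }

    int main(void)
    {
            unsigned char frame[128] = { 0 };
            /* Payload starts 14 bytes in, as if an Ethernet header preceded it. */
            struct toy_buf buf = { .data = frame + 14, .len = 100 };

            toy_rcv(&buf, 14);
            printf("after rcv: %u bytes at offset %ld\n",
                   buf.len, (long)(buf.data - frame));
            return 0;
    }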