author | Patrick McHardy <kaber@trash.net> | 2013-04-17 06:47:00 +0000
committer | David S. Miller <davem@davemloft.net> | 2013-04-19 14:57:57 -0400
commit | cf0a018ac669955c10e4fca24fa55dde58434e9a (patch)
tree | b2c087fe536127cf30639a773a8da78452994041
parent | 1298ca4671acb10310baa550ed044c553e3a3387 (diff)
netlink: add netlink_skb_set_owner_r()
For mmap'ed I/O a netlink specific skb destructor needs to be invoked
after the final kfree_skb() to clean up state. This doesn't work currently
since the skb's ownership is transferred to the receiving socket using
skb_set_owner_r(), which orphans the skb, thereby invoking the destructor
prematurely.
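
For context (not part of this patch), the generic helper in include/net/sock.h looked roughly like the sketch below at the time; the skb_orphan() call at the top runs the skb's current destructor and clears skb->sk, which is the premature cleanup described above:

/* Rough sketch of the generic helper, shown only to illustrate the problem:
 * skb_orphan() invokes the skb's destructor and detaches it from its socket
 * before ownership is handed to the receiver.
 */
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}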
Since netlink doesn't account skbs to the originating socket, there's no
need to orphan the skb. Add a netlink specific skb_set_owner_r() variant
that does not orphan the skb and use a netlink specific destructor to
call sock_rfree().
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | net/netlink/af_netlink.c | 20
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 26779c24b1d4..58b9025978fa 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -119,6 +119,20 @@ static void netlink_consume_callback(struct netlink_callback *cb)
 	kfree(cb);
 }
 
+static void netlink_skb_destructor(struct sk_buff *skb)
+{
+	sock_rfree(skb);
+}
+
+static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+{
+	WARN_ON(skb->sk != NULL);
+	skb->sk = sk;
+	skb->destructor = netlink_skb_destructor;
+	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+	sk_mem_charge(sk, skb->truesize);
+}
+
 static void netlink_sock_destruct(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
@@ -820,7 +834,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
 		}
 		return 1;
 	}
-	skb_set_owner_r(skb, sk);
+	netlink_skb_set_owner_r(skb, sk);
 	return 0;
 }
 
@@ -890,7 +904,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
 	ret = -ECONNREFUSED;
 	if (nlk->netlink_rcv != NULL) {
 		ret = skb->len;
-		skb_set_owner_r(skb, sk);
+		netlink_skb_set_owner_r(skb, sk);
 		NETLINK_CB(skb).sk = ssk;
 		nlk->netlink_rcv(skb);
 		consume_skb(skb);
@@ -962,7 +976,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
 	    !test_bit(NETLINK_CONGESTED, &nlk->state)) {
-		skb_set_owner_r(skb, sk);
+		netlink_skb_set_owner_r(skb, sk);
 		__netlink_sendskb(sk, skb);
 		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
 	}